1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Kernel-based Virtual Machine driver for Linux
4 *
5 * This module enables machines with Intel VT-x extensions to run virtual
6 * machines without emulation or binary translation.
7 *
8 * MMU support
9 *
10 * Copyright (C) 2006 Qumranet, Inc.
11 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
12 *
13 * Authors:
14 * Yaniv Kamay <yaniv@qumranet.com>
15 * Avi Kivity <avi@qumranet.com>
16 */
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19 #include "irq.h"
20 #include "ioapic.h"
21 #include "mmu.h"
22 #include "mmu_internal.h"
23 #include "tdp_mmu.h"
24 #include "x86.h"
25 #include "kvm_cache_regs.h"
26 #include "smm.h"
27 #include "kvm_emulate.h"
28 #include "page_track.h"
29 #include "cpuid.h"
30 #include "spte.h"
31
32 #include <linux/kvm_host.h>
33 #include <linux/types.h>
34 #include <linux/string.h>
35 #include <linux/mm.h>
36 #include <linux/highmem.h>
37 #include <linux/moduleparam.h>
38 #include <linux/export.h>
39 #include <linux/swap.h>
40 #include <linux/hugetlb.h>
41 #include <linux/compiler.h>
42 #include <linux/srcu.h>
43 #include <linux/slab.h>
44 #include <linux/sched/signal.h>
45 #include <linux/uaccess.h>
46 #include <linux/hash.h>
47 #include <linux/kern_levels.h>
48 #include <linux/kstrtox.h>
49 #include <linux/kthread.h>
50 #include <linux/wordpart.h>
51
52 #include <asm/page.h>
53 #include <asm/memtype.h>
54 #include <asm/cmpxchg.h>
55 #include <asm/io.h>
56 #include <asm/set_memory.h>
57 #include <asm/spec-ctrl.h>
58 #include <asm/vmx.h>
59
60 #include "trace.h"
61
62 static bool nx_hugepage_mitigation_hard_disabled;
63
64 int __read_mostly nx_huge_pages = -1;
65 static uint __read_mostly nx_huge_pages_recovery_period_ms;
66 #ifdef CONFIG_PREEMPT_RT
67 /* Recovery can cause latency spikes, disable it for PREEMPT_RT. */
68 static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
69 #else
70 static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
71 #endif
72
73 static int get_nx_huge_pages(char *buffer, const struct kernel_param *kp);
74 static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
75 static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp);
76
77 static const struct kernel_param_ops nx_huge_pages_ops = {
78 .set = set_nx_huge_pages,
79 .get = get_nx_huge_pages,
80 };
81
82 static const struct kernel_param_ops nx_huge_pages_recovery_param_ops = {
83 .set = set_nx_huge_pages_recovery_param,
84 .get = param_get_uint,
85 };
86
87 module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644);
88 __MODULE_PARM_TYPE(nx_huge_pages, "bool");
89 module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_param_ops,
90 &nx_huge_pages_recovery_ratio, 0644);
91 __MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint");
92 module_param_cb(nx_huge_pages_recovery_period_ms, &nx_huge_pages_recovery_param_ops,
93 &nx_huge_pages_recovery_period_ms, 0644);
94 __MODULE_PARM_TYPE(nx_huge_pages_recovery_period_ms, "uint");
95
96 static bool __read_mostly force_flush_and_sync_on_reuse;
97 module_param_named(flush_on_reuse, force_flush_and_sync_on_reuse, bool, 0644);
98
99 /*
100 * When this variable is set to true, it enables Two-Dimensional Paging,
101 * where the hardware walks two sets of page tables:
102 * 1. the guest-virtual to guest-physical translation (the guest's page tables)
103 * 2. while doing 1., the guest-physical to host-physical translation (EPT/NPT)
104 * If the hardware supports that, we don't need to do shadow paging.
105 */
106 bool tdp_enabled = false;
107
108 static bool __ro_after_init tdp_mmu_allowed;
109
110 #ifdef CONFIG_X86_64
111 bool __read_mostly tdp_mmu_enabled = true;
112 module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0444);
113 #endif
114
115 static int max_huge_page_level __read_mostly;
116 static int tdp_root_level __read_mostly;
117 static int max_tdp_level __read_mostly;
118
119 #define PTE_PREFETCH_NUM 8
120
121 #include <trace/events/kvm.h>
122
123 /* make pte_list_desc fit well in cache lines */
124 #define PTE_LIST_EXT 14
125
126 /*
127 * struct pte_list_desc is the core data structure used to implement a custom
128 * list for tracking a set of related SPTEs, e.g. all the SPTEs that map a
129 * given GFN when used in the context of rmaps. Using a custom list allows KVM
130 * to optimize for the common case where many GFNs will have at most a handful
131 * of SPTEs pointing at them, i.e. allows packing multiple SPTEs into a small
132 * memory footprint, which in turn improves runtime performance by exploiting
133 * cache locality.
134 *
135 * A list consists of one or more pte_list_desc objects (descriptors).
136 * Each individual descriptor stores up to PTE_LIST_EXT SPTEs. If a descriptor
137 * is full and a new SPTE needs to be added, a new descriptor is allocated and
138 * becomes the head of the list. This means that, by definition, all tail
139 * descriptors are full.
140 *
141 * Note, the metadata fields are deliberately placed at the start of the
142 * structure to optimize the cacheline layout; accessing the descriptor will
143 * touch only a single cacheline so long as @spte_count <= 6 (or if only the
144 * descriptor's metadata is accessed).
145 */
146 struct pte_list_desc {
147 struct pte_list_desc *more;
148 /* The number of PTEs stored in _this_ descriptor. */
149 u32 spte_count;
150 /* The number of PTEs stored in all tails of this descriptor. */
151 u32 tail_count;
152 u64 *sptes[PTE_LIST_EXT];
153 };
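/*
 * For example, a gfn mapped by 17 SPTEs is tracked by two descriptors: the
 * head holds the 3 most recently added SPTEs (spte_count == 3,
 * tail_count == 14) and its single, full tail holds the remaining 14. A gfn
 * mapped by exactly one SPTE needs no descriptor at all; the sptep is stored
 * directly in the rmap head (see the rmap_head encoding comment below).
 */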
154
155 struct kvm_shadow_walk_iterator {
156 u64 addr;
157 hpa_t shadow_addr;
158 u64 *sptep;
159 int level;
160 unsigned index;
161 };
162
163 #define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker) \
164 for (shadow_walk_init_using_root(&(_walker), (_vcpu), \
165 (_root), (_addr)); \
166 shadow_walk_okay(&(_walker)); \
167 shadow_walk_next(&(_walker)))
168
169 #define for_each_shadow_entry(_vcpu, _addr, _walker) \
170 for (shadow_walk_init(&(_walker), _vcpu, _addr); \
171 shadow_walk_okay(&(_walker)); \
172 shadow_walk_next(&(_walker)))
173
174 #define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte) \
175 for (shadow_walk_init(&(_walker), _vcpu, _addr); \
176 shadow_walk_okay(&(_walker)) && \
177 ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; }); \
178 __shadow_walk_next(&(_walker), spte))
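/*
 * A typical lockless walk (sketch only, with hypothetical local variable
 * names) brackets the iteration with the begin/end helpers defined below:
 *
 *	struct kvm_shadow_walk_iterator iterator;
 *	u64 spte;
 *
 *	walk_shadow_page_lockless_begin(vcpu);
 *	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
 *		if (!is_shadow_present_pte(spte))
 *			break;
 *	walk_shadow_page_lockless_end(vcpu);
 */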
179
180 static struct kmem_cache *pte_list_desc_cache;
181 struct kmem_cache *mmu_page_header_cache;
182
183 static void mmu_spte_set(u64 *sptep, u64 spte);
184
185 struct kvm_mmu_role_regs {
186 const unsigned long cr0;
187 const unsigned long cr4;
188 const u64 efer;
189 };
190
191 #define CREATE_TRACE_POINTS
192 #include "mmutrace.h"
193
194 /*
195 * Yes, lots of underscores. They're a hint that you probably shouldn't be
196 * reading from the role_regs. Once the root_role is constructed, it becomes
197 * the single source of truth for the MMU's state.
198 */
199 #define BUILD_MMU_ROLE_REGS_ACCESSOR(reg, name, flag) \
200 static inline bool __maybe_unused \
201 ____is_##reg##_##name(const struct kvm_mmu_role_regs *regs) \
202 { \
203 return !!(regs->reg & flag); \
204 }
205 BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, pg, X86_CR0_PG);
206 BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, wp, X86_CR0_WP);
207 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pse, X86_CR4_PSE);
208 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pae, X86_CR4_PAE);
209 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smep, X86_CR4_SMEP);
210 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smap, X86_CR4_SMAP);
211 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pke, X86_CR4_PKE);
212 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, la57, X86_CR4_LA57);
213 BUILD_MMU_ROLE_REGS_ACCESSOR(efer, nx, EFER_NX);
214 BUILD_MMU_ROLE_REGS_ACCESSOR(efer, lma, EFER_LMA);
215
216 /*
217 * The MMU itself (with a valid role) is the single source of truth for the
218 * MMU. Do not use the regs used to build the MMU/role, nor the vCPU. The
219 * regs don't account for dependencies, e.g. clearing CR4 bits if CR0.PG=1,
220 * and the vCPU may be incorrect/irrelevant.
221 */
222 #define BUILD_MMU_ROLE_ACCESSOR(base_or_ext, reg, name) \
223 static inline bool __maybe_unused is_##reg##_##name(struct kvm_mmu *mmu) \
224 { \
225 return !!(mmu->cpu_role. base_or_ext . reg##_##name); \
226 }
227 BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp);
228 BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pse);
229 BUILD_MMU_ROLE_ACCESSOR(ext, cr4, smep);
230 BUILD_MMU_ROLE_ACCESSOR(ext, cr4, smap);
231 BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pke);
232 BUILD_MMU_ROLE_ACCESSOR(ext, cr4, la57);
233 BUILD_MMU_ROLE_ACCESSOR(base, efer, nx);
234 BUILD_MMU_ROLE_ACCESSOR(ext, efer, lma);
235
236 static inline bool is_cr0_pg(struct kvm_mmu *mmu)
237 {
238 return mmu->cpu_role.base.level > 0;
239 }
240
241 static inline bool is_cr4_pae(struct kvm_mmu *mmu)
242 {
243 return !mmu->cpu_role.base.has_4_byte_gpte;
244 }
245
246 static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
247 {
248 struct kvm_mmu_role_regs regs = {
249 .cr0 = kvm_read_cr0_bits(vcpu, KVM_MMU_CR0_ROLE_BITS),
250 .cr4 = kvm_read_cr4_bits(vcpu, KVM_MMU_CR4_ROLE_BITS),
251 .efer = vcpu->arch.efer,
252 };
253
254 return regs;
255 }
256
257 static unsigned long get_guest_cr3(struct kvm_vcpu *vcpu)
258 {
259 return kvm_read_cr3(vcpu);
260 }
261
262 static inline unsigned long kvm_mmu_get_guest_pgd(struct kvm_vcpu *vcpu,
263 struct kvm_mmu *mmu)
264 {
265 if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) && mmu->get_guest_pgd == get_guest_cr3)
266 return kvm_read_cr3(vcpu);
267
268 return mmu->get_guest_pgd(vcpu);
269 }
270
271 static inline bool kvm_available_flush_remote_tlbs_range(void)
272 {
273 #if IS_ENABLED(CONFIG_HYPERV)
274 return kvm_x86_ops.flush_remote_tlbs_range;
275 #else
276 return false;
277 #endif
278 }
279
280 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index);
281
282 /* Flush the range of guest memory mapped by the given SPTE. */
283 static void kvm_flush_remote_tlbs_sptep(struct kvm *kvm, u64 *sptep)
284 {
285 struct kvm_mmu_page *sp = sptep_to_sp(sptep);
286 gfn_t gfn = kvm_mmu_page_get_gfn(sp, spte_index(sptep));
287
288 kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level);
289 }
290
291 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
292 unsigned int access)
293 {
294 u64 spte = make_mmio_spte(vcpu, gfn, access);
295
296 trace_mark_mmio_spte(sptep, gfn, spte);
297 mmu_spte_set(sptep, spte);
298 }
299
300 static gfn_t get_mmio_spte_gfn(u64 spte)
301 {
302 u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
303
304 gpa |= (spte >> SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)
305 & shadow_nonpresent_or_rsvd_mask;
306
307 return gpa >> PAGE_SHIFT;
308 }
309
310 static unsigned get_mmio_spte_access(u64 spte)
311 {
312 return spte & shadow_mmio_access_mask;
313 }
314
315 static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
316 {
317 u64 kvm_gen, spte_gen, gen;
318
319 gen = kvm_vcpu_memslots(vcpu)->generation;
320 if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
321 return false;
322
323 kvm_gen = gen & MMIO_SPTE_GEN_MASK;
324 spte_gen = get_mmio_spte_generation(spte);
325
326 trace_check_mmio_spte(spte, kvm_gen, spte_gen);
327 return likely(kvm_gen == spte_gen);
328 }
329
330 static int is_cpuid_PSE36(void)
331 {
332 return 1;
333 }
334
335 #ifdef CONFIG_X86_64
336 static void __set_spte(u64 *sptep, u64 spte)
337 {
338 KVM_MMU_WARN_ON(is_ept_ve_possible(spte));
339 WRITE_ONCE(*sptep, spte);
340 }
341
342 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
343 {
344 KVM_MMU_WARN_ON(is_ept_ve_possible(spte));
345 WRITE_ONCE(*sptep, spte);
346 }
347
348 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
349 {
350 KVM_MMU_WARN_ON(is_ept_ve_possible(spte));
351 return xchg(sptep, spte);
352 }
353
354 static u64 __get_spte_lockless(u64 *sptep)
355 {
356 return READ_ONCE(*sptep);
357 }
358 #else
359 union split_spte {
360 struct {
361 u32 spte_low;
362 u32 spte_high;
363 };
364 u64 spte;
365 };
366
367 static void count_spte_clear(u64 *sptep, u64 spte)
368 {
369 struct kvm_mmu_page *sp = sptep_to_sp(sptep);
370
371 if (is_shadow_present_pte(spte))
372 return;
373
374 /* Ensure the spte is completely set before we increase the count */
375 smp_wmb();
376 sp->clear_spte_count++;
377 }
378
379 static void __set_spte(u64 *sptep, u64 spte)
380 {
381 union split_spte *ssptep, sspte;
382
383 ssptep = (union split_spte *)sptep;
384 sspte = (union split_spte)spte;
385
386 ssptep->spte_high = sspte.spte_high;
387
388 /*
389 * If we map the spte from nonpresent to present, we should store
390 * the high bits first, then set the present bit, so the CPU cannot
391 * fetch this spte while we are setting the spte.
392 */
393 smp_wmb();
394
395 WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
396 }
397
398 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
399 {
400 union split_spte *ssptep, sspte;
401
402 ssptep = (union split_spte *)sptep;
403 sspte = (union split_spte)spte;
404
405 WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
406
407 /*
408 * If we map the spte from present to nonpresent, we should clear the
409 * present bit first to avoid the vCPU fetching the old high bits.
410 */
411 smp_wmb();
412
413 ssptep->spte_high = sspte.spte_high;
414 count_spte_clear(sptep, spte);
415 }
416
417 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
418 {
419 union split_spte *ssptep, sspte, orig;
420
421 ssptep = (union split_spte *)sptep;
422 sspte = (union split_spte)spte;
423
424 /* xchg acts as a barrier before the setting of the high bits */
425 orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
426 orig.spte_high = ssptep->spte_high;
427 ssptep->spte_high = sspte.spte_high;
428 count_spte_clear(sptep, spte);
429
430 return orig.spte;
431 }
432
433 /*
434 * The idea of using this light way to get the spte on x86_32 is from
435 * gup_get_pte (mm/gup.c).
436 *
437 * An spte tlb flush may be pending, because flushes are coalesced and
438 * we are running outside of the MMU lock. Therefore
439 * we need to protect against in-progress updates of the spte.
440 *
441 * Reading the spte while an update is in progress may get the old value
442 * for the high part of the spte. The race is fine for a present->non-present
443 * change (because the high part of the spte is ignored for non-present spte),
444 * but for a present->present change we must reread the spte.
445 *
446 * All such changes are done in two steps (present->non-present and
447 * non-present->present), hence it is enough to count the number of
448 * present->non-present updates: if it changed while reading the spte,
449 * we might have hit the race. This is done using clear_spte_count.
450 */
451 static u64 __get_spte_lockless(u64 *sptep)
452 {
453 struct kvm_mmu_page *sp = sptep_to_sp(sptep);
454 union split_spte spte, *orig = (union split_spte *)sptep;
455 int count;
456
457 retry:
458 count = sp->clear_spte_count;
459 smp_rmb();
460
461 spte.spte_low = orig->spte_low;
462 smp_rmb();
463
464 spte.spte_high = orig->spte_high;
465 smp_rmb();
466
467 if (unlikely(spte.spte_low != orig->spte_low ||
468 count != sp->clear_spte_count))
469 goto retry;
470
471 return spte.spte;
472 }
473 #endif
474
475 /* Rules for using mmu_spte_set:
476 * Set the sptep from nonpresent to present.
477 * Note: the sptep being assigned *must* be either not present
478 * or in a state where the hardware will not attempt to update
479 * the spte.
480 */
481 static void mmu_spte_set(u64 *sptep, u64 new_spte)
482 {
483 WARN_ON_ONCE(is_shadow_present_pte(*sptep));
484 __set_spte(sptep, new_spte);
485 }
486
487 /* Rules for using mmu_spte_update:
488 * Update the state bits; this means the mapped pfn is not changed.
489 *
490 * Returns true if the TLB needs to be flushed
491 */
492 static bool mmu_spte_update(u64 *sptep, u64 new_spte)
493 {
494 u64 old_spte = *sptep;
495
496 WARN_ON_ONCE(!is_shadow_present_pte(new_spte));
497 check_spte_writable_invariants(new_spte);
498
499 if (!is_shadow_present_pte(old_spte)) {
500 mmu_spte_set(sptep, new_spte);
501 return false;
502 }
503
504 if (!spte_has_volatile_bits(old_spte))
505 __update_clear_spte_fast(sptep, new_spte);
506 else
507 old_spte = __update_clear_spte_slow(sptep, new_spte);
508
509 WARN_ON_ONCE(!is_shadow_present_pte(old_spte) ||
510 spte_to_pfn(old_spte) != spte_to_pfn(new_spte));
511
512 return leaf_spte_change_needs_tlb_flush(old_spte, new_spte);
513 }
514
515 /*
516 * Rules for using mmu_spte_clear_track_bits:
517 * It sets the sptep from present to nonpresent, tracks the
518 * state bits, and is used to clear the last-level sptep.
519 * Returns the old PTE.
520 */
521 static u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
522 {
523 u64 old_spte = *sptep;
524 int level = sptep_to_sp(sptep)->role.level;
525
526 if (!is_shadow_present_pte(old_spte) ||
527 !spte_has_volatile_bits(old_spte))
528 __update_clear_spte_fast(sptep, SHADOW_NONPRESENT_VALUE);
529 else
530 old_spte = __update_clear_spte_slow(sptep, SHADOW_NONPRESENT_VALUE);
531
532 if (!is_shadow_present_pte(old_spte))
533 return old_spte;
534
535 kvm_update_page_stats(kvm, level, -1);
536 return old_spte;
537 }
538
539 /*
540 * Rules for using mmu_spte_clear_no_track:
541 * Directly clear the spte without caring about its state bits;
542 * it is used for upper-level sptes.
543 */
544 static void mmu_spte_clear_no_track(u64 *sptep)
545 {
546 __update_clear_spte_fast(sptep, SHADOW_NONPRESENT_VALUE);
547 }
548
549 static u64 mmu_spte_get_lockless(u64 *sptep)
550 {
551 return __get_spte_lockless(sptep);
552 }
553
554 static inline bool is_tdp_mmu_active(struct kvm_vcpu *vcpu)
555 {
556 return tdp_mmu_enabled && vcpu->arch.mmu->root_role.direct;
557 }
558
559 static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
560 {
561 if (is_tdp_mmu_active(vcpu)) {
562 kvm_tdp_mmu_walk_lockless_begin();
563 } else {
564 /*
565 * Prevent page table teardown by making any free-er wait during
566 * kvm_flush_remote_tlbs() IPI to all active vcpus.
567 */
568 local_irq_disable();
569
570 /*
571 * Make sure a following spte read is not reordered ahead of the write
572 * to vcpu->mode.
573 */
574 smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
575 }
576 }
577
578 static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
579 {
580 if (is_tdp_mmu_active(vcpu)) {
581 kvm_tdp_mmu_walk_lockless_end();
582 } else {
583 /*
584 * Make sure the write to vcpu->mode is not reordered in front of
585 * reads of sptes. If it is, kvm_mmu_commit_zap_page() can see us
586 * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
587 */
588 smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
589 local_irq_enable();
590 }
591 }
592
593 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
594 {
595 int r;
596
597 /* 1 rmap, 1 parent PTE per level, and the prefetched rmaps. */
598 r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
599 1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
600 if (r)
601 return r;
602 if (kvm_has_mirrored_tdp(vcpu->kvm)) {
603 r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_external_spt_cache,
604 PT64_ROOT_MAX_LEVEL);
605 if (r)
606 return r;
607 }
608 r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
609 PT64_ROOT_MAX_LEVEL);
610 if (r)
611 return r;
612 if (maybe_indirect) {
613 r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadowed_info_cache,
614 PT64_ROOT_MAX_LEVEL);
615 if (r)
616 return r;
617 }
618 return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
619 PT64_ROOT_MAX_LEVEL);
620 }
621
622 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
623 {
624 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
625 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
626 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadowed_info_cache);
627 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_external_spt_cache);
628 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
629 }
630
631 static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
632 {
633 kmem_cache_free(pte_list_desc_cache, pte_list_desc);
634 }
635
636 static bool sp_has_gptes(struct kvm_mmu_page *sp);
637
638 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
639 {
640 if (sp->role.passthrough)
641 return sp->gfn;
642
643 if (sp->shadowed_translation)
644 return sp->shadowed_translation[index] >> PAGE_SHIFT;
645
646 return sp->gfn + (index << ((sp->role.level - 1) * SPTE_LEVEL_BITS));
647 }
648
649 /*
650 * For leaf SPTEs, fetch the *guest* access permissions being shadowed. Note
651 * that the SPTE itself may have more constrained access permissions than
652 * what the guest enforces. For example, a guest may create an executable
653 * huge PTE but KVM may disallow execution to mitigate iTLB multihit.
654 */
655 static u32 kvm_mmu_page_get_access(struct kvm_mmu_page *sp, int index)
656 {
657 if (sp->shadowed_translation)
658 return sp->shadowed_translation[index] & ACC_ALL;
659
660 /*
661 * For direct MMUs (e.g. TDP or non-paging guests) or passthrough SPs,
662 * KVM is not shadowing any guest page tables, so the "guest access
663 * permissions" are just ACC_ALL.
664 *
665 * For direct SPs in indirect MMUs (shadow paging), i.e. when KVM
666 * is shadowing a guest huge page with small pages, the guest access
667 * permissions being shadowed are the access permissions of the huge
668 * page.
669 *
670 * In both cases, sp->role.access contains the correct access bits.
671 */
672 return sp->role.access;
673 }
674
675 static void kvm_mmu_page_set_translation(struct kvm_mmu_page *sp, int index,
676 gfn_t gfn, unsigned int access)
677 {
678 if (sp->shadowed_translation) {
679 sp->shadowed_translation[index] = (gfn << PAGE_SHIFT) | access;
680 return;
681 }
682
683 WARN_ONCE(access != kvm_mmu_page_get_access(sp, index),
684 "access mismatch under %s page %llx (expected %u, got %u)\n",
685 sp->role.passthrough ? "passthrough" : "direct",
686 sp->gfn, kvm_mmu_page_get_access(sp, index), access);
687
688 WARN_ONCE(gfn != kvm_mmu_page_get_gfn(sp, index),
689 "gfn mismatch under %s page %llx (expected %llx, got %llx)\n",
690 sp->role.passthrough ? "passthrough" : "direct",
691 sp->gfn, kvm_mmu_page_get_gfn(sp, index), gfn);
692 }
693
694 static void kvm_mmu_page_set_access(struct kvm_mmu_page *sp, int index,
695 unsigned int access)
696 {
697 gfn_t gfn = kvm_mmu_page_get_gfn(sp, index);
698
699 kvm_mmu_page_set_translation(sp, index, gfn, access);
700 }
701
702 /*
703 * Return the pointer to the large page information for a given gfn,
704 * handling slots that are not large page aligned.
705 */
706 static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
707 const struct kvm_memory_slot *slot, int level)
708 {
709 unsigned long idx;
710
711 idx = gfn_to_index(gfn, slot->base_gfn, level);
712 return &slot->arch.lpage_info[level - 2][idx];
713 }
714
715 /*
716 * The most significant bit in disallow_lpage tracks whether or not memory
717 * attributes are mixed, i.e. not identical for all gfns at the current level.
718 * The lower order bits are used to refcount other cases where a hugepage is
719 * disallowed, e.g. if KVM is shadowing a page table at the gfn.
720 */
721 #define KVM_LPAGE_MIXED_FLAG BIT(31)
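/*
 * For example, disallow_lpage == (KVM_LPAGE_MIXED_FLAG | 2) means the memory
 * attributes in the range are mixed *and* two independent reasons (e.g. two
 * shadowed guest page tables) currently forbid a hugepage, while
 * disallow_lpage == 0 means a hugepage is allowed at this gfn and level.
 */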
722
723 static void update_gfn_disallow_lpage_count(const struct kvm_memory_slot *slot,
724 gfn_t gfn, int count)
725 {
726 struct kvm_lpage_info *linfo;
727 int old, i;
728
729 for (i = PG_LEVEL_2M; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
730 linfo = lpage_info_slot(gfn, slot, i);
731
732 old = linfo->disallow_lpage;
733 linfo->disallow_lpage += count;
734 WARN_ON_ONCE((old ^ linfo->disallow_lpage) & KVM_LPAGE_MIXED_FLAG);
735 }
736 }
737
738 void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
739 {
740 update_gfn_disallow_lpage_count(slot, gfn, 1);
741 }
742
743 void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
744 {
745 update_gfn_disallow_lpage_count(slot, gfn, -1);
746 }
747
748 static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
749 {
750 struct kvm_memslots *slots;
751 struct kvm_memory_slot *slot;
752 gfn_t gfn;
753
754 kvm->arch.indirect_shadow_pages++;
755 /*
756 * Ensure indirect_shadow_pages is elevated prior to re-reading guest
757 * child PTEs in FNAME(gpte_changed), i.e. guarantee either in-flight
758 * emulated writes are visible before re-reading guest PTEs, or that
759 * an emulated write will see the elevated count and acquire mmu_lock
760 * to update SPTEs. Pairs with the smp_mb() in kvm_mmu_track_write().
761 */
762 smp_mb();
763
764 gfn = sp->gfn;
765 slots = kvm_memslots_for_spte_role(kvm, sp->role);
766 slot = __gfn_to_memslot(slots, gfn);
767
768 /* The non-leaf shadow pages are kept read-only. */
769 if (sp->role.level > PG_LEVEL_4K)
770 return __kvm_write_track_add_gfn(kvm, slot, gfn);
771
772 kvm_mmu_gfn_disallow_lpage(slot, gfn);
773
774 if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K))
775 kvm_flush_remote_tlbs_gfn(kvm, gfn, PG_LEVEL_4K);
776 }
777
778 void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
779 {
780 /*
781 * If it's possible to replace the shadow page with an NX huge page,
782 * i.e. if the shadow page is the only thing currently preventing KVM
783 * from using a huge page, add the shadow page to the list of "to be
784 * zapped for NX recovery" pages. Note, the shadow page can already be
785 * on the list if KVM is reusing an existing shadow page, i.e. if KVM
786 * links a shadow page at multiple points.
787 */
788 if (!list_empty(&sp->possible_nx_huge_page_link))
789 return;
790
791 ++kvm->stat.nx_lpage_splits;
792 list_add_tail(&sp->possible_nx_huge_page_link,
793 &kvm->arch.possible_nx_huge_pages);
794 }
795
796 static void account_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp,
797 bool nx_huge_page_possible)
798 {
799 sp->nx_huge_page_disallowed = true;
800
801 if (nx_huge_page_possible)
802 track_possible_nx_huge_page(kvm, sp);
803 }
804
805 static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
806 {
807 struct kvm_memslots *slots;
808 struct kvm_memory_slot *slot;
809 gfn_t gfn;
810
811 kvm->arch.indirect_shadow_pages--;
812 gfn = sp->gfn;
813 slots = kvm_memslots_for_spte_role(kvm, sp->role);
814 slot = __gfn_to_memslot(slots, gfn);
815 if (sp->role.level > PG_LEVEL_4K)
816 return __kvm_write_track_remove_gfn(kvm, slot, gfn);
817
818 kvm_mmu_gfn_allow_lpage(slot, gfn);
819 }
820
821 void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
822 {
823 if (list_empty(&sp->possible_nx_huge_page_link))
824 return;
825
826 --kvm->stat.nx_lpage_splits;
827 list_del_init(&sp->possible_nx_huge_page_link);
828 }
829
830 static void unaccount_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
831 {
832 sp->nx_huge_page_disallowed = false;
833
834 untrack_possible_nx_huge_page(kvm, sp);
835 }
836
837 static struct kvm_memory_slot *gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu,
838 gfn_t gfn,
839 bool no_dirty_log)
840 {
841 struct kvm_memory_slot *slot;
842
843 slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
844 if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
845 return NULL;
846 if (no_dirty_log && kvm_slot_dirty_track_enabled(slot))
847 return NULL;
848
849 return slot;
850 }
851
852 /*
853 * About rmap_head encoding:
854 *
855 * If the bit zero of rmap_head->val is clear, then it points to the only spte
856 * in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct
857 * pte_list_desc containing more mappings.
858 */
859 #define KVM_RMAP_MANY BIT(0)
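/*
 * A minimal sketch of the encoding:
 *
 *	one SPTE:    rmap_head->val == (unsigned long)sptep
 *	many SPTEs:  rmap_head->val == (unsigned long)desc | KVM_RMAP_MANY,
 *	             where desc is the head pte_list_desc of the chain.
 *
 * Consumers therefore test KVM_RMAP_MANY before dereferencing, as done in
 * pte_list_add() and rmap_get_first() below.
 */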
860
861 /*
862 * Returns the number of pointers in the rmap chain, not counting the new one.
863 */
864 static int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte,
865 struct kvm_rmap_head *rmap_head)
866 {
867 struct pte_list_desc *desc;
868 int count = 0;
869
870 if (!rmap_head->val) {
871 rmap_head->val = (unsigned long)spte;
872 } else if (!(rmap_head->val & KVM_RMAP_MANY)) {
873 desc = kvm_mmu_memory_cache_alloc(cache);
874 desc->sptes[0] = (u64 *)rmap_head->val;
875 desc->sptes[1] = spte;
876 desc->spte_count = 2;
877 desc->tail_count = 0;
878 rmap_head->val = (unsigned long)desc | KVM_RMAP_MANY;
879 ++count;
880 } else {
881 desc = (struct pte_list_desc *)(rmap_head->val & ~KVM_RMAP_MANY);
882 count = desc->tail_count + desc->spte_count;
883
884 /*
885 * If the previous head is full, allocate a new head descriptor
886 * as tail descriptors are always kept full.
887 */
888 if (desc->spte_count == PTE_LIST_EXT) {
889 desc = kvm_mmu_memory_cache_alloc(cache);
890 desc->more = (struct pte_list_desc *)(rmap_head->val & ~KVM_RMAP_MANY);
891 desc->spte_count = 0;
892 desc->tail_count = count;
893 rmap_head->val = (unsigned long)desc | KVM_RMAP_MANY;
894 }
895 desc->sptes[desc->spte_count++] = spte;
896 }
897 return count;
898 }
899
900 static void pte_list_desc_remove_entry(struct kvm *kvm,
901 struct kvm_rmap_head *rmap_head,
902 struct pte_list_desc *desc, int i)
903 {
904 struct pte_list_desc *head_desc = (struct pte_list_desc *)(rmap_head->val & ~KVM_RMAP_MANY);
905 int j = head_desc->spte_count - 1;
906
907 /*
908 * The head descriptor should never be empty. A new head is added only
909 * when adding an entry and the previous head is full, and heads are
910 * removed (this flow) when they become empty.
911 */
912 KVM_BUG_ON_DATA_CORRUPTION(j < 0, kvm);
913
914 /*
915 * Replace the to-be-freed SPTE with the last valid entry from the head
916 * descriptor to ensure that tail descriptors are full at all times.
917 * Note, this also means that tail_count is stable for each descriptor.
918 */
919 desc->sptes[i] = head_desc->sptes[j];
920 head_desc->sptes[j] = NULL;
921 head_desc->spte_count--;
922 if (head_desc->spte_count)
923 return;
924
925 /*
926 * The head descriptor is empty. If there are no tail descriptors,
927 * nullify the rmap head to mark the list as empty, else point the rmap
928 * head at the next descriptor, i.e. the new head.
929 */
930 if (!head_desc->more)
931 rmap_head->val = 0;
932 else
933 rmap_head->val = (unsigned long)head_desc->more | KVM_RMAP_MANY;
934 mmu_free_pte_list_desc(head_desc);
935 }
936
937 static void pte_list_remove(struct kvm *kvm, u64 *spte,
938 struct kvm_rmap_head *rmap_head)
939 {
940 struct pte_list_desc *desc;
941 int i;
942
943 if (KVM_BUG_ON_DATA_CORRUPTION(!rmap_head->val, kvm))
944 return;
945
946 if (!(rmap_head->val & KVM_RMAP_MANY)) {
947 if (KVM_BUG_ON_DATA_CORRUPTION((u64 *)rmap_head->val != spte, kvm))
948 return;
949
950 rmap_head->val = 0;
951 } else {
952 desc = (struct pte_list_desc *)(rmap_head->val & ~KVM_RMAP_MANY);
953 while (desc) {
954 for (i = 0; i < desc->spte_count; ++i) {
955 if (desc->sptes[i] == spte) {
956 pte_list_desc_remove_entry(kvm, rmap_head,
957 desc, i);
958 return;
959 }
960 }
961 desc = desc->more;
962 }
963
964 KVM_BUG_ON_DATA_CORRUPTION(true, kvm);
965 }
966 }
967
968 static void kvm_zap_one_rmap_spte(struct kvm *kvm,
969 struct kvm_rmap_head *rmap_head, u64 *sptep)
970 {
971 mmu_spte_clear_track_bits(kvm, sptep);
972 pte_list_remove(kvm, sptep, rmap_head);
973 }
974
975 /* Return true if at least one SPTE was zapped, false otherwise */
976 static bool kvm_zap_all_rmap_sptes(struct kvm *kvm,
977 struct kvm_rmap_head *rmap_head)
978 {
979 struct pte_list_desc *desc, *next;
980 int i;
981
982 if (!rmap_head->val)
983 return false;
984
985 if (!(rmap_head->val & KVM_RMAP_MANY)) {
986 mmu_spte_clear_track_bits(kvm, (u64 *)rmap_head->val);
987 goto out;
988 }
989
990 desc = (struct pte_list_desc *)(rmap_head->val & ~KVM_RMAP_MANY);
991
992 for (; desc; desc = next) {
993 for (i = 0; i < desc->spte_count; i++)
994 mmu_spte_clear_track_bits(kvm, desc->sptes[i]);
995 next = desc->more;
996 mmu_free_pte_list_desc(desc);
997 }
998 out:
999 /* rmap_head is meaningless now, remember to reset it */
1000 rmap_head->val = 0;
1001 return true;
1002 }
1003
1004 unsigned int pte_list_count(struct kvm_rmap_head *rmap_head)
1005 {
1006 struct pte_list_desc *desc;
1007
1008 if (!rmap_head->val)
1009 return 0;
1010 else if (!(rmap_head->val & KVM_RMAP_MANY))
1011 return 1;
1012
1013 desc = (struct pte_list_desc *)(rmap_head->val & ~KVM_RMAP_MANY);
1014 return desc->tail_count + desc->spte_count;
1015 }
1016
1017 static struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level,
1018 const struct kvm_memory_slot *slot)
1019 {
1020 unsigned long idx;
1021
1022 idx = gfn_to_index(gfn, slot->base_gfn, level);
1023 return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
1024 }
1025
1026 static void rmap_remove(struct kvm *kvm, u64 *spte)
1027 {
1028 struct kvm_memslots *slots;
1029 struct kvm_memory_slot *slot;
1030 struct kvm_mmu_page *sp;
1031 gfn_t gfn;
1032 struct kvm_rmap_head *rmap_head;
1033
1034 sp = sptep_to_sp(spte);
1035 gfn = kvm_mmu_page_get_gfn(sp, spte_index(spte));
1036
1037 /*
1038 * Unlike rmap_add, rmap_remove does not run in the context of a vCPU
1039 * so we have to determine which memslots to use based on context
1040 * information in sp->role.
1041 */
1042 slots = kvm_memslots_for_spte_role(kvm, sp->role);
1043
1044 slot = __gfn_to_memslot(slots, gfn);
1045 rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
1046
1047 pte_list_remove(kvm, spte, rmap_head);
1048 }
1049
1050 /*
1051 * Used by the following functions to iterate through the sptes linked by a
1052 * rmap. All fields are private and not assumed to be used outside.
1053 */
1054 struct rmap_iterator {
1055 /* private fields */
1056 struct pte_list_desc *desc; /* holds the sptep if not NULL */
1057 int pos; /* index of the sptep */
1058 };
1059
1060 /*
1061 * Iteration must be started by this function. This should also be used after
1062 * removing/dropping sptes from the rmap link because in such cases the
1063 * information in the iterator may not be valid.
1064 *
1065 * Returns sptep if found, NULL otherwise.
1066 */
1067 static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
1068 struct rmap_iterator *iter)
1069 {
1070 u64 *sptep;
1071
1072 if (!rmap_head->val)
1073 return NULL;
1074
1075 if (!(rmap_head->val & KVM_RMAP_MANY)) {
1076 iter->desc = NULL;
1077 sptep = (u64 *)rmap_head->val;
1078 goto out;
1079 }
1080
1081 iter->desc = (struct pte_list_desc *)(rmap_head->val & ~KVM_RMAP_MANY);
1082 iter->pos = 0;
1083 sptep = iter->desc->sptes[iter->pos];
1084 out:
1085 BUG_ON(!is_shadow_present_pte(*sptep));
1086 return sptep;
1087 }
1088
1089 /*
1090 * Must be used with a valid iterator: e.g. after rmap_get_first().
1091 *
1092 * Returns sptep if found, NULL otherwise.
1093 */
1094 static u64 *rmap_get_next(struct rmap_iterator *iter)
1095 {
1096 u64 *sptep;
1097
1098 if (iter->desc) {
1099 if (iter->pos < PTE_LIST_EXT - 1) {
1100 ++iter->pos;
1101 sptep = iter->desc->sptes[iter->pos];
1102 if (sptep)
1103 goto out;
1104 }
1105
1106 iter->desc = iter->desc->more;
1107
1108 if (iter->desc) {
1109 iter->pos = 0;
1110 /* desc->sptes[0] cannot be NULL */
1111 sptep = iter->desc->sptes[iter->pos];
1112 goto out;
1113 }
1114 }
1115
1116 return NULL;
1117 out:
1118 BUG_ON(!is_shadow_present_pte(*sptep));
1119 return sptep;
1120 }
1121
1122 #define for_each_rmap_spte(_rmap_head_, _iter_, _spte_) \
1123 for (_spte_ = rmap_get_first(_rmap_head_, _iter_); \
1124 _spte_; _spte_ = rmap_get_next(_iter_))
1125
1126 static void drop_spte(struct kvm *kvm, u64 *sptep)
1127 {
1128 u64 old_spte = mmu_spte_clear_track_bits(kvm, sptep);
1129
1130 if (is_shadow_present_pte(old_spte))
1131 rmap_remove(kvm, sptep);
1132 }
1133
1134 static void drop_large_spte(struct kvm *kvm, u64 *sptep, bool flush)
1135 {
1136 struct kvm_mmu_page *sp;
1137
1138 sp = sptep_to_sp(sptep);
1139 WARN_ON_ONCE(sp->role.level == PG_LEVEL_4K);
1140
1141 drop_spte(kvm, sptep);
1142
1143 if (flush)
1144 kvm_flush_remote_tlbs_sptep(kvm, sptep);
1145 }
1146
1147 /*
1148 * Write-protect the specified @sptep. @pt_protect indicates whether the
1149 * spte write-protection is caused by protecting the shadow page table.
1150 *
1151 * Note: write protection differs between dirty logging and spte
1152 * protection:
1153 * - for dirty logging, the spte can be set to writable at any time if
1154 * its dirty bitmap is properly set.
1155 * - for spte protection, the spte can be made writable only after the
1156 * shadow page is unsync-ed.
1157 *
1158 * Return true if the tlb needs to be flushed.
1159 */
1160 static bool spte_write_protect(u64 *sptep, bool pt_protect)
1161 {
1162 u64 spte = *sptep;
1163
1164 if (!is_writable_pte(spte) &&
1165 !(pt_protect && is_mmu_writable_spte(spte)))
1166 return false;
1167
1168 if (pt_protect)
1169 spte &= ~shadow_mmu_writable_mask;
1170 spte = spte & ~PT_WRITABLE_MASK;
1171
1172 return mmu_spte_update(sptep, spte);
1173 }
1174
1175 static bool rmap_write_protect(struct kvm_rmap_head *rmap_head,
1176 bool pt_protect)
1177 {
1178 u64 *sptep;
1179 struct rmap_iterator iter;
1180 bool flush = false;
1181
1182 for_each_rmap_spte(rmap_head, &iter, sptep)
1183 flush |= spte_write_protect(sptep, pt_protect);
1184
1185 return flush;
1186 }
1187
1188 static bool spte_clear_dirty(u64 *sptep)
1189 {
1190 u64 spte = *sptep;
1191
1192 KVM_MMU_WARN_ON(!spte_ad_enabled(spte));
1193 spte &= ~shadow_dirty_mask;
1194 return mmu_spte_update(sptep, spte);
1195 }
1196
1197 /*
1198 * Gets the GFN ready for another round of dirty logging by clearing the
1199 * - D bit on ad-enabled SPTEs, and
1200 * - W bit on ad-disabled SPTEs.
1201 * Returns true iff any D or W bits were cleared.
1202 */
1203 static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1204 const struct kvm_memory_slot *slot)
1205 {
1206 u64 *sptep;
1207 struct rmap_iterator iter;
1208 bool flush = false;
1209
1210 for_each_rmap_spte(rmap_head, &iter, sptep)
1211 if (spte_ad_need_write_protect(*sptep))
1212 flush |= test_and_clear_bit(PT_WRITABLE_SHIFT,
1213 (unsigned long *)sptep);
1214 else
1215 flush |= spte_clear_dirty(sptep);
1216
1217 return flush;
1218 }
1219
1220 static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
1221 struct kvm_memory_slot *slot,
1222 gfn_t gfn_offset, unsigned long mask)
1223 {
1224 struct kvm_rmap_head *rmap_head;
1225
1226 if (tdp_mmu_enabled)
1227 kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
1228 slot->base_gfn + gfn_offset, mask, true);
1229
1230 if (!kvm_memslots_have_rmaps(kvm))
1231 return;
1232
1233 while (mask) {
1234 rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
1235 PG_LEVEL_4K, slot);
1236 rmap_write_protect(rmap_head, false);
1237
1238 /* clear the first set bit */
1239 mask &= mask - 1;
1240 }
1241 }
1242
1243 static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1244 struct kvm_memory_slot *slot,
1245 gfn_t gfn_offset, unsigned long mask)
1246 {
1247 struct kvm_rmap_head *rmap_head;
1248
1249 if (tdp_mmu_enabled)
1250 kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
1251 slot->base_gfn + gfn_offset, mask, false);
1252
1253 if (!kvm_memslots_have_rmaps(kvm))
1254 return;
1255
1256 while (mask) {
1257 rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
1258 PG_LEVEL_4K, slot);
1259 __rmap_clear_dirty(kvm, rmap_head, slot);
1260
1261 /* clear the first set bit */
1262 mask &= mask - 1;
1263 }
1264 }
1265
1266 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1267 struct kvm_memory_slot *slot,
1268 gfn_t gfn_offset, unsigned long mask)
1269 {
1270 /*
1271 * If the slot was assumed to be "initially all dirty", write-protect
1272 * huge pages to ensure they are split to 4KiB on the first write (KVM
1273 * dirty logs at 4KiB granularity). If eager page splitting is enabled,
1274 * immediately try to split huge pages, e.g. so that vCPUs don't get
1275 * saddled with the cost of splitting.
1276 *
1277 * The gfn_offset is guaranteed to be aligned to 64, but the base_gfn
1278 * of memslot has no such restriction, so the range can cross two large
1279 * pages.
1280 */
1281 if (kvm_dirty_log_manual_protect_and_init_set(kvm)) {
1282 gfn_t start = slot->base_gfn + gfn_offset + __ffs(mask);
1283 gfn_t end = slot->base_gfn + gfn_offset + __fls(mask);
1284
1285 if (READ_ONCE(eager_page_split))
1286 kvm_mmu_try_split_huge_pages(kvm, slot, start, end + 1, PG_LEVEL_4K);
1287
1288 kvm_mmu_slot_gfn_write_protect(kvm, slot, start, PG_LEVEL_2M);
1289
1290 /* Cross two large pages? */
1291 if (ALIGN(start << PAGE_SHIFT, PMD_SIZE) !=
1292 ALIGN(end << PAGE_SHIFT, PMD_SIZE))
1293 kvm_mmu_slot_gfn_write_protect(kvm, slot, end,
1294 PG_LEVEL_2M);
1295 }
1296
1297 /*
1298 * (Re)Enable dirty logging for all 4KiB SPTEs that map the GFNs in
1299 * mask. If PML is enabled and the GFN doesn't need to be write-
1300 * protected for other reasons, e.g. shadow paging, clear the Dirty bit.
1301 * Otherwise clear the Writable bit.
1302 *
1303 * Note that kvm_mmu_clear_dirty_pt_masked() is called whenever PML is
1304 * enabled, but it chooses between clearing the Dirty bit and the Writable
1305 * bit based on the context.
1306 */
1307 if (kvm_x86_ops.cpu_dirty_log_size)
1308 kvm_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset, mask);
1309 else
1310 kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
1311 }
1312
1313 int kvm_cpu_dirty_log_size(void)
1314 {
1315 return kvm_x86_ops.cpu_dirty_log_size;
1316 }
1317
1318 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
1319 struct kvm_memory_slot *slot, u64 gfn,
1320 int min_level)
1321 {
1322 struct kvm_rmap_head *rmap_head;
1323 int i;
1324 bool write_protected = false;
1325
1326 if (kvm_memslots_have_rmaps(kvm)) {
1327 for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
1328 rmap_head = gfn_to_rmap(gfn, i, slot);
1329 write_protected |= rmap_write_protect(rmap_head, true);
1330 }
1331 }
1332
1333 if (tdp_mmu_enabled)
1334 write_protected |=
1335 kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, min_level);
1336
1337 return write_protected;
1338 }
1339
1340 static bool kvm_vcpu_write_protect_gfn(struct kvm_vcpu *vcpu, u64 gfn)
1341 {
1342 struct kvm_memory_slot *slot;
1343
1344 slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1345 return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K);
1346 }
1347
1348 static bool kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1349 const struct kvm_memory_slot *slot)
1350 {
1351 return kvm_zap_all_rmap_sptes(kvm, rmap_head);
1352 }
1353
1354 struct slot_rmap_walk_iterator {
1355 /* input fields. */
1356 const struct kvm_memory_slot *slot;
1357 gfn_t start_gfn;
1358 gfn_t end_gfn;
1359 int start_level;
1360 int end_level;
1361
1362 /* output fields. */
1363 gfn_t gfn;
1364 struct kvm_rmap_head *rmap;
1365 int level;
1366
1367 /* private field. */
1368 struct kvm_rmap_head *end_rmap;
1369 };
1370
1371 static void rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator,
1372 int level)
1373 {
1374 iterator->level = level;
1375 iterator->gfn = iterator->start_gfn;
1376 iterator->rmap = gfn_to_rmap(iterator->gfn, level, iterator->slot);
1377 iterator->end_rmap = gfn_to_rmap(iterator->end_gfn, level, iterator->slot);
1378 }
1379
1380 static void slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
1381 const struct kvm_memory_slot *slot,
1382 int start_level, int end_level,
1383 gfn_t start_gfn, gfn_t end_gfn)
1384 {
1385 iterator->slot = slot;
1386 iterator->start_level = start_level;
1387 iterator->end_level = end_level;
1388 iterator->start_gfn = start_gfn;
1389 iterator->end_gfn = end_gfn;
1390
1391 rmap_walk_init_level(iterator, iterator->start_level);
1392 }
1393
1394 static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
1395 {
1396 return !!iterator->rmap;
1397 }
1398
1399 static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
1400 {
1401 while (++iterator->rmap <= iterator->end_rmap) {
1402 iterator->gfn += KVM_PAGES_PER_HPAGE(iterator->level);
1403
1404 if (iterator->rmap->val)
1405 return;
1406 }
1407
1408 if (++iterator->level > iterator->end_level) {
1409 iterator->rmap = NULL;
1410 return;
1411 }
1412
1413 rmap_walk_init_level(iterator, iterator->level);
1414 }
1415
1416 #define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_, \
1417 _start_gfn, _end_gfn, _iter_) \
1418 for (slot_rmap_walk_init(_iter_, _slot_, _start_level_, \
1419 _end_level_, _start_gfn, _end_gfn); \
1420 slot_rmap_walk_okay(_iter_); \
1421 slot_rmap_walk_next(_iter_))
1422
1423 /* The return value indicates if tlb flush on all vcpus is needed. */
1424 typedef bool (*slot_rmaps_handler) (struct kvm *kvm,
1425 struct kvm_rmap_head *rmap_head,
1426 const struct kvm_memory_slot *slot);
1427
1428 static __always_inline bool __walk_slot_rmaps(struct kvm *kvm,
1429 const struct kvm_memory_slot *slot,
1430 slot_rmaps_handler fn,
1431 int start_level, int end_level,
1432 gfn_t start_gfn, gfn_t end_gfn,
1433 bool can_yield, bool flush_on_yield,
1434 bool flush)
1435 {
1436 struct slot_rmap_walk_iterator iterator;
1437
1438 lockdep_assert_held_write(&kvm->mmu_lock);
1439
1440 for_each_slot_rmap_range(slot, start_level, end_level, start_gfn,
1441 end_gfn, &iterator) {
1442 if (iterator.rmap)
1443 flush |= fn(kvm, iterator.rmap, slot);
1444
1445 if (!can_yield)
1446 continue;
1447
1448 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
1449 if (flush && flush_on_yield) {
1450 kvm_flush_remote_tlbs_range(kvm, start_gfn,
1451 iterator.gfn - start_gfn + 1);
1452 flush = false;
1453 }
1454 cond_resched_rwlock_write(&kvm->mmu_lock);
1455 }
1456 }
1457
1458 return flush;
1459 }
1460
1461 static __always_inline bool walk_slot_rmaps(struct kvm *kvm,
1462 const struct kvm_memory_slot *slot,
1463 slot_rmaps_handler fn,
1464 int start_level, int end_level,
1465 bool flush_on_yield)
1466 {
1467 return __walk_slot_rmaps(kvm, slot, fn, start_level, end_level,
1468 slot->base_gfn, slot->base_gfn + slot->npages - 1,
1469 true, flush_on_yield, false);
1470 }
1471
1472 static __always_inline bool walk_slot_rmaps_4k(struct kvm *kvm,
1473 const struct kvm_memory_slot *slot,
1474 slot_rmaps_handler fn,
1475 bool flush_on_yield)
1476 {
1477 return walk_slot_rmaps(kvm, slot, fn, PG_LEVEL_4K, PG_LEVEL_4K, flush_on_yield);
1478 }
1479
1480 static bool __kvm_rmap_zap_gfn_range(struct kvm *kvm,
1481 const struct kvm_memory_slot *slot,
1482 gfn_t start, gfn_t end, bool can_yield,
1483 bool flush)
1484 {
1485 return __walk_slot_rmaps(kvm, slot, kvm_zap_rmap,
1486 PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
1487 start, end - 1, can_yield, true, flush);
1488 }
1489
1490 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1491 {
1492 bool flush = false;
1493
1494 /*
1495 * To prevent races with vCPUs faulting in a gfn using stale data,
1496 * zapping a gfn range must be protected by mmu_invalidate_in_progress
1497 * (and mmu_invalidate_seq). The only exception is memslot deletion;
1498 * in that case, SRCU synchronization ensures that SPTEs are zapped
1499 * after all vCPUs have unlocked SRCU, guaranteeing that vCPUs see the
1500 * invalid slot.
1501 */
1502 lockdep_assert_once(kvm->mmu_invalidate_in_progress ||
1503 lockdep_is_held(&kvm->slots_lock));
1504
1505 if (kvm_memslots_have_rmaps(kvm))
1506 flush = __kvm_rmap_zap_gfn_range(kvm, range->slot,
1507 range->start, range->end,
1508 range->may_block, flush);
1509
1510 if (tdp_mmu_enabled)
1511 flush = kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
1512
1513 if (kvm_x86_ops.set_apic_access_page_addr &&
1514 range->slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT)
1515 kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
1516
1517 return flush;
1518 }
1519
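/*
 * Once a single rmap chain grows past this many SPTEs, __rmap_add() zaps the
 * entire chain and flushes TLBs rather than letting it grow without bound.
 */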
1520 #define RMAP_RECYCLE_THRESHOLD 1000
1521
1522 static void __rmap_add(struct kvm *kvm,
1523 struct kvm_mmu_memory_cache *cache,
1524 const struct kvm_memory_slot *slot,
1525 u64 *spte, gfn_t gfn, unsigned int access)
1526 {
1527 struct kvm_mmu_page *sp;
1528 struct kvm_rmap_head *rmap_head;
1529 int rmap_count;
1530
1531 sp = sptep_to_sp(spte);
1532 kvm_mmu_page_set_translation(sp, spte_index(spte), gfn, access);
1533 kvm_update_page_stats(kvm, sp->role.level, 1);
1534
1535 rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
1536 rmap_count = pte_list_add(cache, spte, rmap_head);
1537
1538 if (rmap_count > kvm->stat.max_mmu_rmap_size)
1539 kvm->stat.max_mmu_rmap_size = rmap_count;
1540 if (rmap_count > RMAP_RECYCLE_THRESHOLD) {
1541 kvm_zap_all_rmap_sptes(kvm, rmap_head);
1542 kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level);
1543 }
1544 }
1545
1546 static void rmap_add(struct kvm_vcpu *vcpu, const struct kvm_memory_slot *slot,
1547 u64 *spte, gfn_t gfn, unsigned int access)
1548 {
1549 struct kvm_mmu_memory_cache *cache = &vcpu->arch.mmu_pte_list_desc_cache;
1550
1551 __rmap_add(vcpu->kvm, cache, slot, spte, gfn, access);
1552 }
1553
1554 static bool kvm_rmap_age_gfn_range(struct kvm *kvm,
1555 struct kvm_gfn_range *range, bool test_only)
1556 {
1557 struct slot_rmap_walk_iterator iterator;
1558 struct rmap_iterator iter;
1559 bool young = false;
1560 u64 *sptep;
1561
1562 for_each_slot_rmap_range(range->slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
1563 range->start, range->end - 1, &iterator) {
1564 for_each_rmap_spte(iterator.rmap, &iter, sptep) {
1565 u64 spte = *sptep;
1566
1567 if (!is_accessed_spte(spte))
1568 continue;
1569
1570 if (test_only)
1571 return true;
1572
1573 if (spte_ad_enabled(spte)) {
1574 clear_bit((ffs(shadow_accessed_mask) - 1),
1575 (unsigned long *)sptep);
1576 } else {
1577 /*
1578 * WARN if mmu_spte_update() signals the need
1579 * for a TLB flush, as Access tracking a SPTE
1580 * should never trigger an _immediate_ flush.
1581 */
1582 spte = mark_spte_for_access_track(spte);
1583 WARN_ON_ONCE(mmu_spte_update(sptep, spte));
1584 }
1585 young = true;
1586 }
1587 }
1588 return young;
1589 }
1590
1591 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1592 {
1593 bool young = false;
1594
1595 if (kvm_memslots_have_rmaps(kvm))
1596 young = kvm_rmap_age_gfn_range(kvm, range, false);
1597
1598 if (tdp_mmu_enabled)
1599 young |= kvm_tdp_mmu_age_gfn_range(kvm, range);
1600
1601 return young;
1602 }
1603
1604 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1605 {
1606 bool young = false;
1607
1608 if (kvm_memslots_have_rmaps(kvm))
1609 young = kvm_rmap_age_gfn_range(kvm, range, true);
1610
1611 if (tdp_mmu_enabled)
1612 young |= kvm_tdp_mmu_test_age_gfn(kvm, range);
1613
1614 return young;
1615 }
1616
1617 static void kvm_mmu_check_sptes_at_free(struct kvm_mmu_page *sp)
1618 {
1619 #ifdef CONFIG_KVM_PROVE_MMU
1620 int i;
1621
1622 for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
1623 if (KVM_MMU_WARN_ON(is_shadow_present_pte(sp->spt[i])))
1624 pr_err_ratelimited("SPTE %llx (@ %p) for gfn %llx shadow-present at free",
1625 sp->spt[i], &sp->spt[i],
1626 kvm_mmu_page_get_gfn(sp, i));
1627 }
1628 #endif
1629 }
1630
1631 static void kvm_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1632 {
1633 kvm->arch.n_used_mmu_pages++;
1634 kvm_account_pgtable_pages((void *)sp->spt, +1);
1635 }
1636
1637 static void kvm_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1638 {
1639 kvm->arch.n_used_mmu_pages--;
1640 kvm_account_pgtable_pages((void *)sp->spt, -1);
1641 }
1642
1643 static void kvm_mmu_free_shadow_page(struct kvm_mmu_page *sp)
1644 {
1645 kvm_mmu_check_sptes_at_free(sp);
1646
1647 hlist_del(&sp->hash_link);
1648 list_del(&sp->link);
1649 free_page((unsigned long)sp->spt);
1650 free_page((unsigned long)sp->shadowed_translation);
1651 kmem_cache_free(mmu_page_header_cache, sp);
1652 }
1653
1654 static unsigned kvm_page_table_hashfn(gfn_t gfn)
1655 {
1656 return hash_64(gfn, KVM_MMU_HASH_SHIFT);
1657 }
1658
1659 static void mmu_page_add_parent_pte(struct kvm_mmu_memory_cache *cache,
1660 struct kvm_mmu_page *sp, u64 *parent_pte)
1661 {
1662 if (!parent_pte)
1663 return;
1664
1665 pte_list_add(cache, parent_pte, &sp->parent_ptes);
1666 }
1667
1668 static void mmu_page_remove_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
1669 u64 *parent_pte)
1670 {
1671 pte_list_remove(kvm, parent_pte, &sp->parent_ptes);
1672 }
1673
1674 static void drop_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
1675 u64 *parent_pte)
1676 {
1677 mmu_page_remove_parent_pte(kvm, sp, parent_pte);
1678 mmu_spte_clear_no_track(parent_pte);
1679 }
1680
1681 static void mark_unsync(u64 *spte);
1682 static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
1683 {
1684 u64 *sptep;
1685 struct rmap_iterator iter;
1686
1687 for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) {
1688 mark_unsync(sptep);
1689 }
1690 }
1691
1692 static void mark_unsync(u64 *spte)
1693 {
1694 struct kvm_mmu_page *sp;
1695
1696 sp = sptep_to_sp(spte);
1697 if (__test_and_set_bit(spte_index(spte), sp->unsync_child_bitmap))
1698 return;
1699 if (sp->unsync_children++)
1700 return;
1701 kvm_mmu_mark_parents_unsync(sp);
1702 }
1703
1704 #define KVM_PAGE_ARRAY_NR 16
1705
1706 struct kvm_mmu_pages {
1707 struct mmu_page_and_offset {
1708 struct kvm_mmu_page *sp;
1709 unsigned int idx;
1710 } page[KVM_PAGE_ARRAY_NR];
1711 unsigned int nr;
1712 };
1713
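/*
 * Record @sp, and the index of its parent PTE, in the page vector, skipping
 * duplicate entries for unsync pages. Returns non-zero once the vector is
 * full, signalling the caller to abort the walk.
 */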
1714 static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
1715 int idx)
1716 {
1717 int i;
1718
1719 if (sp->unsync)
1720 for (i = 0; i < pvec->nr; i++)
1721 if (pvec->page[i].sp == sp)
1722 return 0;
1723
1724 pvec->page[pvec->nr].sp = sp;
1725 pvec->page[pvec->nr].idx = idx;
1726 pvec->nr++;
1727 return (pvec->nr == KVM_PAGE_ARRAY_NR);
1728 }
1729
1730 static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx)
1731 {
1732 --sp->unsync_children;
1733 WARN_ON_ONCE((int)sp->unsync_children < 0);
1734 __clear_bit(idx, sp->unsync_child_bitmap);
1735 }
1736
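/*
 * Recursively walk @sp's unsync child bitmap and collect unsync leaf pages
 * (along with their unsync ancestors) into @pvec. Returns the number of
 * unsync leaves found, or -ENOSPC if the page vector overflowed.
 */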
1737 static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
1738 struct kvm_mmu_pages *pvec)
1739 {
1740 int i, ret, nr_unsync_leaf = 0;
1741
1742 for_each_set_bit(i, sp->unsync_child_bitmap, 512) {
1743 struct kvm_mmu_page *child;
1744 u64 ent = sp->spt[i];
1745
1746 if (!is_shadow_present_pte(ent) || is_large_pte(ent)) {
1747 clear_unsync_child_bit(sp, i);
1748 continue;
1749 }
1750
1751 child = spte_to_child_sp(ent);
1752
1753 if (child->unsync_children) {
1754 if (mmu_pages_add(pvec, child, i))
1755 return -ENOSPC;
1756
1757 ret = __mmu_unsync_walk(child, pvec);
1758 if (!ret) {
1759 clear_unsync_child_bit(sp, i);
1760 continue;
1761 } else if (ret > 0) {
1762 nr_unsync_leaf += ret;
1763 } else
1764 return ret;
1765 } else if (child->unsync) {
1766 nr_unsync_leaf++;
1767 if (mmu_pages_add(pvec, child, i))
1768 return -ENOSPC;
1769 } else
1770 clear_unsync_child_bit(sp, i);
1771 }
1772
1773 return nr_unsync_leaf;
1774 }
1775
1776 #define INVALID_INDEX (-1)
1777
1778 static int mmu_unsync_walk(struct kvm_mmu_page *sp,
1779 struct kvm_mmu_pages *pvec)
1780 {
1781 pvec->nr = 0;
1782 if (!sp->unsync_children)
1783 return 0;
1784
1785 mmu_pages_add(pvec, sp, INVALID_INDEX);
1786 return __mmu_unsync_walk(sp, pvec);
1787 }
1788
1789 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1790 {
1791 WARN_ON_ONCE(!sp->unsync);
1792 trace_kvm_mmu_sync_page(sp);
1793 sp->unsync = 0;
1794 --kvm->stat.mmu_unsync;
1795 }
1796
1797 static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
1798 struct list_head *invalid_list);
1799 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
1800 struct list_head *invalid_list);
1801
1802 static bool sp_has_gptes(struct kvm_mmu_page *sp)
1803 {
1804 if (sp->role.direct)
1805 return false;
1806
1807 if (sp->role.passthrough)
1808 return false;
1809
1810 return true;
1811 }
1812
1813 #define for_each_valid_sp(_kvm, _sp, _list) \
1814 hlist_for_each_entry(_sp, _list, hash_link) \
1815 if (is_obsolete_sp((_kvm), (_sp))) { \
1816 } else
1817
1818 #define for_each_gfn_valid_sp_with_gptes(_kvm, _sp, _gfn) \
1819 for_each_valid_sp(_kvm, _sp, \
1820 &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)]) \
1821 if ((_sp)->gfn != (_gfn) || !sp_has_gptes(_sp)) {} else
1822
1823 static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
1824 {
1825 union kvm_mmu_page_role root_role = vcpu->arch.mmu->root_role;
1826
1827 /*
1828 * Ignore various flags when verifying that it's safe to sync a shadow
1829 * page using the current MMU context.
1830 *
1831 * - level: not part of the overall MMU role and will never match as the MMU's
1832 * level tracks the root level
1833 * - access: updated based on the new guest PTE
1834 * - quadrant: not part of the overall MMU role (similar to level)
1835 */
1836 const union kvm_mmu_page_role sync_role_ign = {
1837 .level = 0xf,
1838 .access = 0x7,
1839 .quadrant = 0x3,
1840 .passthrough = 0x1,
1841 };
1842
1843 /*
1844 * Direct pages can never be unsync, and KVM should never attempt to
1845 * sync a shadow page for a different MMU context, e.g. if the role
1846 * differs then the memslot lookup (SMM vs. non-SMM) will be bogus, the
1847 * reserved bits checks will be wrong, etc...
1848 */
1849 if (WARN_ON_ONCE(sp->role.direct || !vcpu->arch.mmu->sync_spte ||
1850 (sp->role.word ^ root_role.word) & ~sync_role_ign.word))
1851 return false;
1852
1853 return true;
1854 }
1855
1856 static int kvm_sync_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int i)
1857 {
1858 /* sp->spt[i] has initial value of shadow page table allocation */
1859 if (sp->spt[i] == SHADOW_NONPRESENT_VALUE)
1860 return 0;
1861
1862 return vcpu->arch.mmu->sync_spte(vcpu, sp, i);
1863 }
1864
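/*
 * Re-synchronize an unsync shadow page against its guest page table.
 * Returns -1 if the page cannot be synced and must be zapped, otherwise a
 * non-negative value that is non-zero if the caller needs to flush TLBs.
 */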
1865 static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
1866 {
1867 int flush = 0;
1868 int i;
1869
1870 if (!kvm_sync_page_check(vcpu, sp))
1871 return -1;
1872
1873 for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
1874 int ret = kvm_sync_spte(vcpu, sp, i);
1875
1876 if (ret < -1)
1877 return -1;
1878 flush |= ret;
1879 }
1880
1881 /*
1882 * Note, any flush is purely for KVM's correctness, e.g. when dropping
1883 * an existing SPTE or clearing W/A/D bits to ensure an mmu_notifier
1884 * unmap or dirty logging event doesn't fail to flush. The guest is
1885 * responsible for flushing the TLB to ensure any changes in protection
1886 * bits are recognized, i.e. until the guest flushes or page faults on
1887 * a relevant address, KVM is architecturally allowed to let vCPUs use
1888 * cached translations with the old protection bits.
1889 */
1890 return flush;
1891 }
1892
1893 static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
1894 struct list_head *invalid_list)
1895 {
1896 int ret = __kvm_sync_page(vcpu, sp);
1897
1898 if (ret < 0)
1899 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
1900 return ret;
1901 }
1902
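/*
 * Commit any pending zaps and/or flush remote TLBs. Committing a non-empty
 * invalid_list flushes as a side effect, so an explicit flush is only done
 * when there is nothing to commit. Returns true if either action was taken.
 */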
1903 static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
1904 struct list_head *invalid_list,
1905 bool remote_flush)
1906 {
1907 if (!remote_flush && list_empty(invalid_list))
1908 return false;
1909
1910 if (!list_empty(invalid_list))
1911 kvm_mmu_commit_zap_page(kvm, invalid_list);
1912 else
1913 kvm_flush_remote_tlbs(kvm);
1914 return true;
1915 }
1916
1917 static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
1918 {
1919 if (sp->role.invalid)
1920 return true;
1921
1922 /* TDP MMU pages do not use the MMU generation. */
1923 return !is_tdp_mmu_page(sp) &&
1924 unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
1925 }
1926
1927 struct mmu_page_path {
1928 struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL];
1929 unsigned int idx[PT64_ROOT_MAX_LEVEL];
1930 };
1931
1932 #define for_each_sp(pvec, sp, parents, i) \
1933 for (i = mmu_pages_first(&pvec, &parents); \
1934 i < pvec.nr && ({ sp = pvec.page[i].sp; 1;}); \
1935 i = mmu_pages_next(&pvec, &parents, i))
1936
1937 static int mmu_pages_next(struct kvm_mmu_pages *pvec,
1938 struct mmu_page_path *parents,
1939 int i)
1940 {
1941 int n;
1942
1943 for (n = i+1; n < pvec->nr; n++) {
1944 struct kvm_mmu_page *sp = pvec->page[n].sp;
1945 unsigned idx = pvec->page[n].idx;
1946 int level = sp->role.level;
1947
1948 parents->idx[level-1] = idx;
1949 if (level == PG_LEVEL_4K)
1950 break;
1951
1952 parents->parent[level-2] = sp;
1953 }
1954
1955 return n;
1956 }
1957
1958 static int mmu_pages_first(struct kvm_mmu_pages *pvec,
1959 struct mmu_page_path *parents)
1960 {
1961 struct kvm_mmu_page *sp;
1962 int level;
1963
1964 if (pvec->nr == 0)
1965 return 0;
1966
1967 WARN_ON_ONCE(pvec->page[0].idx != INVALID_INDEX);
1968
1969 sp = pvec->page[0].sp;
1970 level = sp->role.level;
1971 WARN_ON_ONCE(level == PG_LEVEL_4K);
1972
1973 parents->parent[level-2] = sp;
1974
1975 /* Also set up a sentinel. Further entries in pvec are all
1976 * children of sp, so this element is never overwritten.
1977 */
1978 parents->parent[level-1] = NULL;
1979 return mmu_pages_next(pvec, parents, 0);
1980 }
1981
1982 static void mmu_pages_clear_parents(struct mmu_page_path *parents)
1983 {
1984 struct kvm_mmu_page *sp;
1985 unsigned int level = 0;
1986
1987 do {
1988 unsigned int idx = parents->idx[level];
1989 sp = parents->parent[level];
1990 if (!sp)
1991 return;
1992
1993 WARN_ON_ONCE(idx == INVALID_INDEX);
1994 clear_unsync_child_bit(sp, idx);
1995 level++;
1996 } while (!sp->unsync_children);
1997 }
1998
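/*
 * Sync all reachable unsync children of @parent. Gfns backing unsync pages
 * are write-protected first so the guest can no longer modify the shadowed
 * page tables, then each unsync page is synced (or zapped on failure). The
 * walk may drop mmu_lock to reschedule; if @can_yield is false, a pending
 * resched instead returns -EINTR and defers the sync via KVM_REQ_MMU_SYNC.
 */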
1999 static int mmu_sync_children(struct kvm_vcpu *vcpu,
2000 struct kvm_mmu_page *parent, bool can_yield)
2001 {
2002 int i;
2003 struct kvm_mmu_page *sp;
2004 struct mmu_page_path parents;
2005 struct kvm_mmu_pages pages;
2006 LIST_HEAD(invalid_list);
2007 bool flush = false;
2008
2009 while (mmu_unsync_walk(parent, &pages)) {
2010 bool protected = false;
2011
2012 for_each_sp(pages, sp, parents, i)
2013 protected |= kvm_vcpu_write_protect_gfn(vcpu, sp->gfn);
2014
2015 if (protected) {
2016 kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, true);
2017 flush = false;
2018 }
2019
2020 for_each_sp(pages, sp, parents, i) {
2021 kvm_unlink_unsync_page(vcpu->kvm, sp);
2022 flush |= kvm_sync_page(vcpu, sp, &invalid_list) > 0;
2023 mmu_pages_clear_parents(&parents);
2024 }
2025 if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
2026 kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush);
2027 if (!can_yield) {
2028 kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
2029 return -EINTR;
2030 }
2031
2032 cond_resched_rwlock_write(&vcpu->kvm->mmu_lock);
2033 flush = false;
2034 }
2035 }
2036
2037 kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush);
2038 return 0;
2039 }
2040
2041 static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
2042 {
2043 atomic_set(&sp->write_flooding_count, 0);
2044 }
2045
2046 static void clear_sp_write_flooding_count(u64 *spte)
2047 {
2048 __clear_sp_write_flooding_count(sptep_to_sp(spte));
2049 }
2050
2051 /*
2052 * The vCPU is required when finding indirect shadow pages; the shadow
2053 * page may already exist and syncing it needs the vCPU pointer in
2054 * order to read guest page tables. Direct shadow pages are never
2055 * unsync, thus @vcpu can be NULL if @role.direct is true.
2056 */
2057 static struct kvm_mmu_page *kvm_mmu_find_shadow_page(struct kvm *kvm,
2058 struct kvm_vcpu *vcpu,
2059 gfn_t gfn,
2060 struct hlist_head *sp_list,
2061 union kvm_mmu_page_role role)
2062 {
2063 struct kvm_mmu_page *sp;
2064 int ret;
2065 int collisions = 0;
2066 LIST_HEAD(invalid_list);
2067
2068 for_each_valid_sp(kvm, sp, sp_list) {
2069 if (sp->gfn != gfn) {
2070 collisions++;
2071 continue;
2072 }
2073
2074 if (sp->role.word != role.word) {
2075 /*
2076 * If the guest is creating an upper-level page, zap
2077 * unsync pages for the same gfn. While it's possible
2078 * the guest is using recursive page tables, in all
2079 * likelihood the guest has stopped using the unsync
2080 * page and is installing a completely unrelated page.
2081 * Unsync pages must not be left as is, because the new
2082 * upper-level page will be write-protected.
2083 */
2084 if (role.level > PG_LEVEL_4K && sp->unsync)
2085 kvm_mmu_prepare_zap_page(kvm, sp,
2086 &invalid_list);
2087 continue;
2088 }
2089
2090 /* unsync and write-flooding only apply to indirect SPs. */
2091 if (sp->role.direct)
2092 goto out;
2093
2094 if (sp->unsync) {
2095 if (KVM_BUG_ON(!vcpu, kvm))
2096 break;
2097
2098 /*
2099 * The page is good, but is stale. kvm_sync_page does
2100 * get the latest guest state, but (unlike mmu_unsync_children)
2101 * it doesn't write-protect the page or mark it synchronized!
2102 * This way the validity of the mapping is ensured, but the
2103 * overhead of write protection is not incurred until the
2104 * guest invalidates the TLB mapping. This allows multiple
2105 * SPs for a single gfn to be unsync.
2106 *
2107 * If the sync fails, the page is zapped. If so, break
2108 * in order to rebuild it.
2109 */
2110 ret = kvm_sync_page(vcpu, sp, &invalid_list);
2111 if (ret < 0)
2112 break;
2113
2114 WARN_ON_ONCE(!list_empty(&invalid_list));
2115 if (ret > 0)
2116 kvm_flush_remote_tlbs(kvm);
2117 }
2118
2119 __clear_sp_write_flooding_count(sp);
2120
2121 goto out;
2122 }
2123
2124 sp = NULL;
2125 ++kvm->stat.mmu_cache_miss;
2126
2127 out:
2128 kvm_mmu_commit_zap_page(kvm, &invalid_list);
2129
2130 if (collisions > kvm->stat.max_mmu_page_hash_collisions)
2131 kvm->stat.max_mmu_page_hash_collisions = collisions;
2132 return sp;
2133 }
2134
2135 /* Caches used when allocating a new shadow page. */
2136 struct shadow_page_caches {
2137 struct kvm_mmu_memory_cache *page_header_cache;
2138 struct kvm_mmu_memory_cache *shadow_page_cache;
2139 struct kvm_mmu_memory_cache *shadowed_info_cache;
2140 };
2141
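/*
 * Allocate a new shadow page from the caller-provided caches and add it to
 * the active page list, the gfn hash table, and (for pages with guest PTEs)
 * the shadowed-page accounting. The caller is responsible for having
 * verified that no matching page already exists.
 */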
2142 static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm *kvm,
2143 struct shadow_page_caches *caches,
2144 gfn_t gfn,
2145 struct hlist_head *sp_list,
2146 union kvm_mmu_page_role role)
2147 {
2148 struct kvm_mmu_page *sp;
2149
2150 sp = kvm_mmu_memory_cache_alloc(caches->page_header_cache);
2151 sp->spt = kvm_mmu_memory_cache_alloc(caches->shadow_page_cache);
2152 if (!role.direct && role.level <= KVM_MAX_HUGEPAGE_LEVEL)
2153 sp->shadowed_translation = kvm_mmu_memory_cache_alloc(caches->shadowed_info_cache);
2154
2155 set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
2156
2157 INIT_LIST_HEAD(&sp->possible_nx_huge_page_link);
2158
2159 /*
2160 * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
2161 * depends on valid pages being added to the head of the list. See
2162 * comments in kvm_zap_obsolete_pages().
2163 */
2164 sp->mmu_valid_gen = kvm->arch.mmu_valid_gen;
2165 list_add(&sp->link, &kvm->arch.active_mmu_pages);
2166 kvm_account_mmu_page(kvm, sp);
2167
2168 sp->gfn = gfn;
2169 sp->role = role;
2170 hlist_add_head(&sp->hash_link, sp_list);
2171 if (sp_has_gptes(sp))
2172 account_shadowed(kvm, sp);
2173
2174 return sp;
2175 }
2176
2177 /* Note, @vcpu may be NULL if @role.direct is true; see kvm_mmu_find_shadow_page. */
2178 static struct kvm_mmu_page *__kvm_mmu_get_shadow_page(struct kvm *kvm,
2179 struct kvm_vcpu *vcpu,
2180 struct shadow_page_caches *caches,
2181 gfn_t gfn,
2182 union kvm_mmu_page_role role)
2183 {
2184 struct hlist_head *sp_list;
2185 struct kvm_mmu_page *sp;
2186 bool created = false;
2187
2188 sp_list = &kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
2189
2190 sp = kvm_mmu_find_shadow_page(kvm, vcpu, gfn, sp_list, role);
2191 if (!sp) {
2192 created = true;
2193 sp = kvm_mmu_alloc_shadow_page(kvm, caches, gfn, sp_list, role);
2194 }
2195
2196 trace_kvm_mmu_get_page(sp, created);
2197 return sp;
2198 }
2199
2200 static struct kvm_mmu_page *kvm_mmu_get_shadow_page(struct kvm_vcpu *vcpu,
2201 gfn_t gfn,
2202 union kvm_mmu_page_role role)
2203 {
2204 struct shadow_page_caches caches = {
2205 .page_header_cache = &vcpu->arch.mmu_page_header_cache,
2206 .shadow_page_cache = &vcpu->arch.mmu_shadow_page_cache,
2207 .shadowed_info_cache = &vcpu->arch.mmu_shadowed_info_cache,
2208 };
2209
2210 return __kvm_mmu_get_shadow_page(vcpu->kvm, vcpu, &caches, gfn, role);
2211 }
2212
2213 static union kvm_mmu_page_role kvm_mmu_child_role(u64 *sptep, bool direct,
2214 unsigned int access)
2215 {
2216 struct kvm_mmu_page *parent_sp = sptep_to_sp(sptep);
2217 union kvm_mmu_page_role role;
2218
2219 role = parent_sp->role;
2220 role.level--;
2221 role.access = access;
2222 role.direct = direct;
2223 role.passthrough = 0;
2224
2225 /*
2226 * If the guest has 4-byte PTEs then that means it's using 32-bit,
2227 * 2-level, non-PAE paging. KVM shadows such guests with PAE paging
2228 * (i.e. 8-byte PTEs). The difference in PTE size means that KVM must
2229 * shadow each guest page table with multiple shadow page tables, which
2230 * requires extra bookkeeping in the role.
2231 *
2232 * Specifically, to shadow the guest's page directory (which covers a
2233 * 4GiB address space), KVM uses 4 PAE page directories, each mapping
2234 * 1GiB of the address space. @role.quadrant encodes which quarter of
2235 * the address space each maps.
2236 *
2237 * To shadow the guest's page tables (which each map a 4MiB region), KVM
2238 * uses 2 PAE page tables, each mapping a 2MiB region. For these,
2239 * @role.quadrant encodes which half of the region they map.
2240 *
2241 * Concretely, a 4-byte PDE consumes bits 31:22, while an 8-byte PDE
2242 * consumes bits 29:21. To consume bits 31:30, KVM uses 4 shadow
2243 * PDPTEs; those 4 PAE page directories are pre-allocated and their
2244 * quadrant is assigned in mmu_alloc_root(). A 4-byte PTE consumes
2245 * bits 21:12, while an 8-byte PTE consumes bits 20:12. To consume
2246 * bit 21 in the PTE (the child here), KVM propagates that bit to the
2247 * quadrant, i.e. sets quadrant to '0' or '1'. The parent 8-byte PDE
2248 * covers bit 21 (see above), thus the quadrant is calculated from the
2249 * _least_ significant bit of the PDE index.
2250 */
2251 if (role.has_4_byte_gpte) {
2252 WARN_ON_ONCE(role.level != PG_LEVEL_4K);
2253 role.quadrant = spte_index(sptep) & 1;
2254 }
2255
2256 return role;
2257 }
2258
2259 static struct kvm_mmu_page *kvm_mmu_get_child_sp(struct kvm_vcpu *vcpu,
2260 u64 *sptep, gfn_t gfn,
2261 bool direct, unsigned int access)
2262 {
2263 union kvm_mmu_page_role role;
2264
2265 if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep))
2266 return ERR_PTR(-EEXIST);
2267
2268 role = kvm_mmu_child_role(sptep, direct, access);
2269 return kvm_mmu_get_shadow_page(vcpu, gfn, role);
2270 }
2271
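/*
 * Initialize a shadow page table walk for @addr starting at @root. Walks
 * that begin at a PAE root do not have a single root shadow page; instead
 * the appropriate PAE page directory is selected using bits 31:30 of @addr.
 */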
2272 static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator,
2273 struct kvm_vcpu *vcpu, hpa_t root,
2274 u64 addr)
2275 {
2276 iterator->addr = addr;
2277 iterator->shadow_addr = root;
2278 iterator->level = vcpu->arch.mmu->root_role.level;
2279
2280 if (iterator->level >= PT64_ROOT_4LEVEL &&
2281 vcpu->arch.mmu->cpu_role.base.level < PT64_ROOT_4LEVEL &&
2282 !vcpu->arch.mmu->root_role.direct)
2283 iterator->level = PT32E_ROOT_LEVEL;
2284
2285 if (iterator->level == PT32E_ROOT_LEVEL) {
2286 /*
2287 * prev_root is currently only used for 64-bit hosts. So only
2288 * the active root_hpa is valid here.
2289 */
2290 BUG_ON(root != vcpu->arch.mmu->root.hpa);
2291
2292 iterator->shadow_addr
2293 = vcpu->arch.mmu->pae_root[(addr >> 30) & 3];
2294 iterator->shadow_addr &= SPTE_BASE_ADDR_MASK;
2295 --iterator->level;
2296 if (!iterator->shadow_addr)
2297 iterator->level = 0;
2298 }
2299 }
2300
2301 static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
2302 struct kvm_vcpu *vcpu, u64 addr)
2303 {
2304 shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root.hpa,
2305 addr);
2306 }
2307
2308 static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
2309 {
2310 if (iterator->level < PG_LEVEL_4K)
2311 return false;
2312
2313 iterator->index = SPTE_INDEX(iterator->addr, iterator->level);
2314 iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
2315 return true;
2316 }
2317
2318 static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
2319 u64 spte)
2320 {
2321 if (!is_shadow_present_pte(spte) || is_last_spte(spte, iterator->level)) {
2322 iterator->level = 0;
2323 return;
2324 }
2325
2326 iterator->shadow_addr = spte & SPTE_BASE_ADDR_MASK;
2327 --iterator->level;
2328 }
2329
2330 static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
2331 {
2332 __shadow_walk_next(iterator, *iterator->sptep);
2333 }
2334
2335 static void __link_shadow_page(struct kvm *kvm,
2336 struct kvm_mmu_memory_cache *cache, u64 *sptep,
2337 struct kvm_mmu_page *sp, bool flush)
2338 {
2339 u64 spte;
2340
2341 BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
2342
2343 /*
2344 * If an SPTE is present already, it must be a leaf and therefore
2345 * a large one. Drop it, and flush the TLB if needed, before
2346 * installing sp.
2347 */
2348 if (is_shadow_present_pte(*sptep))
2349 drop_large_spte(kvm, sptep, flush);
2350
2351 spte = make_nonleaf_spte(sp->spt, sp_ad_disabled(sp));
2352
2353 mmu_spte_set(sptep, spte);
2354
2355 mmu_page_add_parent_pte(cache, sp, sptep);
2356
2357 /*
2358 * The non-direct sub-pagetable must be updated before linking. For
2359 * L1 sp, the pagetable is updated via kvm_sync_page() in
2360 * kvm_mmu_find_shadow_page() without write-protecting the gfn,
2361 * so sp->unsync can be true or false. For higher level non-direct
2362 * sp, the pagetable is updated/synced via mmu_sync_children() in
2363 * FNAME(fetch)(), so sp->unsync_children can only be false.
2364 * WARN_ON_ONCE() if anything happens unexpectedly.
2365 */
2366 if (WARN_ON_ONCE(sp->unsync_children) || sp->unsync)
2367 mark_unsync(sptep);
2368 }
2369
2370 static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
2371 struct kvm_mmu_page *sp)
2372 {
2373 __link_shadow_page(vcpu->kvm, &vcpu->arch.mmu_pte_list_desc_cache, sptep, sp, true);
2374 }
2375
2376 static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2377 unsigned direct_access)
2378 {
2379 if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
2380 struct kvm_mmu_page *child;
2381
2382 /*
2383 * For the direct sp, if the guest pte's dirty bit
2384 * changed from clean to dirty, it will corrupt the
2385 * sp's access: allow writable in the read-only sp,
2386 * so we should update the spte at this point to get
2387 * a new sp with the correct access.
2388 */
2389 child = spte_to_child_sp(*sptep);
2390 if (child->role.access == direct_access)
2391 return;
2392
2393 drop_parent_pte(vcpu->kvm, child, sptep);
2394 kvm_flush_remote_tlbs_sptep(vcpu->kvm, sptep);
2395 }
2396 }
2397
2398 /* Returns the number of zapped non-leaf child shadow pages. */
2399 static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
2400 u64 *spte, struct list_head *invalid_list)
2401 {
2402 u64 pte;
2403 struct kvm_mmu_page *child;
2404
2405 pte = *spte;
2406 if (is_shadow_present_pte(pte)) {
2407 if (is_last_spte(pte, sp->role.level)) {
2408 drop_spte(kvm, spte);
2409 } else {
2410 child = spte_to_child_sp(pte);
2411 drop_parent_pte(kvm, child, spte);
2412
2413 /*
2414 * Recursively zap nested TDP SPs, parentless SPs are
2415 * unlikely to be used again in the near future. This
2416 * avoids retaining a large number of stale nested SPs.
2417 */
2418 if (tdp_enabled && invalid_list &&
2419 child->role.guest_mode && !child->parent_ptes.val)
2420 return kvm_mmu_prepare_zap_page(kvm, child,
2421 invalid_list);
2422 }
2423 } else if (is_mmio_spte(kvm, pte)) {
2424 mmu_spte_clear_no_track(spte);
2425 }
2426 return 0;
2427 }
2428
2429 static int kvm_mmu_page_unlink_children(struct kvm *kvm,
2430 struct kvm_mmu_page *sp,
2431 struct list_head *invalid_list)
2432 {
2433 int zapped = 0;
2434 unsigned i;
2435
2436 for (i = 0; i < SPTE_ENT_PER_PAGE; ++i)
2437 zapped += mmu_page_zap_pte(kvm, sp, sp->spt + i, invalid_list);
2438
2439 return zapped;
2440 }
2441
2442 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
2443 {
2444 u64 *sptep;
2445 struct rmap_iterator iter;
2446
2447 while ((sptep = rmap_get_first(&sp->parent_ptes, &iter)))
2448 drop_parent_pte(kvm, sp, sptep);
2449 }
2450
2451 static int mmu_zap_unsync_children(struct kvm *kvm,
2452 struct kvm_mmu_page *parent,
2453 struct list_head *invalid_list)
2454 {
2455 int i, zapped = 0;
2456 struct mmu_page_path parents;
2457 struct kvm_mmu_pages pages;
2458
2459 if (parent->role.level == PG_LEVEL_4K)
2460 return 0;
2461
2462 while (mmu_unsync_walk(parent, &pages)) {
2463 struct kvm_mmu_page *sp;
2464
2465 for_each_sp(pages, sp, parents, i) {
2466 kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
2467 mmu_pages_clear_parents(&parents);
2468 zapped++;
2469 }
2470 }
2471
2472 return zapped;
2473 }
2474
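/*
 * Tear down a shadow page: zap its unsync children and its own SPTEs,
 * unlink it from its parents and from accounting, and queue it on
 * @invalid_list for freeing by kvm_mmu_commit_zap_page(). Root pages with
 * a non-zero root_count are only invalidated; they are freed once the last
 * reference is dropped. Returns true if active_mmu_pages became unstable,
 * i.e. if the caller must restart any in-progress list walk.
 */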
2475 static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
2476 struct kvm_mmu_page *sp,
2477 struct list_head *invalid_list,
2478 int *nr_zapped)
2479 {
2480 bool list_unstable, zapped_root = false;
2481
2482 lockdep_assert_held_write(&kvm->mmu_lock);
2483 trace_kvm_mmu_prepare_zap_page(sp);
2484 ++kvm->stat.mmu_shadow_zapped;
2485 *nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list);
2486 *nr_zapped += kvm_mmu_page_unlink_children(kvm, sp, invalid_list);
2487 kvm_mmu_unlink_parents(kvm, sp);
2488
2489 /* Zapping children means active_mmu_pages has become unstable. */
2490 list_unstable = *nr_zapped;
2491
2492 if (!sp->role.invalid && sp_has_gptes(sp))
2493 unaccount_shadowed(kvm, sp);
2494
2495 if (sp->unsync)
2496 kvm_unlink_unsync_page(kvm, sp);
2497 if (!sp->root_count) {
2498 /* Count self */
2499 (*nr_zapped)++;
2500
2501 /*
2502 * Already invalid pages (previously active roots) are not on
2503 * the active page list. See list_del() in the "else" case of
2504 * !sp->root_count.
2505 */
2506 if (sp->role.invalid)
2507 list_add(&sp->link, invalid_list);
2508 else
2509 list_move(&sp->link, invalid_list);
2510 kvm_unaccount_mmu_page(kvm, sp);
2511 } else {
2512 /*
2513 * Remove the active root from the active page list, the root
2514 * will be explicitly freed when the root_count hits zero.
2515 */
2516 list_del(&sp->link);
2517
2518 /*
2519 * Obsolete pages cannot be used on any vCPUs, see the comment
2520 * in kvm_mmu_zap_all_fast(). Note, is_obsolete_sp() also
2521 * treats invalid shadow pages as being obsolete.
2522 */
2523 zapped_root = !is_obsolete_sp(kvm, sp);
2524 }
2525
2526 if (sp->nx_huge_page_disallowed)
2527 unaccount_nx_huge_page(kvm, sp);
2528
2529 sp->role.invalid = 1;
2530
2531 /*
2532 * Make the request to free obsolete roots after marking the root
2533 * invalid, otherwise other vCPUs may not see it as invalid.
2534 */
2535 if (zapped_root)
2536 kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_FREE_OBSOLETE_ROOTS);
2537 return list_unstable;
2538 }
2539
2540 static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
2541 struct list_head *invalid_list)
2542 {
2543 int nr_zapped;
2544
2545 __kvm_mmu_prepare_zap_page(kvm, sp, invalid_list, &nr_zapped);
2546 return nr_zapped;
2547 }
2548
2549 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
2550 struct list_head *invalid_list)
2551 {
2552 struct kvm_mmu_page *sp, *nsp;
2553
2554 if (list_empty(invalid_list))
2555 return;
2556
2557 /*
2558 * We need to make sure everyone sees our modifications to
2559 * the page tables and see changes to vcpu->mode here. The barrier
2560 * in the kvm_flush_remote_tlbs() achieves this. This pairs
2561 * with vcpu_enter_guest and walk_shadow_page_lockless_begin/end.
2562 *
2563 * In addition, kvm_flush_remote_tlbs waits for all vcpus to exit
2564 * guest mode and/or lockless shadow page table walks.
2565 */
2566 kvm_flush_remote_tlbs(kvm);
2567
2568 list_for_each_entry_safe(sp, nsp, invalid_list, link) {
2569 WARN_ON_ONCE(!sp->role.invalid || sp->root_count);
2570 kvm_mmu_free_shadow_page(sp);
2571 }
2572 }
2573
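/*
 * Zap up to @nr_to_zap pages from the tail of the active list, i.e. the
 * least recently allocated shadow pages, skipping in-use roots. Returns the
 * number of pages actually zapped.
 */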
2574 static unsigned long kvm_mmu_zap_oldest_mmu_pages(struct kvm *kvm,
2575 unsigned long nr_to_zap)
2576 {
2577 unsigned long total_zapped = 0;
2578 struct kvm_mmu_page *sp, *tmp;
2579 LIST_HEAD(invalid_list);
2580 bool unstable;
2581 int nr_zapped;
2582
2583 if (list_empty(&kvm->arch.active_mmu_pages))
2584 return 0;
2585
2586 restart:
2587 list_for_each_entry_safe_reverse(sp, tmp, &kvm->arch.active_mmu_pages, link) {
2588 /*
2589 * Don't zap active root pages, the page itself can't be freed
2590 * and zapping it will just force vCPUs to realloc and reload.
2591 */
2592 if (sp->root_count)
2593 continue;
2594
2595 unstable = __kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list,
2596 &nr_zapped);
2597 total_zapped += nr_zapped;
2598 if (total_zapped >= nr_to_zap)
2599 break;
2600
2601 if (unstable)
2602 goto restart;
2603 }
2604
2605 kvm_mmu_commit_zap_page(kvm, &invalid_list);
2606
2607 kvm->stat.mmu_recycled += total_zapped;
2608 return total_zapped;
2609 }
2610
2611 static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
2612 {
2613 if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
2614 return kvm->arch.n_max_mmu_pages -
2615 kvm->arch.n_used_mmu_pages;
2616
2617 return 0;
2618 }
2619
2620 static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
2621 {
2622 unsigned long avail = kvm_mmu_available_pages(vcpu->kvm);
2623
2624 if (likely(avail >= KVM_MIN_FREE_MMU_PAGES))
2625 return 0;
2626
2627 kvm_mmu_zap_oldest_mmu_pages(vcpu->kvm, KVM_REFILL_PAGES - avail);
2628
2629 /*
2630 * Note, this check is intentionally soft, it only guarantees that one
2631 * page is available, while the caller may end up allocating as many as
2632 * four pages, e.g. for PAE roots or for 5-level paging. Temporarily
2633 * exceeding the (arbitrary by default) limit will not harm the host,
2634 * being too aggressive may unnecessarily kill the guest, and getting an
2635 * exact count is far more trouble than it's worth, especially in the
2636 * page fault paths.
2637 */
2638 if (!kvm_mmu_available_pages(vcpu->kvm))
2639 return -ENOSPC;
2640 return 0;
2641 }
2642
2643 /*
2644 * Change the number of MMU pages allocated to the VM.
2645 * Note: if goal_nr_mmu_pages is too small, a deadlock can occur.
2646 */
2647 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
2648 {
2649 write_lock(&kvm->mmu_lock);
2650
2651 if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
2652 kvm_mmu_zap_oldest_mmu_pages(kvm, kvm->arch.n_used_mmu_pages -
2653 goal_nr_mmu_pages);
2654
2655 goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
2656 }
2657
2658 kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
2659
2660 write_unlock(&kvm->mmu_lock);
2661 }
2662
2663 bool __kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
2664 bool always_retry)
2665 {
2666 struct kvm *kvm = vcpu->kvm;
2667 LIST_HEAD(invalid_list);
2668 struct kvm_mmu_page *sp;
2669 gpa_t gpa = cr2_or_gpa;
2670 bool r = false;
2671
2672 /*
2673 * Bail early if there aren't any write-protected shadow pages to avoid
2674 * unnecessarily taking mmu_lock, e.g. if the gfn is write-tracked
2675 * by a third party. Reading indirect_shadow_pages without holding
2676 * mmu_lock is safe, as this is purely an optimization, i.e. a false
2677 * positive is benign, and a false negative will simply result in KVM
2678 * skipping the unprotect+retry path, which is also an optimization.
2679 */
2680 if (!READ_ONCE(kvm->arch.indirect_shadow_pages))
2681 goto out;
2682
2683 if (!vcpu->arch.mmu->root_role.direct) {
2684 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);
2685 if (gpa == INVALID_GPA)
2686 goto out;
2687 }
2688
2689 write_lock(&kvm->mmu_lock);
2690 for_each_gfn_valid_sp_with_gptes(kvm, sp, gpa_to_gfn(gpa))
2691 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
2692
2693 /*
2694 * Snapshot the result before zapping, as zapping will remove all list
2695 * entries, i.e. checking the list later would yield a false negative.
2696 */
2697 r = !list_empty(&invalid_list);
2698 kvm_mmu_commit_zap_page(kvm, &invalid_list);
2699 write_unlock(&kvm->mmu_lock);
2700
2701 out:
2702 if (r || always_retry) {
2703 vcpu->arch.last_retry_eip = kvm_rip_read(vcpu);
2704 vcpu->arch.last_retry_addr = cr2_or_gpa;
2705 }
2706 return r;
2707 }
2708
2709 static void kvm_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
2710 {
2711 trace_kvm_mmu_unsync_page(sp);
2712 ++kvm->stat.mmu_unsync;
2713 sp->unsync = 1;
2714
2715 kvm_mmu_mark_parents_unsync(sp);
2716 }
2717
2718 /*
2719 * Attempt to unsync any shadow pages that can be reached by the specified gfn,
2720 * KVM is creating a writable mapping for said gfn. Returns 0 if all pages
2721 * were marked unsync (or if there is no shadow page), -EPERM if the SPTE must
2722 * be write-protected.
2723 */
2724 int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
2725 gfn_t gfn, bool synchronizing, bool prefetch)
2726 {
2727 struct kvm_mmu_page *sp;
2728 bool locked = false;
2729
2730 /*
2731 * Force write-protection if the page is being tracked. Note, the page
2732 * track machinery is used to write-protect upper-level shadow pages,
2733 * i.e. this guards the role.level == 4K assertion below!
2734 */
2735 if (kvm_gfn_is_write_tracked(kvm, slot, gfn))
2736 return -EPERM;
2737
2738 /*
2739 * The page is not write-tracked, mark existing shadow pages unsync
2740 * unless KVM is synchronizing an unsync SP. In that case, KVM must
2741 * complete emulation of the guest TLB flush before allowing shadow
2742 * pages to become unsync (writable by the guest).
2743 */
2744 for_each_gfn_valid_sp_with_gptes(kvm, sp, gfn) {
2745 if (synchronizing)
2746 return -EPERM;
2747
2748 if (sp->unsync)
2749 continue;
2750
2751 if (prefetch)
2752 return -EEXIST;
2753
2754 /*
2755 * TDP MMU page faults require an additional spinlock as they
2756 * run with mmu_lock held for read, not write, and the unsync
2757 * logic is not thread-safe. Take the spinlock regardless of
2758 * the MMU type to avoid extra conditionals/parameters, there's
2759 * no meaningful penalty if mmu_lock is held for write.
2760 */
2761 if (!locked) {
2762 locked = true;
2763 spin_lock(&kvm->arch.mmu_unsync_pages_lock);
2764
2765 /*
2766 * Recheck after taking the spinlock, a different vCPU
2767 * may have since marked the page unsync. A false
2768 * negative on the unprotected check above is not
2769 * possible as clearing sp->unsync _must_ hold mmu_lock
2770 * for write, i.e. unsync cannot transition from 1->0
2771 * while this CPU holds mmu_lock for read (or write).
2772 */
2773 if (READ_ONCE(sp->unsync))
2774 continue;
2775 }
2776
2777 WARN_ON_ONCE(sp->role.level != PG_LEVEL_4K);
2778 kvm_unsync_page(kvm, sp);
2779 }
2780 if (locked)
2781 spin_unlock(&kvm->arch.mmu_unsync_pages_lock);
2782
2783 /*
2784 * We need to ensure that the marking of unsync pages is visible
2785 * before the SPTE is updated to allow writes because
2786 * kvm_mmu_sync_roots() checks the unsync flags without holding
2787 * the MMU lock and so can race with this. If the SPTE was updated
2788 * before the page had been marked as unsync-ed, something like the
2789 * following could happen:
2790 *
2791 * CPU 1 CPU 2
2792 * ---------------------------------------------------------------------
2793 * 1.2 Host updates SPTE
2794 * to be writable
2795 * 2.1 Guest writes a GPTE for GVA X.
2796 * (GPTE being in the guest page table shadowed
2797 * by the SP from CPU 1.)
2798 * This reads SPTE during the page table walk.
2799 * Since SPTE.W is read as 1, there is no
2800 * fault.
2801 *
2802 * 2.2 Guest issues TLB flush.
2803 * That causes a VM Exit.
2804 *
2805 * 2.3 Walking of unsync pages sees sp->unsync is
2806 * false and skips the page.
2807 *
2808 * 2.4 Guest accesses GVA X.
2809 * Since the mapping in the SP was not updated,
2810 * the old mapping for GVA X incorrectly
2811 * gets used.
2812 * 1.1 Host marks SP
2813 * as unsync
2814 * (sp->unsync = true)
2815 *
2816 * The write barrier below ensures that 1.1 happens before 1.2 and thus
2817 * the situation in 2.4 does not arise. It pairs with the read barrier
2818 * in is_unsync_root(), placed between 2.1's load of SPTE.W and 2.3.
2819 */
2820 smp_wmb();
2821
2822 return 0;
2823 }
2824
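/*
 * Install (or update) a leaf SPTE for @gfn. Handles MMIO pfns, overwriting
 * of stale or conflicting SPTEs, rmap maintenance, and TLB flushes, and
 * returns one of the RET_PF_* codes consumed by the page fault handlers.
 */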
2825 static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
2826 u64 *sptep, unsigned int pte_access, gfn_t gfn,
2827 kvm_pfn_t pfn, struct kvm_page_fault *fault)
2828 {
2829 struct kvm_mmu_page *sp = sptep_to_sp(sptep);
2830 int level = sp->role.level;
2831 int was_rmapped = 0;
2832 int ret = RET_PF_FIXED;
2833 bool flush = false;
2834 bool wrprot;
2835 u64 spte;
2836
2837 /* Prefetching always gets a writable pfn. */
2838 bool host_writable = !fault || fault->map_writable;
2839 bool prefetch = !fault || fault->prefetch;
2840 bool write_fault = fault && fault->write;
2841
2842 if (unlikely(is_noslot_pfn(pfn))) {
2843 vcpu->stat.pf_mmio_spte_created++;
2844 mark_mmio_spte(vcpu, sptep, gfn, pte_access);
2845 return RET_PF_EMULATE;
2846 }
2847
2848 if (is_shadow_present_pte(*sptep)) {
2849 if (prefetch)
2850 return RET_PF_SPURIOUS;
2851
2852 /*
2853 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
2854 * the parent of the now unreachable PTE.
2855 */
2856 if (level > PG_LEVEL_4K && !is_large_pte(*sptep)) {
2857 struct kvm_mmu_page *child;
2858 u64 pte = *sptep;
2859
2860 child = spte_to_child_sp(pte);
2861 drop_parent_pte(vcpu->kvm, child, sptep);
2862 flush = true;
2863 } else if (pfn != spte_to_pfn(*sptep)) {
2864 drop_spte(vcpu->kvm, sptep);
2865 flush = true;
2866 } else
2867 was_rmapped = 1;
2868 }
2869
2870 wrprot = make_spte(vcpu, sp, slot, pte_access, gfn, pfn, *sptep, prefetch,
2871 false, host_writable, &spte);
2872
2873 if (*sptep == spte) {
2874 ret = RET_PF_SPURIOUS;
2875 } else {
2876 flush |= mmu_spte_update(sptep, spte);
2877 trace_kvm_mmu_set_spte(level, gfn, sptep);
2878 }
2879
2880 if (wrprot && write_fault)
2881 ret = RET_PF_WRITE_PROTECTED;
2882
2883 if (flush)
2884 kvm_flush_remote_tlbs_gfn(vcpu->kvm, gfn, level);
2885
2886 if (!was_rmapped) {
2887 WARN_ON_ONCE(ret == RET_PF_SPURIOUS);
2888 rmap_add(vcpu, slot, sptep, gfn, pte_access);
2889 } else {
2890 /* Already rmapped but the pte_access bits may have changed. */
2891 kvm_mmu_page_set_access(sp, spte_index(sptep), pte_access);
2892 }
2893
2894 return ret;
2895 }
2896
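/*
 * Prefetch up to @nr_pages consecutive translations starting at @gfn into
 * the SPTEs at @sptep. Returns false if the range cannot be prefetched,
 * e.g. if there is no usable memslot or the pages cannot be faulted in.
 */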
2897 static bool kvm_mmu_prefetch_sptes(struct kvm_vcpu *vcpu, gfn_t gfn, u64 *sptep,
2898 int nr_pages, unsigned int access)
2899 {
2900 struct page *pages[PTE_PREFETCH_NUM];
2901 struct kvm_memory_slot *slot;
2902 int i;
2903
2904 if (WARN_ON_ONCE(nr_pages > PTE_PREFETCH_NUM))
2905 return false;
2906
2907 slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
2908 if (!slot)
2909 return false;
2910
2911 nr_pages = kvm_prefetch_pages(slot, gfn, pages, nr_pages);
2912 if (nr_pages <= 0)
2913 return false;
2914
2915 for (i = 0; i < nr_pages; i++, gfn++, sptep++) {
2916 mmu_set_spte(vcpu, slot, sptep, access, gfn,
2917 page_to_pfn(pages[i]), NULL);
2918
2919 /*
2920 * KVM always prefetches writable pages from the primary MMU,
2921 * and KVM can make its SPTE writable in the fast page handler,
2922 * without notifying the primary MMU. Mark pages/folios dirty
2923 * now to ensure file data is written back if it ends up being
2924 * written by the guest. Because KVM's prefetching GUPs
2925 * writable PTEs, the probability of unnecessary writeback is
2926 * extremely low.
2927 */
2928 kvm_release_page_dirty(pages[i]);
2929 }
2930
2931 return true;
2932 }
2933
2934 static bool direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
2935 struct kvm_mmu_page *sp,
2936 u64 *start, u64 *end)
2937 {
2938 gfn_t gfn = kvm_mmu_page_get_gfn(sp, spte_index(start));
2939 unsigned int access = sp->role.access;
2940
2941 return kvm_mmu_prefetch_sptes(vcpu, gfn, start, end - start, access);
2942 }
2943
2944 static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
2945 struct kvm_mmu_page *sp, u64 *sptep)
2946 {
2947 u64 *spte, *start = NULL;
2948 int i;
2949
2950 WARN_ON_ONCE(!sp->role.direct);
2951
2952 i = spte_index(sptep) & ~(PTE_PREFETCH_NUM - 1);
2953 spte = sp->spt + i;
2954
2955 for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
2956 if (is_shadow_present_pte(*spte) || spte == sptep) {
2957 if (!start)
2958 continue;
2959 if (!direct_pte_prefetch_many(vcpu, sp, start, spte))
2960 return;
2961
2962 start = NULL;
2963 } else if (!start)
2964 start = spte;
2965 }
2966 if (start)
2967 direct_pte_prefetch_many(vcpu, sp, start, spte);
2968 }
2969
2970 static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
2971 {
2972 struct kvm_mmu_page *sp;
2973
2974 sp = sptep_to_sp(sptep);
2975
2976 /*
2977 * Without accessed bits, there's no way to distinguish between
2978 * actually accessed translations and prefetched, so disable pte
2979 * prefetch if accessed bits aren't available.
2980 */
2981 if (sp_ad_disabled(sp))
2982 return;
2983
2984 if (sp->role.level > PG_LEVEL_4K)
2985 return;
2986
2987 /*
2988 * If addresses are being invalidated, skip prefetching to avoid
2989 * accidentally prefetching those addresses.
2990 */
2991 if (unlikely(vcpu->kvm->mmu_invalidate_in_progress))
2992 return;
2993
2994 __direct_pte_prefetch(vcpu, sp, sptep);
2995 }
2996
2997 /*
2998 * Lookup the mapping level for @gfn in the current mm.
2999 *
3000 * WARNING! Use of host_pfn_mapping_level() requires the caller and the end
3001 * consumer to be tied into KVM's handlers for MMU notifier events!
3002 *
3003 * There are several ways to safely use this helper:
3004 *
3005 * - Check mmu_invalidate_retry_gfn() after grabbing the mapping level, before
3006 * consuming it. In this case, mmu_lock doesn't need to be held during the
3007 * lookup, but it does need to be held while checking the MMU notifier.
3008 *
3009 * - Hold mmu_lock AND ensure there is no in-progress MMU notifier invalidation
3010 * event for the hva. This can be done by explicitly checking the MMU notifier
3011 * or by ensuring that KVM already has a valid mapping that covers the hva.
3012 *
3013 * - Do not use the result to install new mappings, e.g. use the host mapping
3014 * level only to decide whether or not to zap an entry. In this case, it's
3015 * not required to hold mmu_lock (though it's highly likely the caller will
3016 * want to hold mmu_lock anyways, e.g. to modify SPTEs).
3017 *
3018 * Note! The lookup can still race with modifications to host page tables, but
3019 * the above "rules" ensure KVM will not _consume_ the result of the walk if a
3020 * race with the primary MMU occurs.
3021 */
3022 static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
3023 const struct kvm_memory_slot *slot)
3024 {
3025 int level = PG_LEVEL_4K;
3026 unsigned long hva;
3027 unsigned long flags;
3028 pgd_t pgd;
3029 p4d_t p4d;
3030 pud_t pud;
3031 pmd_t pmd;
3032
3033 /*
3034 * Note, using the already-retrieved memslot and __gfn_to_hva_memslot()
3035 * is not solely for performance, it's also necessary to avoid the
3036 * "writable" check in __gfn_to_hva_many(), which will always fail on
3037 * read-only memslots due to gfn_to_hva() assuming writes. Earlier
3038 * page fault steps have already verified the guest isn't writing a
3039 * read-only memslot.
3040 */
3041 hva = __gfn_to_hva_memslot(slot, gfn);
3042
3043 /*
3044 * Disable IRQs to prevent concurrent tear down of host page tables,
3045 * e.g. if the primary MMU promotes a P*D to a huge page and then frees
3046 * the original page table.
3047 */
3048 local_irq_save(flags);
3049
3050 /*
3051 * Read each entry once. As above, a non-leaf entry can be promoted to
3052 * a huge page _during_ this walk. Re-reading the entry could send the
3053 * walk into the weeds, e.g. p*d_leaf() returns false (sees the old
3054 * value) and then p*d_offset() walks into the target huge page instead
3055 * of the old page table (sees the new value).
3056 */
3057 pgd = READ_ONCE(*pgd_offset(kvm->mm, hva));
3058 if (pgd_none(pgd))
3059 goto out;
3060
3061 p4d = READ_ONCE(*p4d_offset(&pgd, hva));
3062 if (p4d_none(p4d) || !p4d_present(p4d))
3063 goto out;
3064
3065 pud = READ_ONCE(*pud_offset(&p4d, hva));
3066 if (pud_none(pud) || !pud_present(pud))
3067 goto out;
3068
3069 if (pud_leaf(pud)) {
3070 level = PG_LEVEL_1G;
3071 goto out;
3072 }
3073
3074 pmd = READ_ONCE(*pmd_offset(&pud, hva));
3075 if (pmd_none(pmd) || !pmd_present(pmd))
3076 goto out;
3077
3078 if (pmd_leaf(pmd))
3079 level = PG_LEVEL_2M;
3080
3081 out:
3082 local_irq_restore(flags);
3083 return level;
3084 }
3085
3086 static int __kvm_mmu_max_mapping_level(struct kvm *kvm,
3087 const struct kvm_memory_slot *slot,
3088 gfn_t gfn, int max_level, bool is_private)
3089 {
3090 struct kvm_lpage_info *linfo;
3091 int host_level;
3092
3093 max_level = min(max_level, max_huge_page_level);
3094 for ( ; max_level > PG_LEVEL_4K; max_level--) {
3095 linfo = lpage_info_slot(gfn, slot, max_level);
3096 if (!linfo->disallow_lpage)
3097 break;
3098 }
3099
3100 if (is_private)
3101 return max_level;
3102
3103 if (max_level == PG_LEVEL_4K)
3104 return PG_LEVEL_4K;
3105
3106 host_level = host_pfn_mapping_level(kvm, gfn, slot);
3107 return min(host_level, max_level);
3108 }
3109
3110 int kvm_mmu_max_mapping_level(struct kvm *kvm,
3111 const struct kvm_memory_slot *slot, gfn_t gfn)
3112 {
3113 bool is_private = kvm_slot_can_be_private(slot) &&
3114 kvm_mem_is_private(kvm, gfn);
3115
3116 return __kvm_mmu_max_mapping_level(kvm, slot, gfn, PG_LEVEL_NUM, is_private);
3117 }
3118
3119 void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
3120 {
3121 struct kvm_memory_slot *slot = fault->slot;
3122 kvm_pfn_t mask;
3123
3124 fault->huge_page_disallowed = fault->exec && fault->nx_huge_page_workaround_enabled;
3125
3126 if (unlikely(fault->max_level == PG_LEVEL_4K))
3127 return;
3128
3129 if (is_error_noslot_pfn(fault->pfn))
3130 return;
3131
3132 if (kvm_slot_dirty_track_enabled(slot))
3133 return;
3134
3135 /*
3136 * Enforce the iTLB multihit workaround after capturing the requested
3137 * level, which will be used to do precise, accurate accounting.
3138 */
3139 fault->req_level = __kvm_mmu_max_mapping_level(vcpu->kvm, slot,
3140 fault->gfn, fault->max_level,
3141 fault->is_private);
3142 if (fault->req_level == PG_LEVEL_4K || fault->huge_page_disallowed)
3143 return;
3144
3145 /*
3146 * mmu_invalidate_retry() was successful and mmu_lock is held, so
3147 * the pmd can't be split from under us.
3148 */
3149 fault->goal_level = fault->req_level;
3150 mask = KVM_PAGES_PER_HPAGE(fault->goal_level) - 1;
3151 VM_BUG_ON((fault->gfn & mask) != (fault->pfn & mask));
3152 fault->pfn &= ~mask;
3153 }
3154
3155 void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level)
3156 {
3157 if (cur_level > PG_LEVEL_4K &&
3158 cur_level == fault->goal_level &&
3159 is_shadow_present_pte(spte) &&
3160 !is_large_pte(spte) &&
3161 spte_to_child_sp(spte)->nx_huge_page_disallowed) {
3162 /*
3163 * A small SPTE exists for this pfn, but FNAME(fetch),
3164 * direct_map(), or kvm_tdp_mmu_map() would like to create a
3165 * large PTE instead: just force them to go down another level,
3166 * patching the next 9 bits of the address back into the pfn
3167 * for them.
3168 */
3169 u64 page_mask = KVM_PAGES_PER_HPAGE(cur_level) -
3170 KVM_PAGES_PER_HPAGE(cur_level - 1);
3171 fault->pfn |= fault->gfn & page_mask;
3172 fault->goal_level--;
3173 }
3174 }
3175
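/*
 * Handle a page fault for a direct MMU by walking down from the shadow
 * root, allocating missing intermediate shadow pages, and installing a
 * leaf SPTE at the (possibly huge-page-adjusted) goal level.
 */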
3176 static int direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
3177 {
3178 struct kvm_shadow_walk_iterator it;
3179 struct kvm_mmu_page *sp;
3180 int ret;
3181 gfn_t base_gfn = fault->gfn;
3182
3183 kvm_mmu_hugepage_adjust(vcpu, fault);
3184
3185 trace_kvm_mmu_spte_requested(fault);
3186 for_each_shadow_entry(vcpu, fault->addr, it) {
3187 /*
3188 * We cannot overwrite existing page tables with an NX
3189 * large page, as the leaf could be executable.
3190 */
3191 if (fault->nx_huge_page_workaround_enabled)
3192 disallowed_hugepage_adjust(fault, *it.sptep, it.level);
3193
3194 base_gfn = gfn_round_for_level(fault->gfn, it.level);
3195 if (it.level == fault->goal_level)
3196 break;
3197
3198 sp = kvm_mmu_get_child_sp(vcpu, it.sptep, base_gfn, true, ACC_ALL);
3199 if (sp == ERR_PTR(-EEXIST))
3200 continue;
3201
3202 link_shadow_page(vcpu, it.sptep, sp);
3203 if (fault->huge_page_disallowed)
3204 account_nx_huge_page(vcpu->kvm, sp,
3205 fault->req_level >= it.level);
3206 }
3207
3208 if (WARN_ON_ONCE(it.level != fault->goal_level))
3209 return -EFAULT;
3210
3211 ret = mmu_set_spte(vcpu, fault->slot, it.sptep, ACC_ALL,
3212 base_gfn, fault->pfn, fault);
3213 if (ret == RET_PF_SPURIOUS)
3214 return ret;
3215
3216 direct_pte_prefetch(vcpu, it.sptep);
3217 return ret;
3218 }
3219
3220 static void kvm_send_hwpoison_signal(struct kvm_memory_slot *slot, gfn_t gfn)
3221 {
3222 unsigned long hva = gfn_to_hva_memslot(slot, gfn);
3223
3224 send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva, PAGE_SHIFT, current);
3225 }
3226
3227 static int kvm_handle_error_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
3228 {
3229 if (is_sigpending_pfn(fault->pfn)) {
3230 kvm_handle_signal_exit(vcpu);
3231 return -EINTR;
3232 }
3233
3234 /*
3235 * Do not cache the MMIO info caused by writing a read-only gfn
3236 * into the SPTE, otherwise a read access on the read-only gfn would
3237 * also cause an MMIO page fault and be treated as MMIO access.
3238 */
3239 if (fault->pfn == KVM_PFN_ERR_RO_FAULT)
3240 return RET_PF_EMULATE;
3241
3242 if (fault->pfn == KVM_PFN_ERR_HWPOISON) {
3243 kvm_send_hwpoison_signal(fault->slot, fault->gfn);
3244 return RET_PF_RETRY;
3245 }
3246
3247 return -EFAULT;
3248 }
3249
3250 static int kvm_handle_noslot_fault(struct kvm_vcpu *vcpu,
3251 struct kvm_page_fault *fault,
3252 unsigned int access)
3253 {
3254 gva_t gva = fault->is_tdp ? 0 : fault->addr;
3255
3256 if (fault->is_private) {
3257 kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
3258 return -EFAULT;
3259 }
3260
3261 vcpu_cache_mmio_info(vcpu, gva, fault->gfn,
3262 access & shadow_mmio_access_mask);
3263
3264 fault->slot = NULL;
3265 fault->pfn = KVM_PFN_NOSLOT;
3266 fault->map_writable = false;
3267
3268 /*
3269 * If MMIO caching is disabled, emulate immediately without
3270 * touching the shadow page tables as attempting to install an
3271 * MMIO SPTE will just be an expensive nop.
3272 */
3273 if (unlikely(!enable_mmio_caching))
3274 return RET_PF_EMULATE;
3275
3276 /*
3277 * Do not create an MMIO SPTE for a gfn greater than host.MAXPHYADDR,
3278 * any guest that generates such gfns is running nested and is being
3279 * tricked by L0 userspace (you can observe gfn > L1.MAXPHYADDR if and
3280 * only if L1's MAXPHYADDR is inaccurate with respect to the
3281 * hardware's).
3282 */
3283 if (unlikely(fault->gfn > kvm_mmu_max_gfn()))
3284 return RET_PF_EMULATE;
3285
3286 return RET_PF_CONTINUE;
3287 }
3288
3289 static bool page_fault_can_be_fast(struct kvm *kvm, struct kvm_page_fault *fault)
3290 {
3291 /*
3292 * Page faults with reserved bits set, i.e. faults on MMIO SPTEs, only
3293 * reach the common page fault handler if the SPTE has an invalid MMIO
3294 * generation number. Refreshing the MMIO generation needs to go down
3295 * the slow path. Note, EPT Misconfigs do NOT set the PRESENT flag!
3296 */
3297 if (fault->rsvd)
3298 return false;
3299
3300 /*
3301 * For hardware-protected VMs, certain conditions like attempting to
3302 * perform a write to a page which is not in the state that the guest
3303 * expects it to be in can result in a nested/extended #PF. In this
3304 * case, the below code might misconstrue this situation as being the
3305 * result of a write-protected access, and treat it as a spurious case
3306 * rather than taking any action to satisfy the real source of the #PF
3307 * such as generating a KVM_EXIT_MEMORY_FAULT. This can lead to the
3308 * guest spinning on a #PF indefinitely, so don't attempt the fast path
3309 * in this case.
3310 *
3311 * Note that the kvm_mem_is_private() check might race with an
3312 * attribute update, but this will either result in the guest spinning
3313 * on RET_PF_SPURIOUS until the update completes, or an actual spurious
3314 * case might go down the slow path. Either case will resolve itself.
3315 */
3316 if (kvm->arch.has_private_mem &&
3317 fault->is_private != kvm_mem_is_private(kvm, fault->gfn))
3318 return false;
3319
3320 /*
3321 * #PF can be fast if:
3322 *
3323 * 1. The shadow page table entry is not present and A/D bits are
3324 * disabled _by KVM_, which could mean that the fault is potentially
3325 * caused by access tracking (if enabled). If A/D bits are enabled
3326 * by KVM, but disabled by L1 for L2, KVM is forced to disable A/D
3327 * bits for L2 and employ access tracking, but the fast page fault
3328 * mechanism only supports direct MMUs.
3329 * 2. The shadow page table entry is present, the access is a write,
3330 * and no reserved bits are set (MMIO SPTEs cannot be "fixed"), i.e.
3331 * the fault was caused by a write-protection violation. If the
3332 * SPTE is MMU-writable (determined later), the fault can be fixed
3333 * by setting the Writable bit, which can be done out of mmu_lock.
3334 */
3335 if (!fault->present)
3336 return !kvm_ad_enabled;
3337
3338 /*
3339 * Note, instruction fetches and writes are mutually exclusive, ignore
3340 * the "exec" flag.
3341 */
3342 return fault->write;
3343 }
3344
3345 /*
3346 * Returns true if the SPTE was fixed successfully. Otherwise,
3347 * someone else modified the SPTE from its original value.
3348 */
3349 static bool fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu,
3350 struct kvm_page_fault *fault,
3351 u64 *sptep, u64 old_spte, u64 new_spte)
3352 {
3353 /*
3354 * Theoretically we could also set dirty bit (and flush TLB) here in
3355 * order to eliminate unnecessary PML logging. See comments in
3356 * set_spte. But fast_page_fault is very unlikely to happen with PML
3357 * enabled, so we do not do this. This might result in the same GPA
3358 * to be logged in PML buffer again when the write really happens, and
3359 * eventually to be called by mark_page_dirty twice. But it's also no
3360 * harm. This also avoids the TLB flush needed after setting dirty bit
3361 * so non-PML cases won't be impacted.
3362 *
3363 * Compare with make_spte() where instead shadow_dirty_mask is set.
3364 */
3365 if (!try_cmpxchg64(sptep, &old_spte, new_spte))
3366 return false;
3367
3368 if (is_writable_pte(new_spte) && !is_writable_pte(old_spte))
3369 mark_page_dirty_in_slot(vcpu->kvm, fault->slot, fault->gfn);
3370
3371 return true;
3372 }
3373
3374 /*
3375 * Returns the last level spte pointer of the shadow page walk for the given
3376 * gpa, and sets *spte to the spte value. This spte may be non-present. If no
3377 * walk could be performed, returns NULL and *spte does not contain valid data.
3378 *
3379 * Contract:
3380 * - Must be called between walk_shadow_page_lockless_{begin,end}.
3381 * - The returned sptep must not be used after walk_shadow_page_lockless_end.
3382 */
3383 static u64 *fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gpa_t gpa, u64 *spte)
3384 {
3385 struct kvm_shadow_walk_iterator iterator;
3386 u64 old_spte;
3387 u64 *sptep = NULL;
3388
3389 for_each_shadow_entry_lockless(vcpu, gpa, iterator, old_spte) {
3390 sptep = iterator.sptep;
3391 *spte = old_spte;
3392 }
3393
3394 return sptep;
3395 }
3396
3397 /*
3398 * Returns one of RET_PF_INVALID, RET_PF_FIXED or RET_PF_SPURIOUS.
3399 */
3400 static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
3401 {
3402 struct kvm_mmu_page *sp;
3403 int ret = RET_PF_INVALID;
3404 u64 spte;
3405 u64 *sptep;
3406 uint retry_count = 0;
3407
3408 if (!page_fault_can_be_fast(vcpu->kvm, fault))
3409 return ret;
3410
3411 walk_shadow_page_lockless_begin(vcpu);
3412
3413 do {
3414 u64 new_spte;
3415
3416 if (tdp_mmu_enabled)
3417 sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, fault->gfn, &spte);
3418 else
3419 sptep = fast_pf_get_last_sptep(vcpu, fault->addr, &spte);
3420
3421 /*
3422 * It's entirely possible for the mapping to have been zapped
3423 * by a different task, but the root page should always be
3424 * available as the vCPU holds a reference to its root(s).
3425 */
3426 if (WARN_ON_ONCE(!sptep))
3427 spte = FROZEN_SPTE;
3428
3429 if (!is_shadow_present_pte(spte))
3430 break;
3431
3432 sp = sptep_to_sp(sptep);
3433 if (!is_last_spte(spte, sp->role.level))
3434 break;
3435
3436 /*
3437 * Check whether the memory access that caused the fault would
3438 * still cause it if it were to be performed right now. If not,
3439 * then this is a spurious fault caused by TLB lazily flushed,
3440 * or some other CPU has already fixed the PTE after the
3441 * current CPU took the fault.
3442 *
3443 * Need not check the access of upper level table entries since
3444 * they are always ACC_ALL.
3445 */
3446 if (is_access_allowed(fault, spte)) {
3447 ret = RET_PF_SPURIOUS;
3448 break;
3449 }
3450
3451 new_spte = spte;
3452
3453 /*
3454 * KVM only supports fixing page faults outside of MMU lock for
3455 * direct MMUs, nested MMUs are always indirect, and KVM always
3456 * uses A/D bits for non-nested MMUs. Thus, if A/D bits are
3457 * enabled, the SPTE can't be an access-tracked SPTE.
3458 */
3459 if (unlikely(!kvm_ad_enabled) && is_access_track_spte(spte))
3460 new_spte = restore_acc_track_spte(new_spte) |
3461 shadow_accessed_mask;
3462
3463 /*
3464 * To keep things simple, only SPTEs that are MMU-writable can
3465 * be made fully writable outside of mmu_lock, e.g. only SPTEs
3466 * that were write-protected for dirty-logging or access
3467 * tracking are handled here. Don't bother checking if the
3468 * SPTE is writable to prioritize running with A/D bits enabled.
3469 * The is_access_allowed() check above handles the common case
3470 * of the fault being spurious, and the SPTE is known to be
3471 * shadow-present, i.e. except for access tracking restoration
3472 * making the new SPTE writable, the check is wasteful.
3473 */
3474 if (fault->write && is_mmu_writable_spte(spte)) {
3475 new_spte |= PT_WRITABLE_MASK;
3476
3477 /*
3478 * Do not fix write-permission on the large spte when
3479 * dirty logging is enabled. Since we only dirty the
3480 * first page into the dirty-bitmap in
3481 * fast_pf_fix_direct_spte(), other pages are missed
3482 * if its slot has dirty logging enabled.
3483 *
3484 * Instead, we let the slow page fault path create a
3485 * normal spte to fix the access.
3486 */
3487 if (sp->role.level > PG_LEVEL_4K &&
3488 kvm_slot_dirty_track_enabled(fault->slot))
3489 break;
3490 }
3491
3492 /* Verify that the fault can be handled in the fast path */
3493 if (new_spte == spte ||
3494 !is_access_allowed(fault, new_spte))
3495 break;
3496
3497 /*
3498 * Currently, fast page fault only works for direct mapping
3499 * since the gfn is not stable for indirect shadow page. See
3500 * Documentation/virt/kvm/locking.rst to get more detail.
3501 */
3502 if (fast_pf_fix_direct_spte(vcpu, fault, sptep, spte, new_spte)) {
3503 ret = RET_PF_FIXED;
3504 break;
3505 }
3506
3507 if (++retry_count > 4) {
3508 pr_warn_once("Fast #PF retrying more than 4 times.\n");
3509 break;
3510 }
3511
3512 } while (true);
3513
3514 trace_fast_page_fault(vcpu, fault, sptep, spte, ret);
3515 walk_shadow_page_lockless_end(vcpu);
3516
3517 if (ret != RET_PF_INVALID)
3518 vcpu->stat.pf_fast++;
3519
3520 return ret;
3521 }
3522
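/*
 * Drop the reference on the root pointed at by @root_hpa and invalidate the
 * slot.  TDP MMU roots are released via their refcount; shadow MMU roots are
 * queued for zapping once the last reference is gone and the role is invalid.
 */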
3523 static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
3524 struct list_head *invalid_list)
3525 {
3526 struct kvm_mmu_page *sp;
3527
3528 if (!VALID_PAGE(*root_hpa))
3529 return;
3530
3531 sp = root_to_sp(*root_hpa);
3532 if (WARN_ON_ONCE(!sp))
3533 return;
3534
3535 if (is_tdp_mmu_page(sp)) {
3536 lockdep_assert_held_read(&kvm->mmu_lock);
3537 kvm_tdp_mmu_put_root(kvm, sp);
3538 } else {
3539 lockdep_assert_held_write(&kvm->mmu_lock);
3540 if (!--sp->root_count && sp->role.invalid)
3541 kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
3542 }
3543
3544 *root_hpa = INVALID_PAGE;
3545 }
3546
3547 /* roots_to_free must be some combination of the KVM_MMU_ROOT_* flags */
3548 void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
3549 ulong roots_to_free)
3550 {
3551 bool is_tdp_mmu = tdp_mmu_enabled && mmu->root_role.direct;
3552 int i;
3553 LIST_HEAD(invalid_list);
3554 bool free_active_root;
3555
3556 WARN_ON_ONCE(roots_to_free & ~KVM_MMU_ROOTS_ALL);
3557
3558 BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG);
3559
3560 /* Before acquiring the MMU lock, see if we need to do any real work. */
3561 free_active_root = (roots_to_free & KVM_MMU_ROOT_CURRENT)
3562 && VALID_PAGE(mmu->root.hpa);
3563
3564 if (!free_active_root) {
3565 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
3566 if ((roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) &&
3567 VALID_PAGE(mmu->prev_roots[i].hpa))
3568 break;
3569
3570 if (i == KVM_MMU_NUM_PREV_ROOTS)
3571 return;
3572 }
3573
3574 if (is_tdp_mmu)
3575 read_lock(&kvm->mmu_lock);
3576 else
3577 write_lock(&kvm->mmu_lock);
3578
3579 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
3580 if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i))
3581 mmu_free_root_page(kvm, &mmu->prev_roots[i].hpa,
3582 &invalid_list);
3583
3584 if (free_active_root) {
3585 if (kvm_mmu_is_dummy_root(mmu->root.hpa)) {
3586 /* Nothing to cleanup for dummy roots. */
3587 } else if (root_to_sp(mmu->root.hpa)) {
3588 mmu_free_root_page(kvm, &mmu->root.hpa, &invalid_list);
3589 } else if (mmu->pae_root) {
3590 for (i = 0; i < 4; ++i) {
3591 if (!IS_VALID_PAE_ROOT(mmu->pae_root[i]))
3592 continue;
3593
3594 mmu_free_root_page(kvm, &mmu->pae_root[i],
3595 &invalid_list);
3596 mmu->pae_root[i] = INVALID_PAE_ROOT;
3597 }
3598 }
3599 mmu->root.hpa = INVALID_PAGE;
3600 mmu->root.pgd = 0;
3601 }
3602
3603 if (is_tdp_mmu) {
3604 read_unlock(&kvm->mmu_lock);
3605 WARN_ON_ONCE(!list_empty(&invalid_list));
3606 } else {
3607 kvm_mmu_commit_zap_page(kvm, &invalid_list);
3608 write_unlock(&kvm->mmu_lock);
3609 }
3610 }
3611 EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);
3612
3613 void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu)
3614 {
3615 unsigned long roots_to_free = 0;
3616 struct kvm_mmu_page *sp;
3617 hpa_t root_hpa;
3618 int i;
3619
3620 /*
3621 * This should not be called while L2 is active, L2 can't invalidate
3622 * _only_ its own roots, e.g. INVVPID unconditionally exits.
3623 */
3624 WARN_ON_ONCE(mmu->root_role.guest_mode);
3625
3626 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
3627 root_hpa = mmu->prev_roots[i].hpa;
3628 if (!VALID_PAGE(root_hpa))
3629 continue;
3630
3631 sp = root_to_sp(root_hpa);
3632 if (!sp || sp->role.guest_mode)
3633 roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
3634 }
3635
3636 kvm_mmu_free_roots(kvm, mmu, roots_to_free);
3637 }
3638 EXPORT_SYMBOL_GPL(kvm_mmu_free_guest_mode_roots);
3639
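/*
 * Get (or create) the shadow page that will serve as a root for @gfn with the
 * vCPU's root role at the given level/quadrant, pin it with a root reference,
 * and return the physical address of its page table.
 */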
3640 static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, int quadrant,
3641 u8 level)
3642 {
3643 union kvm_mmu_page_role role = vcpu->arch.mmu->root_role;
3644 struct kvm_mmu_page *sp;
3645
3646 role.level = level;
3647 role.quadrant = quadrant;
3648
3649 WARN_ON_ONCE(quadrant && !role.has_4_byte_gpte);
3650 WARN_ON_ONCE(role.direct && role.has_4_byte_gpte);
3651
3652 sp = kvm_mmu_get_shadow_page(vcpu, gfn, role);
3653 ++sp->root_count;
3654
3655 return __pa(sp->spt);
3656 }
3657
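/*
 * Allocate the root(s) for a direct MMU: TDP MMU root(s) when the TDP MMU is
 * enabled, otherwise a single 4/5-level shadow root or, for a PAE shadow root
 * level, four PAE page-directory roots.
 */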
3658 static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
3659 {
3660 struct kvm_mmu *mmu = vcpu->arch.mmu;
3661 u8 shadow_root_level = mmu->root_role.level;
3662 hpa_t root;
3663 unsigned i;
3664 int r;
3665
3666 if (tdp_mmu_enabled) {
3667 if (kvm_has_mirrored_tdp(vcpu->kvm) &&
3668 !VALID_PAGE(mmu->mirror_root_hpa))
3669 kvm_tdp_mmu_alloc_root(vcpu, true);
3670 kvm_tdp_mmu_alloc_root(vcpu, false);
3671 return 0;
3672 }
3673
3674 write_lock(&vcpu->kvm->mmu_lock);
3675 r = make_mmu_pages_available(vcpu);
3676 if (r < 0)
3677 goto out_unlock;
3678
3679 if (shadow_root_level >= PT64_ROOT_4LEVEL) {
3680 root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level);
3681 mmu->root.hpa = root;
3682 } else if (shadow_root_level == PT32E_ROOT_LEVEL) {
3683 if (WARN_ON_ONCE(!mmu->pae_root)) {
3684 r = -EIO;
3685 goto out_unlock;
3686 }
3687
3688 for (i = 0; i < 4; ++i) {
3689 WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
3690
3691 root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT), 0,
3692 PT32_ROOT_LEVEL);
3693 mmu->pae_root[i] = root | PT_PRESENT_MASK |
3694 shadow_me_value;
3695 }
3696 mmu->root.hpa = __pa(mmu->pae_root);
3697 } else {
3698 WARN_ONCE(1, "Bad TDP root level = %d\n", shadow_root_level);
3699 r = -EIO;
3700 goto out_unlock;
3701 }
3702
3703 /* root.pgd is ignored for direct MMUs. */
3704 mmu->root.pgd = 0;
3705 out_unlock:
3706 write_unlock(&vcpu->kvm->mmu_lock);
3707 return r;
3708 }
3709
3710 static int mmu_first_shadow_root_alloc(struct kvm *kvm)
3711 {
3712 struct kvm_memslots *slots;
3713 struct kvm_memory_slot *slot;
3714 int r = 0, i, bkt;
3715
3716 /*
3717 * Check if this is the first shadow root being allocated before
3718 * taking the lock.
3719 */
3720 if (kvm_shadow_root_allocated(kvm))
3721 return 0;
3722
3723 mutex_lock(&kvm->slots_arch_lock);
3724
3725 /* Recheck, under the lock, whether this is the first shadow root. */
3726 if (kvm_shadow_root_allocated(kvm))
3727 goto out_unlock;
3728
3729 /*
3730 * Check if anything actually needs to be allocated, e.g. all metadata
3731 * will be allocated upfront if TDP is disabled.
3732 */
3733 if (kvm_memslots_have_rmaps(kvm) &&
3734 kvm_page_track_write_tracking_enabled(kvm))
3735 goto out_success;
3736
3737 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
3738 slots = __kvm_memslots(kvm, i);
3739 kvm_for_each_memslot(slot, bkt, slots) {
3740 /*
3741 * Both of these functions are no-ops if the target is
3742 * already allocated, so unconditionally calling both
3743 * is safe. Intentionally do NOT free allocations on
3744 * failure to avoid having to track which allocations
3745 * were made now versus when the memslot was created.
3746 * The metadata is guaranteed to be freed when the slot
3747 * is freed, and will be kept/used if userspace retries
3748 * KVM_RUN instead of killing the VM.
3749 */
3750 r = memslot_rmap_alloc(slot, slot->npages);
3751 if (r)
3752 goto out_unlock;
3753 r = kvm_page_track_write_tracking_alloc(slot);
3754 if (r)
3755 goto out_unlock;
3756 }
3757 }
3758
3759 /*
3760 * Ensure that shadow_root_allocated becomes true strictly after
3761 * all the related pointers are set.
3762 */
3763 out_success:
3764 smp_store_release(&kvm->arch.shadow_root_allocated, true);
3765
3766 out_unlock:
3767 mutex_unlock(&kvm->slots_arch_lock);
3768 return r;
3769 }
3770
3771 static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
3772 {
3773 struct kvm_mmu *mmu = vcpu->arch.mmu;
3774 u64 pdptrs[4], pm_mask;
3775 gfn_t root_gfn, root_pgd;
3776 int quadrant, i, r;
3777 hpa_t root;
3778
3779 root_pgd = kvm_mmu_get_guest_pgd(vcpu, mmu);
3780 root_gfn = (root_pgd & __PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
3781
3782 if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) {
3783 mmu->root.hpa = kvm_mmu_get_dummy_root();
3784 return 0;
3785 }
3786
3787 /*
3788 * On SVM, reading PDPTRs might access guest memory, which might fault
3789 * and thus might sleep. Grab the PDPTRs before acquiring mmu_lock.
3790 */
3791 if (mmu->cpu_role.base.level == PT32E_ROOT_LEVEL) {
3792 for (i = 0; i < 4; ++i) {
3793 pdptrs[i] = mmu->get_pdptr(vcpu, i);
3794 if (!(pdptrs[i] & PT_PRESENT_MASK))
3795 continue;
3796
3797 if (!kvm_vcpu_is_visible_gfn(vcpu, pdptrs[i] >> PAGE_SHIFT))
3798 pdptrs[i] = 0;
3799 }
3800 }
3801
3802 r = mmu_first_shadow_root_alloc(vcpu->kvm);
3803 if (r)
3804 return r;
3805
3806 write_lock(&vcpu->kvm->mmu_lock);
3807 r = make_mmu_pages_available(vcpu);
3808 if (r < 0)
3809 goto out_unlock;
3810
3811 /*
3812 * Do we shadow a long mode page table? If so we need to
3813 * write-protect the guest's page table root.
3814 */
3815 if (mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL) {
3816 root = mmu_alloc_root(vcpu, root_gfn, 0,
3817 mmu->root_role.level);
3818 mmu->root.hpa = root;
3819 goto set_root_pgd;
3820 }
3821
3822 if (WARN_ON_ONCE(!mmu->pae_root)) {
3823 r = -EIO;
3824 goto out_unlock;
3825 }
3826
3827 /*
3828 * We shadow a 32 bit page table. This may be a legacy 2-level
3829 * or a PAE 3-level page table. In either case we need to be aware that
3830 * the shadow page table may be a PAE or a long mode page table.
3831 */
3832 pm_mask = PT_PRESENT_MASK | shadow_me_value;
3833 if (mmu->root_role.level >= PT64_ROOT_4LEVEL) {
3834 pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
3835
3836 if (WARN_ON_ONCE(!mmu->pml4_root)) {
3837 r = -EIO;
3838 goto out_unlock;
3839 }
3840 mmu->pml4_root[0] = __pa(mmu->pae_root) | pm_mask;
3841
3842 if (mmu->root_role.level == PT64_ROOT_5LEVEL) {
3843 if (WARN_ON_ONCE(!mmu->pml5_root)) {
3844 r = -EIO;
3845 goto out_unlock;
3846 }
3847 mmu->pml5_root[0] = __pa(mmu->pml4_root) | pm_mask;
3848 }
3849 }
3850
3851 for (i = 0; i < 4; ++i) {
3852 WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
3853
3854 if (mmu->cpu_role.base.level == PT32E_ROOT_LEVEL) {
3855 if (!(pdptrs[i] & PT_PRESENT_MASK)) {
3856 mmu->pae_root[i] = INVALID_PAE_ROOT;
3857 continue;
3858 }
3859 root_gfn = pdptrs[i] >> PAGE_SHIFT;
3860 }
3861
3862 /*
3863 * If shadowing 32-bit non-PAE page tables, each PAE page
3864 * directory maps one quarter of the guest's non-PAE page
3865 * directory. Otherwise each PAE page directory shadows one guest
3866 * PAE page directory so that quadrant should be 0.
3867 */
3868 quadrant = (mmu->cpu_role.base.level == PT32_ROOT_LEVEL) ? i : 0;
3869
3870 root = mmu_alloc_root(vcpu, root_gfn, quadrant, PT32_ROOT_LEVEL);
3871 mmu->pae_root[i] = root | pm_mask;
3872 }
3873
3874 if (mmu->root_role.level == PT64_ROOT_5LEVEL)
3875 mmu->root.hpa = __pa(mmu->pml5_root);
3876 else if (mmu->root_role.level == PT64_ROOT_4LEVEL)
3877 mmu->root.hpa = __pa(mmu->pml4_root);
3878 else
3879 mmu->root.hpa = __pa(mmu->pae_root);
3880
3881 set_root_pgd:
3882 mmu->root.pgd = root_pgd;
3883 out_unlock:
3884 write_unlock(&vcpu->kvm->mmu_lock);
3885
3886 return r;
3887 }
3888
3889 static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
3890 {
3891 struct kvm_mmu *mmu = vcpu->arch.mmu;
3892 bool need_pml5 = mmu->root_role.level > PT64_ROOT_4LEVEL;
3893 u64 *pml5_root = NULL;
3894 u64 *pml4_root = NULL;
3895 u64 *pae_root;
3896
3897 /*
3898 * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP
3899 * tables are allocated and initialized at root creation as there is no
3900 * equivalent level in the guest's NPT to shadow. Allocate the tables
3901 * on demand, as running a 32-bit L1 VMM on 64-bit KVM is very rare.
3902 */
3903 if (mmu->root_role.direct ||
3904 mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL ||
3905 mmu->root_role.level < PT64_ROOT_4LEVEL)
3906 return 0;
3907
3908 /*
3909 * NPT, the only paging mode that uses this horror, uses a fixed number
3910 * of levels for the shadow page tables, e.g. all MMUs are 4-level or
3911 * all MMUs are 5-level. Thus, this can safely require that pml5_root
3912 * is allocated if the other roots are valid and pml5 is needed, as any
3913 * prior MMU would also have required pml5.
3914 */
3915 if (mmu->pae_root && mmu->pml4_root && (!need_pml5 || mmu->pml5_root))
3916 return 0;
3917
3918 /*
3919 * The special roots should always be allocated in concert. Yell and
3920 * bail if KVM ends up in a state where only one of the roots is valid.
3921 */
3922 if (WARN_ON_ONCE(!tdp_enabled || mmu->pae_root || mmu->pml4_root ||
3923 (need_pml5 && mmu->pml5_root)))
3924 return -EIO;
3925
3926 /*
3927 * Unlike 32-bit NPT, the PDP table doesn't need to be in low mem, and
3928 * doesn't need to be decrypted.
3929 */
3930 pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3931 if (!pae_root)
3932 return -ENOMEM;
3933
3934 #ifdef CONFIG_X86_64
3935 pml4_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3936 if (!pml4_root)
3937 goto err_pml4;
3938
3939 if (need_pml5) {
3940 pml5_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3941 if (!pml5_root)
3942 goto err_pml5;
3943 }
3944 #endif
3945
3946 mmu->pae_root = pae_root;
3947 mmu->pml4_root = pml4_root;
3948 mmu->pml5_root = pml5_root;
3949
3950 return 0;
3951
3952 #ifdef CONFIG_X86_64
3953 err_pml5:
3954 free_page((unsigned long)pml4_root);
3955 err_pml4:
3956 free_page((unsigned long)pae_root);
3957 return -ENOMEM;
3958 #endif
3959 }
3960
3961 static bool is_unsync_root(hpa_t root)
3962 {
3963 struct kvm_mmu_page *sp;
3964
3965 if (!VALID_PAGE(root) || kvm_mmu_is_dummy_root(root))
3966 return false;
3967
3968 /*
3969 * The read barrier orders the CPU's read of SPTE.W during the page table
3970 * walk before the reads of sp->unsync/sp->unsync_children here.
3971 *
3972 * Even if another CPU was marking the SP as unsync-ed simultaneously,
3973 * any guest page table changes are not guaranteed to be visible anyway
3974 * until this VCPU issues a TLB flush strictly after those changes are
3975 * made. We only need to ensure that the other CPU sets these flags
3976 * before any actual changes to the page tables are made. The comments
3977 * in mmu_try_to_unsync_pages() describe what could go wrong if this
3978 * requirement isn't satisfied.
3979 */
3980 smp_rmb();
3981 sp = root_to_sp(root);
3982
3983 /*
3984 * PAE roots (somewhat arbitrarily) aren't backed by shadow pages, the
3985 * PDPTEs for a given PAE root need to be synchronized individually.
3986 */
3987 if (WARN_ON_ONCE(!sp))
3988 return false;
3989
3990 if (sp->unsync || sp->unsync_children)
3991 return true;
3992
3993 return false;
3994 }
3995
3996 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
3997 {
3998 int i;
3999 struct kvm_mmu_page *sp;
4000
4001 if (vcpu->arch.mmu->root_role.direct)
4002 return;
4003
4004 if (!VALID_PAGE(vcpu->arch.mmu->root.hpa))
4005 return;
4006
4007 vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
4008
4009 if (vcpu->arch.mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL) {
4010 hpa_t root = vcpu->arch.mmu->root.hpa;
4011
4012 if (!is_unsync_root(root))
4013 return;
4014
4015 sp = root_to_sp(root);
4016
4017 write_lock(&vcpu->kvm->mmu_lock);
4018 mmu_sync_children(vcpu, sp, true);
4019 write_unlock(&vcpu->kvm->mmu_lock);
4020 return;
4021 }
4022
4023 write_lock(&vcpu->kvm->mmu_lock);
4024
4025 for (i = 0; i < 4; ++i) {
4026 hpa_t root = vcpu->arch.mmu->pae_root[i];
4027
4028 if (IS_VALID_PAE_ROOT(root)) {
4029 sp = spte_to_child_sp(root);
4030 mmu_sync_children(vcpu, sp, true);
4031 }
4032 }
4033
4034 write_unlock(&vcpu->kvm->mmu_lock);
4035 }
4036
4037 void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu)
4038 {
4039 unsigned long roots_to_free = 0;
4040 int i;
4041
4042 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
4043 if (is_unsync_root(vcpu->arch.mmu->prev_roots[i].hpa))
4044 roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
4045
4046 /* sync prev_roots by simply freeing them */
4047 kvm_mmu_free_roots(vcpu->kvm, vcpu->arch.mmu, roots_to_free);
4048 }
4049
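/* With paging disabled in the guest, the "GVA" is already a guest physical address. */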
4050 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
4051 gpa_t vaddr, u64 access,
4052 struct x86_exception *exception)
4053 {
4054 if (exception)
4055 exception->error_code = 0;
4056 return kvm_translate_gpa(vcpu, mmu, vaddr, access, exception);
4057 }
4058
4059 static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
4060 {
4061 /*
4062 * A nested guest cannot use the MMIO cache if it is using nested
4063 * page tables, because cr2 is a nGPA while the cache stores GPAs.
4064 */
4065 if (mmu_is_nested(vcpu))
4066 return false;
4067
4068 if (direct)
4069 return vcpu_match_mmio_gpa(vcpu, addr);
4070
4071 return vcpu_match_mmio_gva(vcpu, addr);
4072 }
4073
4074 /*
4075 * Return the level of the lowest level SPTE added to sptes.
4076 * That SPTE may be non-present.
4077 *
4078 * Must be called between walk_shadow_page_lockless_{begin,end}.
4079 */
4080 static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level)
4081 {
4082 struct kvm_shadow_walk_iterator iterator;
4083 int leaf = -1;
4084 u64 spte;
4085
4086 for (shadow_walk_init(&iterator, vcpu, addr),
4087 *root_level = iterator.level;
4088 shadow_walk_okay(&iterator);
4089 __shadow_walk_next(&iterator, spte)) {
4090 leaf = iterator.level;
4091 spte = mmu_spte_get_lockless(iterator.sptep);
4092
4093 sptes[leaf] = spte;
4094 }
4095
4096 return leaf;
4097 }
4098
4099 static int get_sptes_lockless(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
4100 int *root_level)
4101 {
4102 int leaf;
4103
4104 walk_shadow_page_lockless_begin(vcpu);
4105
4106 if (is_tdp_mmu_active(vcpu))
4107 leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, root_level);
4108 else
4109 leaf = get_walk(vcpu, addr, sptes, root_level);
4110
4111 walk_shadow_page_lockless_end(vcpu);
4112 return leaf;
4113 }
4114
4115 /* return true if reserved bit(s) are detected on a valid, non-MMIO SPTE. */
4116 static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
4117 {
4118 u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
4119 struct rsvd_bits_validate *rsvd_check;
4120 int root, leaf, level;
4121 bool reserved = false;
4122
4123 leaf = get_sptes_lockless(vcpu, addr, sptes, &root);
4124 if (unlikely(leaf < 0)) {
4125 *sptep = 0ull;
4126 return reserved;
4127 }
4128
4129 *sptep = sptes[leaf];
4130
4131 /*
4132 * Skip reserved bits checks on the terminal leaf if it's not a valid
4133 * SPTE. Note, this also (intentionally) skips MMIO SPTEs, which, by
4134 * design, always have reserved bits set. The purpose of the checks is
4135 * to detect reserved bits on non-MMIO SPTEs, i.e. buggy SPTEs.
4136 */
4137 if (!is_shadow_present_pte(sptes[leaf]))
4138 leaf++;
4139
4140 rsvd_check = &vcpu->arch.mmu->shadow_zero_check;
4141
4142 for (level = root; level >= leaf; level--)
4143 reserved |= is_rsvd_spte(rsvd_check, sptes[level], level);
4144
4145 if (reserved) {
4146 pr_err("%s: reserved bits set on MMU-present spte, addr 0x%llx, hierarchy:\n",
4147 __func__, addr);
4148 for (level = root; level >= leaf; level--)
4149 pr_err("------ spte = 0x%llx level = %d, rsvd bits = 0x%llx",
4150 sptes[level], level,
4151 get_rsvd_bits(rsvd_check, sptes[level], level));
4152 }
4153
4154 return reserved;
4155 }
4156
4157 static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
4158 {
4159 u64 spte;
4160 bool reserved;
4161
4162 if (mmio_info_in_cache(vcpu, addr, direct))
4163 return RET_PF_EMULATE;
4164
4165 reserved = get_mmio_spte(vcpu, addr, &spte);
4166 if (WARN_ON_ONCE(reserved))
4167 return -EINVAL;
4168
4169 if (is_mmio_spte(vcpu->kvm, spte)) {
4170 gfn_t gfn = get_mmio_spte_gfn(spte);
4171 unsigned int access = get_mmio_spte_access(spte);
4172
4173 if (!check_mmio_spte(vcpu, spte))
4174 return RET_PF_INVALID;
4175
4176 if (direct)
4177 addr = 0;
4178
4179 trace_handle_mmio_page_fault(addr, gfn, access);
4180 vcpu_cache_mmio_info(vcpu, addr, gfn, access);
4181 return RET_PF_EMULATE;
4182 }
4183
4184 /*
4185 * If the page table has been zapped by another CPU, let the vCPU fault
4186 * again on the address.
4187 */
4188 return RET_PF_RETRY;
4189 }
4190
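/*
 * Returns true if the fault is a write to a write-tracked gfn, i.e. a fault
 * that cannot be fixed by simply installing a SPTE.
 */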
4191 static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
4192 struct kvm_page_fault *fault)
4193 {
4194 if (unlikely(fault->rsvd))
4195 return false;
4196
4197 if (!fault->present || !fault->write)
4198 return false;
4199
4200 /*
4201 * The guest is writing to a page that is write-tracked, which cannot
4202 * be fixed by the page fault handler.
4203 */
4204 if (kvm_gfn_is_write_tracked(vcpu->kvm, fault->slot, fault->gfn))
4205 return true;
4206
4207 return false;
4208 }
4209
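/* Reset the write-flooding count on every shadow page in the walk for @addr. */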
4210 static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
4211 {
4212 struct kvm_shadow_walk_iterator iterator;
4213 u64 spte;
4214
4215 walk_shadow_page_lockless_begin(vcpu);
4216 for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
4217 clear_sp_write_flooding_count(iterator.sptep);
4218 walk_shadow_page_lockless_end(vcpu);
4219 }
4220
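/*
 * Allocate an async #PF token: bits 31:12 hold a per-vCPU counter and bits
 * 11:0 hold the vcpu_id.  The counter is reset to 1 whenever shifting it left
 * by 12 would yield zero, so the resulting token is never 0.
 */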
4221 static u32 alloc_apf_token(struct kvm_vcpu *vcpu)
4222 {
4223 /* make sure the token value is not 0 */
4224 u32 id = vcpu->arch.apf.id;
4225
4226 if (id << 12 == 0)
4227 vcpu->arch.apf.id = 1;
4228
4229 return (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
4230 }
4231
4232 static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu,
4233 struct kvm_page_fault *fault)
4234 {
4235 struct kvm_arch_async_pf arch;
4236
4237 arch.token = alloc_apf_token(vcpu);
4238 arch.gfn = fault->gfn;
4239 arch.error_code = fault->error_code;
4240 arch.direct_map = vcpu->arch.mmu->root_role.direct;
4241 arch.cr3 = kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu);
4242
4243 return kvm_setup_async_pf(vcpu, fault->addr,
4244 kvm_vcpu_gfn_to_hva(vcpu, fault->gfn), &arch);
4245 }
4246
4247 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
4248 {
4249 int r;
4250
4251 if (WARN_ON_ONCE(work->arch.error_code & PFERR_PRIVATE_ACCESS))
4252 return;
4253
4254 if ((vcpu->arch.mmu->root_role.direct != work->arch.direct_map) ||
4255 work->wakeup_all)
4256 return;
4257
4258 r = kvm_mmu_reload(vcpu);
4259 if (unlikely(r))
4260 return;
4261
4262 if (!vcpu->arch.mmu->root_role.direct &&
4263 work->arch.cr3 != kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu))
4264 return;
4265
4266 r = kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, work->arch.error_code,
4267 true, NULL, NULL);
4268
4269 /*
4270 * Account fixed page faults, otherwise they'll never be counted, but
4271 * ignore stats for all other return types. Page-ready "faults" aren't
4272 * truly spurious and never trigger emulation.
4273 */
4274 if (r == RET_PF_FIXED)
4275 vcpu->stat.pf_fixed++;
4276 }
4277
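/* Map a page allocation order to the largest KVM hugepage level it can back. */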
4278 static inline u8 kvm_max_level_for_order(int order)
4279 {
4280 BUILD_BUG_ON(KVM_MAX_HUGEPAGE_LEVEL > PG_LEVEL_1G);
4281
4282 KVM_MMU_WARN_ON(order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G) &&
4283 order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M) &&
4284 order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_4K));
4285
4286 if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G))
4287 return PG_LEVEL_1G;
4288
4289 if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M))
4290 return PG_LEVEL_2M;
4291
4292 return PG_LEVEL_4K;
4293 }
4294
4295 static u8 kvm_max_private_mapping_level(struct kvm *kvm, kvm_pfn_t pfn,
4296 u8 max_level, int gmem_order)
4297 {
4298 u8 req_max_level;
4299
4300 if (max_level == PG_LEVEL_4K)
4301 return PG_LEVEL_4K;
4302
4303 max_level = min(kvm_max_level_for_order(gmem_order), max_level);
4304 if (max_level == PG_LEVEL_4K)
4305 return PG_LEVEL_4K;
4306
4307 req_max_level = kvm_x86_call(private_max_mapping_level)(kvm, pfn);
4308 if (req_max_level)
4309 max_level = min(max_level, req_max_level);
4310
4311 return max_level;
4312 }
4313
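/*
 * Paired with kvm_mmu_faultin_pfn(): release the refcounted page acquired
 * during fault-in now that the fault has been resolved (or is being retried).
 */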
4314 static void kvm_mmu_finish_page_fault(struct kvm_vcpu *vcpu,
4315 struct kvm_page_fault *fault, int r)
4316 {
4317 kvm_release_faultin_page(vcpu->kvm, fault->refcounted_page,
4318 r == RET_PF_RETRY, fault->map_writable);
4319 }
4320
4321 static int kvm_mmu_faultin_pfn_private(struct kvm_vcpu *vcpu,
4322 struct kvm_page_fault *fault)
4323 {
4324 int max_order, r;
4325
4326 if (!kvm_slot_can_be_private(fault->slot)) {
4327 kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
4328 return -EFAULT;
4329 }
4330
4331 r = kvm_gmem_get_pfn(vcpu->kvm, fault->slot, fault->gfn, &fault->pfn,
4332 &fault->refcounted_page, &max_order);
4333 if (r) {
4334 kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
4335 return r;
4336 }
4337
4338 fault->map_writable = !(fault->slot->flags & KVM_MEM_READONLY);
4339 fault->max_level = kvm_max_private_mapping_level(vcpu->kvm, fault->pfn,
4340 fault->max_level, max_order);
4341
4342 return RET_PF_CONTINUE;
4343 }
4344
4345 static int __kvm_mmu_faultin_pfn(struct kvm_vcpu *vcpu,
4346 struct kvm_page_fault *fault)
4347 {
4348 unsigned int foll = fault->write ? FOLL_WRITE : 0;
4349
4350 if (fault->is_private)
4351 return kvm_mmu_faultin_pfn_private(vcpu, fault);
4352
4353 foll |= FOLL_NOWAIT;
4354 fault->pfn = __kvm_faultin_pfn(fault->slot, fault->gfn, foll,
4355 &fault->map_writable, &fault->refcounted_page);
4356
4357 /*
4358 * If resolving the page failed because I/O is needed to fault-in the
4359 * page, then either set up an asynchronous #PF to do the I/O, or if
4360 * doing an async #PF isn't possible, retry with I/O allowed. All
4361 * other failures are terminal, i.e. retrying won't help.
4362 */
4363 if (fault->pfn != KVM_PFN_ERR_NEEDS_IO)
4364 return RET_PF_CONTINUE;
4365
4366 if (!fault->prefetch && kvm_can_do_async_pf(vcpu)) {
4367 trace_kvm_try_async_get_page(fault->addr, fault->gfn);
4368 if (kvm_find_async_pf_gfn(vcpu, fault->gfn)) {
4369 trace_kvm_async_pf_repeated_fault(fault->addr, fault->gfn);
4370 kvm_make_request(KVM_REQ_APF_HALT, vcpu);
4371 return RET_PF_RETRY;
4372 } else if (kvm_arch_setup_async_pf(vcpu, fault)) {
4373 return RET_PF_RETRY;
4374 }
4375 }
4376
4377 /*
4378 * Allow gup to bail on pending non-fatal signals when it's also allowed
4379 * to wait for IO. Note, gup always bails if it is unable to quickly
4380 * get a page and a fatal signal, i.e. SIGKILL, is pending.
4381 */
4382 foll |= FOLL_INTERRUPTIBLE;
4383 foll &= ~FOLL_NOWAIT;
4384 fault->pfn = __kvm_faultin_pfn(fault->slot, fault->gfn, foll,
4385 &fault->map_writable, &fault->refcounted_page);
4386
4387 return RET_PF_CONTINUE;
4388 }
4389
4390 static int kvm_mmu_faultin_pfn(struct kvm_vcpu *vcpu,
4391 struct kvm_page_fault *fault, unsigned int access)
4392 {
4393 struct kvm_memory_slot *slot = fault->slot;
4394 struct kvm *kvm = vcpu->kvm;
4395 int ret;
4396
4397 if (KVM_BUG_ON(kvm_is_gfn_alias(kvm, fault->gfn), kvm))
4398 return -EFAULT;
4399
4400 /*
4401 * Note that the mmu_invalidate_seq also serves to detect a concurrent
4402 * change in attributes. is_page_fault_stale() will detect an
4403 * invalidation related to fault->gfn and resume the guest without
4404 * installing a mapping in the page tables.
4405 */
4406 fault->mmu_seq = vcpu->kvm->mmu_invalidate_seq;
4407 smp_rmb();
4408
4409 /*
4410 * Now that we have a snapshot of mmu_invalidate_seq we can check for a
4411 * private vs. shared mismatch.
4412 */
4413 if (fault->is_private != kvm_mem_is_private(kvm, fault->gfn)) {
4414 kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
4415 return -EFAULT;
4416 }
4417
4418 if (unlikely(!slot))
4419 return kvm_handle_noslot_fault(vcpu, fault, access);
4420
4421 /*
4422 * Retry the page fault if the gfn hit a memslot that is being deleted
4423 * or moved. This ensures any existing SPTEs for the old memslot will
4424 * be zapped before KVM inserts a new MMIO SPTE for the gfn.
4425 */
4426 if (slot->flags & KVM_MEMSLOT_INVALID)
4427 return RET_PF_RETRY;
4428
4429 if (slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT) {
4430 /*
4431 * Don't map L1's APIC access page into L2, KVM doesn't support
4432 * using APICv/AVIC to accelerate L2 accesses to L1's APIC,
4433 * i.e. the access needs to be emulated. Emulating access to
4434 * L1's APIC is also correct if L1 is accelerating L2's own
4435 * virtual APIC, but for some reason L1 also maps _L1's_ APIC
4436 * into L2. Note, vcpu_is_mmio_gpa() always treats access to
4437 * the APIC as MMIO. Allow an MMIO SPTE to be created, as KVM
4438 * uses different roots for L1 vs. L2, i.e. there is no danger
4439 * of breaking APICv/AVIC for L1.
4440 */
4441 if (is_guest_mode(vcpu))
4442 return kvm_handle_noslot_fault(vcpu, fault, access);
4443
4444 /*
4445 * If the APIC access page exists but is disabled, go directly
4446 * to emulation without caching the MMIO access or creating a
4447 * MMIO SPTE. That way the cache doesn't need to be purged
4448 * when the AVIC is re-enabled.
4449 */
4450 if (!kvm_apicv_activated(vcpu->kvm))
4451 return RET_PF_EMULATE;
4452 }
4453
4454 /*
4455 * Check for a relevant mmu_notifier invalidation event before getting
4456 * the pfn from the primary MMU, and before acquiring mmu_lock.
4457 *
4458 * For mmu_lock, if there is an in-progress invalidation and the kernel
4459 * allows preemption, the invalidation task may drop mmu_lock and yield
4460 * in response to mmu_lock being contended, which is *very* counter-
4461 * productive as this vCPU can't actually make forward progress until
4462 * the invalidation completes.
4463 *
4464 * Retrying now can also avoid unnecessary lock contention in the primary
4465 * MMU, as the primary MMU doesn't necessarily hold a single lock for
4466 * the duration of the invalidation, i.e. faulting in a conflicting pfn
4467 * can cause the invalidation to take longer by holding locks that are
4468 * needed to complete the invalidation.
4469 *
4470 * Do the pre-check even for non-preemptible kernels, i.e. even if KVM
4471 * will never yield mmu_lock in response to contention, as this vCPU is
4472 * *guaranteed* to need to retry, i.e. waiting until mmu_lock is held
4473 * to detect retry guarantees the worst case latency for the vCPU.
4474 */
4475 if (mmu_invalidate_retry_gfn_unsafe(kvm, fault->mmu_seq, fault->gfn))
4476 return RET_PF_RETRY;
4477
4478 ret = __kvm_mmu_faultin_pfn(vcpu, fault);
4479 if (ret != RET_PF_CONTINUE)
4480 return ret;
4481
4482 if (unlikely(is_error_pfn(fault->pfn)))
4483 return kvm_handle_error_pfn(vcpu, fault);
4484
4485 if (WARN_ON_ONCE(!fault->slot || is_noslot_pfn(fault->pfn)))
4486 return kvm_handle_noslot_fault(vcpu, fault, access);
4487
4488 /*
4489 * Check again for a relevant mmu_notifier invalidation event purely to
4490 * avoid contending mmu_lock. Most invalidations will be detected by
4491 * the previous check, but checking is extremely cheap relative to the
4492 * overall cost of failing to detect the invalidation until after
4493 * mmu_lock is acquired.
4494 */
4495 if (mmu_invalidate_retry_gfn_unsafe(kvm, fault->mmu_seq, fault->gfn)) {
4496 kvm_mmu_finish_page_fault(vcpu, fault, RET_PF_RETRY);
4497 return RET_PF_RETRY;
4498 }
4499
4500 return RET_PF_CONTINUE;
4501 }
4502
4503 /*
4504 * Returns true if the page fault is stale and needs to be retried, i.e. if the
4505 * root was invalidated by a memslot update or a relevant mmu_notifier fired.
4506 */
4507 static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
4508 struct kvm_page_fault *fault)
4509 {
4510 struct kvm_mmu_page *sp = root_to_sp(vcpu->arch.mmu->root.hpa);
4511
4512 /* Special roots, e.g. pae_root, are not backed by shadow pages. */
4513 if (sp && is_obsolete_sp(vcpu->kvm, sp))
4514 return true;
4515
4516 /*
4517 * Roots without an associated shadow page are considered invalid if
4518 * there is a pending request to free obsolete roots. The request is
4519 * only a hint that the current root _may_ be obsolete and needs to be
4520 * reloaded, e.g. if the guest frees a PGD that KVM is tracking as a
4521 * previous root, then __kvm_mmu_prepare_zap_page() signals all vCPUs
4522 * to reload even if no vCPU is actively using the root.
4523 */
4524 if (!sp && kvm_test_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu))
4525 return true;
4526
4527 /*
4528 * Check for a relevant mmu_notifier invalidation event one last time
4529 * now that mmu_lock is held, as the "unsafe" checks performed without
4530 * holding mmu_lock can get false negatives.
4531 */
4532 return fault->slot &&
4533 mmu_invalidate_retry_gfn(vcpu->kvm, fault->mmu_seq, fault->gfn);
4534 }
4535
4536 static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
4537 {
4538 int r;
4539
4540 /* Dummy roots are used only for shadowing bad guest roots. */
4541 if (WARN_ON_ONCE(kvm_mmu_is_dummy_root(vcpu->arch.mmu->root.hpa)))
4542 return RET_PF_RETRY;
4543
4544 if (page_fault_handle_page_track(vcpu, fault))
4545 return RET_PF_WRITE_PROTECTED;
4546
4547 r = fast_page_fault(vcpu, fault);
4548 if (r != RET_PF_INVALID)
4549 return r;
4550
4551 r = mmu_topup_memory_caches(vcpu, false);
4552 if (r)
4553 return r;
4554
4555 r = kvm_mmu_faultin_pfn(vcpu, fault, ACC_ALL);
4556 if (r != RET_PF_CONTINUE)
4557 return r;
4558
4559 r = RET_PF_RETRY;
4560 write_lock(&vcpu->kvm->mmu_lock);
4561
4562 if (is_page_fault_stale(vcpu, fault))
4563 goto out_unlock;
4564
4565 r = make_mmu_pages_available(vcpu);
4566 if (r)
4567 goto out_unlock;
4568
4569 r = direct_map(vcpu, fault);
4570
4571 out_unlock:
4572 kvm_mmu_finish_page_fault(vcpu, fault, r);
4573 write_unlock(&vcpu->kvm->mmu_lock);
4574 return r;
4575 }
4576
4577 static int nonpaging_page_fault(struct kvm_vcpu *vcpu,
4578 struct kvm_page_fault *fault)
4579 {
4580 /* This path builds a PAE pagetable, we can map 2mb pages at maximum. */
4581 fault->max_level = PG_LEVEL_2M;
4582 return direct_page_fault(vcpu, fault);
4583 }
4584
4585 int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
4586 u64 fault_address, char *insn, int insn_len)
4587 {
4588 int r = 1;
4589 u32 flags = vcpu->arch.apf.host_apf_flags;
4590
4591 #ifndef CONFIG_X86_64
4592 /* A 64-bit CR2 should be impossible on 32-bit KVM. */
4593 if (WARN_ON_ONCE(fault_address >> 32))
4594 return -EFAULT;
4595 #endif
4596 /*
4597 * Legacy #PF exceptions only have a 32-bit error code. Simply drop the
4598 * upper bits as KVM doesn't use them for #PF (because they are never
4599 * set), and to ensure there are no collisions with KVM-defined bits.
4600 */
4601 if (WARN_ON_ONCE(error_code >> 32))
4602 error_code = lower_32_bits(error_code);
4603
4604 /*
4605 * Restrict KVM-defined flags to bits 63:32 so that it's impossible for
4606 * them to conflict with #PF error codes, which are limited to 32 bits.
4607 */
4608 BUILD_BUG_ON(lower_32_bits(PFERR_SYNTHETIC_MASK));
4609
4610 vcpu->arch.l1tf_flush_l1d = true;
4611 if (!flags) {
4612 trace_kvm_page_fault(vcpu, fault_address, error_code);
4613
4614 r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn,
4615 insn_len);
4616 } else if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
4617 vcpu->arch.apf.host_apf_flags = 0;
4618 local_irq_disable();
4619 kvm_async_pf_task_wait_schedule(fault_address);
4620 local_irq_enable();
4621 } else {
4622 WARN_ONCE(1, "Unexpected host async PF flags: %x\n", flags);
4623 }
4624
4625 return r;
4626 }
4627 EXPORT_SYMBOL_GPL(kvm_handle_page_fault);
4628
4629 #ifdef CONFIG_X86_64
4630 static int kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu,
4631 struct kvm_page_fault *fault)
4632 {
4633 int r;
4634
4635 if (page_fault_handle_page_track(vcpu, fault))
4636 return RET_PF_WRITE_PROTECTED;
4637
4638 r = fast_page_fault(vcpu, fault);
4639 if (r != RET_PF_INVALID)
4640 return r;
4641
4642 r = mmu_topup_memory_caches(vcpu, false);
4643 if (r)
4644 return r;
4645
4646 r = kvm_mmu_faultin_pfn(vcpu, fault, ACC_ALL);
4647 if (r != RET_PF_CONTINUE)
4648 return r;
4649
4650 r = RET_PF_RETRY;
4651 read_lock(&vcpu->kvm->mmu_lock);
4652
4653 if (is_page_fault_stale(vcpu, fault))
4654 goto out_unlock;
4655
4656 r = kvm_tdp_mmu_map(vcpu, fault);
4657
4658 out_unlock:
4659 kvm_mmu_finish_page_fault(vcpu, fault, r);
4660 read_unlock(&vcpu->kvm->mmu_lock);
4661 return r;
4662 }
4663 #endif
4664
4665 bool kvm_mmu_may_ignore_guest_pat(void)
4666 {
4667 /*
4668 * When EPT is enabled (shadow_memtype_mask is non-zero), and the VM
4669 * has non-coherent DMA (DMA doesn't snoop CPU caches), KVM's ABI is to
4670 * honor the memtype from the guest's PAT so that guest accesses to
4671 * memory that is DMA'd aren't cached against the guest's wishes. As a
4672 * result, KVM _may_ ignore guest PAT, whereas without non-coherent DMA,
4673 * KVM _always_ ignores guest PAT (when EPT is enabled).
4674 */
4675 return shadow_memtype_mask;
4676 }
4677
4678 int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
4679 {
4680 #ifdef CONFIG_X86_64
4681 if (tdp_mmu_enabled)
4682 return kvm_tdp_mmu_page_fault(vcpu, fault);
4683 #endif
4684
4685 return direct_page_fault(vcpu, fault);
4686 }
4687
4688 static int kvm_tdp_map_page(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code,
4689 u8 *level)
4690 {
4691 int r;
4692
4693 /*
4694 * Restrict to TDP page fault, since that's the only case where the MMU
4695 * is indexed by GPA.
4696 */
4697 if (vcpu->arch.mmu->page_fault != kvm_tdp_page_fault)
4698 return -EOPNOTSUPP;
4699
4700 do {
4701 if (signal_pending(current))
4702 return -EINTR;
4703 cond_resched();
4704 r = kvm_mmu_do_page_fault(vcpu, gpa, error_code, true, NULL, level);
4705 } while (r == RET_PF_RETRY);
4706
4707 if (r < 0)
4708 return r;
4709
4710 switch (r) {
4711 case RET_PF_FIXED:
4712 case RET_PF_SPURIOUS:
4713 case RET_PF_WRITE_PROTECTED:
4714 return 0;
4715
4716 case RET_PF_EMULATE:
4717 return -ENOENT;
4718
4719 case RET_PF_RETRY:
4720 case RET_PF_CONTINUE:
4721 case RET_PF_INVALID:
4722 default:
4723 WARN_ONCE(1, "could not fix page fault during prefault");
4724 return -EIO;
4725 }
4726 }
4727
4728 long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
4729 struct kvm_pre_fault_memory *range)
4730 {
4731 u64 error_code = PFERR_GUEST_FINAL_MASK;
4732 u8 level = PG_LEVEL_4K;
4733 u64 end;
4734 int r;
4735
4736 if (!vcpu->kvm->arch.pre_fault_allowed)
4737 return -EOPNOTSUPP;
4738
4739 /*
4740 * reload is efficient when called repeatedly, so we can do it on
4741 * every iteration.
4742 */
4743 r = kvm_mmu_reload(vcpu);
4744 if (r)
4745 return r;
4746
4747 if (kvm_arch_has_private_mem(vcpu->kvm) &&
4748 kvm_mem_is_private(vcpu->kvm, gpa_to_gfn(range->gpa)))
4749 error_code |= PFERR_PRIVATE_ACCESS;
4750
4751 /*
4752 * Shadow paging uses GVA for kvm page fault, so restrict to
4753 * two-dimensional paging.
4754 */
4755 r = kvm_tdp_map_page(vcpu, range->gpa, error_code, &level);
4756 if (r < 0)
4757 return r;
4758
4759 /*
4760 * If the mapping that covers range->gpa can use a huge page, it
4761 * may start below it or end after range->gpa + range->size.
4762 */
4763 end = (range->gpa & KVM_HPAGE_MASK(level)) + KVM_HPAGE_SIZE(level);
4764 return min(range->size, end - range->gpa);
4765 }
4766
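/* Set up the MMU callbacks used when the guest has paging disabled. */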
4767 static void nonpaging_init_context(struct kvm_mmu *context)
4768 {
4769 context->page_fault = nonpaging_page_fault;
4770 context->gva_to_gpa = nonpaging_gva_to_gpa;
4771 context->sync_spte = NULL;
4772 }
4773
4774 static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
4775 union kvm_mmu_page_role role)
4776 {
4777 struct kvm_mmu_page *sp;
4778
4779 if (!VALID_PAGE(root->hpa))
4780 return false;
4781
4782 if (!role.direct && pgd != root->pgd)
4783 return false;
4784
4785 sp = root_to_sp(root->hpa);
4786 if (WARN_ON_ONCE(!sp))
4787 return false;
4788
4789 return role.word == sp->role.word;
4790 }
4791
4792 /*
4793 * Find out if a previously cached root matching the new pgd/role is available,
4794 * and insert the current root as the MRU in the cache.
4795 * If a matching root is found, it is assigned to kvm_mmu->root and
4796 * true is returned.
4797 * If no match is found, kvm_mmu->root is left invalid, the LRU root is
4798 * evicted to make room for the current root, and false is returned.
4799 */
4800 static bool cached_root_find_and_keep_current(struct kvm *kvm, struct kvm_mmu *mmu,
4801 gpa_t new_pgd,
4802 union kvm_mmu_page_role new_role)
4803 {
4804 uint i;
4805
4806 if (is_root_usable(&mmu->root, new_pgd, new_role))
4807 return true;
4808
4809 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
4810 /*
4811 * The swaps end up rotating the cache like this:
4812 * C 0 1 2 3 (on entry to the function)
4813 * 0 C 1 2 3
4814 * 1 C 0 2 3
4815 * 2 C 0 1 3
4816 * 3 C 0 1 2 (on exit from the loop)
4817 */
4818 swap(mmu->root, mmu->prev_roots[i]);
4819 if (is_root_usable(&mmu->root, new_pgd, new_role))
4820 return true;
4821 }
4822
4823 kvm_mmu_free_roots(kvm, mmu, KVM_MMU_ROOT_CURRENT);
4824 return false;
4825 }
4826
4827 /*
4828 * Find out if a previously cached root matching the new pgd/role is available.
4829 * On entry, mmu->root is invalid.
4830 * If a matching root is found, it is assigned to kvm_mmu->root, the LRU entry
4831 * of the cache becomes invalid, and true is returned.
4832 * If no match is found, kvm_mmu->root is left invalid and false is returned.
4833 */
4834 static bool cached_root_find_without_current(struct kvm *kvm, struct kvm_mmu *mmu,
4835 gpa_t new_pgd,
4836 union kvm_mmu_page_role new_role)
4837 {
4838 uint i;
4839
4840 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
4841 if (is_root_usable(&mmu->prev_roots[i], new_pgd, new_role))
4842 goto hit;
4843
4844 return false;
4845
4846 hit:
4847 swap(mmu->root, mmu->prev_roots[i]);
4848 /* Bubble up the remaining roots. */
4849 for (; i < KVM_MMU_NUM_PREV_ROOTS - 1; i++)
4850 mmu->prev_roots[i] = mmu->prev_roots[i + 1];
4851 mmu->prev_roots[i].hpa = INVALID_PAGE;
4852 return true;
4853 }
4854
4855 static bool fast_pgd_switch(struct kvm *kvm, struct kvm_mmu *mmu,
4856 gpa_t new_pgd, union kvm_mmu_page_role new_role)
4857 {
4858 /*
4859 * Limit reuse to 64-bit hosts+VMs without "special" roots in order to
4860 * avoid having to deal with PDPTEs and other complexities.
4861 */
4862 if (VALID_PAGE(mmu->root.hpa) && !root_to_sp(mmu->root.hpa))
4863 kvm_mmu_free_roots(kvm, mmu, KVM_MMU_ROOT_CURRENT);
4864
4865 if (VALID_PAGE(mmu->root.hpa))
4866 return cached_root_find_and_keep_current(kvm, mmu, new_pgd, new_role);
4867 else
4868 return cached_root_find_without_current(kvm, mmu, new_pgd, new_role);
4869 }
4870
4871 void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
4872 {
4873 struct kvm_mmu *mmu = vcpu->arch.mmu;
4874 union kvm_mmu_page_role new_role = mmu->root_role;
4875
4876 /*
4877 * Return immediately if no usable root was found, kvm_mmu_reload()
4878 * will establish a valid root prior to the next VM-Enter.
4879 */
4880 if (!fast_pgd_switch(vcpu->kvm, mmu, new_pgd, new_role))
4881 return;
4882
4883 /*
4884 * It's possible that the cached previous root page is obsolete because
4885 * of a change in the MMU generation number. However, changing the
4886 * generation number is accompanied by KVM_REQ_MMU_FREE_OBSOLETE_ROOTS,
4887 * which will free the root set here and allocate a new one.
4888 */
4889 kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
4890
4891 if (force_flush_and_sync_on_reuse) {
4892 kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
4893 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
4894 }
4895
4896 /*
4897 * The last MMIO access's GVA and GPA are cached in the VCPU. When
4898 * switching to a new CR3, that GVA->GPA mapping may no longer be
4899 * valid. So clear any cached MMIO info even when we don't need to sync
4900 * the shadow page tables.
4901 */
4902 vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
4903
4904 /*
4905 * If this is a direct root page, it doesn't have a write flooding
4906 * count. Otherwise, clear the write flooding count.
4907 */
4908 if (!new_role.direct) {
4909 struct kvm_mmu_page *sp = root_to_sp(vcpu->arch.mmu->root.hpa);
4910
4911 if (!WARN_ON_ONCE(!sp))
4912 __clear_sp_write_flooding_count(sp);
4913 }
4914 }
4915 EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
4916
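/*
 * Returns true if *sptep is an MMIO SPTE and has been handled: the SPTE is
 * cleared if the gfn changed, otherwise it is re-marked with the new access.
 */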
4917 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
4918 unsigned int access)
4919 {
4920 if (unlikely(is_mmio_spte(vcpu->kvm, *sptep))) {
4921 if (gfn != get_mmio_spte_gfn(*sptep)) {
4922 mmu_spte_clear_no_track(sptep);
4923 return true;
4924 }
4925
4926 mark_mmio_spte(vcpu, sptep, gfn, access);
4927 return true;
4928 }
4929
4930 return false;
4931 }
4932
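/*
 * Instantiate the guest page-table walker and shadow fault handlers three
 * times via paging_tmpl.h: for EPT, 64-bit (long mode and PAE), and 32-bit
 * guest page tables.
 */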
4933 #define PTTYPE_EPT 18 /* arbitrary */
4934 #define PTTYPE PTTYPE_EPT
4935 #include "paging_tmpl.h"
4936 #undef PTTYPE
4937
4938 #define PTTYPE 64
4939 #include "paging_tmpl.h"
4940 #undef PTTYPE
4941
4942 #define PTTYPE 32
4943 #include "paging_tmpl.h"
4944 #undef PTTYPE
4945
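/*
 * Compute the per-level reserved-bit masks for a non-EPT page-table format,
 * accounting for NX, 1GiB page support, PSE, and AMD-specific quirks.
 */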
4946 static void __reset_rsvds_bits_mask(struct rsvd_bits_validate *rsvd_check,
4947 u64 pa_bits_rsvd, int level, bool nx,
4948 bool gbpages, bool pse, bool amd)
4949 {
4950 u64 gbpages_bit_rsvd = 0;
4951 u64 nonleaf_bit8_rsvd = 0;
4952 u64 high_bits_rsvd;
4953
4954 rsvd_check->bad_mt_xwr = 0;
4955
4956 if (!gbpages)
4957 gbpages_bit_rsvd = rsvd_bits(7, 7);
4958
4959 if (level == PT32E_ROOT_LEVEL)
4960 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 62);
4961 else
4962 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
4963
4964 /* Note, NX doesn't exist in PDPTEs, this is handled below. */
4965 if (!nx)
4966 high_bits_rsvd |= rsvd_bits(63, 63);
4967
4968 /*
4969 * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for
4970 * leaf entries) on AMD CPUs only.
4971 */
4972 if (amd)
4973 nonleaf_bit8_rsvd = rsvd_bits(8, 8);
4974
4975 switch (level) {
4976 case PT32_ROOT_LEVEL:
4977 /* no rsvd bits for 2 level 4K page table entries */
4978 rsvd_check->rsvd_bits_mask[0][1] = 0;
4979 rsvd_check->rsvd_bits_mask[0][0] = 0;
4980 rsvd_check->rsvd_bits_mask[1][0] =
4981 rsvd_check->rsvd_bits_mask[0][0];
4982
4983 if (!pse) {
4984 rsvd_check->rsvd_bits_mask[1][1] = 0;
4985 break;
4986 }
4987
4988 if (is_cpuid_PSE36())
4989 /* 36bits PSE 4MB page */
4990 rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
4991 else
4992 /* 32 bits PSE 4MB page */
4993 rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
4994 break;
4995 case PT32E_ROOT_LEVEL:
4996 rsvd_check->rsvd_bits_mask[0][2] = rsvd_bits(63, 63) |
4997 high_bits_rsvd |
4998 rsvd_bits(5, 8) |
4999 rsvd_bits(1, 2); /* PDPTE */
5000 rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd; /* PDE */
5001 rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd; /* PTE */
5002 rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
5003 rsvd_bits(13, 20); /* large page */
5004 rsvd_check->rsvd_bits_mask[1][0] =
5005 rsvd_check->rsvd_bits_mask[0][0];
5006 break;
5007 case PT64_ROOT_5LEVEL:
5008 rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd |
5009 nonleaf_bit8_rsvd |
5010 rsvd_bits(7, 7);
5011 rsvd_check->rsvd_bits_mask[1][4] =
5012 rsvd_check->rsvd_bits_mask[0][4];
5013 fallthrough;
5014 case PT64_ROOT_4LEVEL:
5015 rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd |
5016 nonleaf_bit8_rsvd |
5017 rsvd_bits(7, 7);
5018 rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd |
5019 gbpages_bit_rsvd;
5020 rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd;
5021 rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
5022 rsvd_check->rsvd_bits_mask[1][3] =
5023 rsvd_check->rsvd_bits_mask[0][3];
5024 rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd |
5025 gbpages_bit_rsvd |
5026 rsvd_bits(13, 29);
5027 rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
5028 rsvd_bits(13, 20); /* large page */
5029 rsvd_check->rsvd_bits_mask[1][0] =
5030 rsvd_check->rsvd_bits_mask[0][0];
5031 break;
5032 }
5033 }
5034
5035 static void reset_guest_rsvds_bits_mask(struct kvm_vcpu *vcpu,
5036 struct kvm_mmu *context)
5037 {
5038 __reset_rsvds_bits_mask(&context->guest_rsvd_check,
5039 vcpu->arch.reserved_gpa_bits,
5040 context->cpu_role.base.level, is_efer_nx(context),
5041 guest_cpu_cap_has(vcpu, X86_FEATURE_GBPAGES),
5042 is_cr4_pse(context),
5043 guest_cpuid_is_amd_compatible(vcpu));
5044 }
5045
5046 static void __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
5047 u64 pa_bits_rsvd, bool execonly,
5048 int huge_page_level)
5049 {
5050 u64 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
5051 u64 large_1g_rsvd = 0, large_2m_rsvd = 0;
5052 u64 bad_mt_xwr;
5053
5054 if (huge_page_level < PG_LEVEL_1G)
5055 large_1g_rsvd = rsvd_bits(7, 7);
5056 if (huge_page_level < PG_LEVEL_2M)
5057 large_2m_rsvd = rsvd_bits(7, 7);
5058
5059 rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd | rsvd_bits(3, 7);
5060 rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd | rsvd_bits(3, 7);
5061 rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd | rsvd_bits(3, 6) | large_1g_rsvd;
5062 rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd | rsvd_bits(3, 6) | large_2m_rsvd;
5063 rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
5064
5065 /* large page */
5066 rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4];
5067 rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3];
5068 rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd | rsvd_bits(12, 29) | large_1g_rsvd;
5069 rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | rsvd_bits(12, 20) | large_2m_rsvd;
5070 rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0];
5071
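	/*
	 * bad_mt_xwr is indexed by the low 6 bits of an EPT entry (the XWR
	 * permission bits plus the 3-bit memory type); a set bit marks that
	 * combination as reserved/illegal.
	 */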
5072 bad_mt_xwr = 0xFFull << (2 * 8); /* bits 3..5 must not be 2 */
5073 bad_mt_xwr |= 0xFFull << (3 * 8); /* bits 3..5 must not be 3 */
5074 bad_mt_xwr |= 0xFFull << (7 * 8); /* bits 3..5 must not be 7 */
5075 bad_mt_xwr |= REPEAT_BYTE(1ull << 2); /* bits 0..2 must not be 010 */
5076 bad_mt_xwr |= REPEAT_BYTE(1ull << 6); /* bits 0..2 must not be 110 */
5077 if (!execonly) {
5078 /* bits 0..2 must not be 100 unless VMX capabilities allow it */
5079 bad_mt_xwr |= REPEAT_BYTE(1ull << 4);
5080 }
5081 rsvd_check->bad_mt_xwr = bad_mt_xwr;
5082 }
5083
5084 static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
5085 struct kvm_mmu *context, bool execonly, int huge_page_level)
5086 {
5087 __reset_rsvds_bits_mask_ept(&context->guest_rsvd_check,
5088 vcpu->arch.reserved_gpa_bits, execonly,
5089 huge_page_level);
5090 }
5091
5092 static inline u64 reserved_hpa_bits(void)
5093 {
5094 return rsvd_bits(kvm_host.maxphyaddr, 63);
5095 }
5096
5097 /*
5098 * The page table on the host is the shadow page table for the page
5099 * table in the guest (or an AMD nested guest); its MMU features
5100 * completely follow the features of the guest.
5101 */
5102 static void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
5103 struct kvm_mmu *context)
5104 {
5105 /* @amd adds a check on bit 8 of non-leaf SPTEs, which KVM shouldn't use anyway. */
5106 bool is_amd = true;
5107 /* KVM doesn't use 2-level page tables for the shadow MMU. */
5108 bool is_pse = false;
5109 struct rsvd_bits_validate *shadow_zero_check;
5110 int i;
5111
5112 WARN_ON_ONCE(context->root_role.level < PT32E_ROOT_LEVEL);
5113
5114 shadow_zero_check = &context->shadow_zero_check;
5115 __reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
5116 context->root_role.level,
5117 context->root_role.efer_nx,
5118 guest_cpu_cap_has(vcpu, X86_FEATURE_GBPAGES),
5119 is_pse, is_amd);
5120
5121 if (!shadow_me_mask)
5122 return;
5123
5124 for (i = context->root_role.level; --i >= 0;) {
5125 /*
5126 * So far shadow_me_value is a constant during KVM's life
5127 * time. Bits in shadow_me_value are allowed to be set.
5128 * Bits in shadow_me_mask but not in shadow_me_value are
5129 * not allowed to be set.
5130 */
5131 shadow_zero_check->rsvd_bits_mask[0][i] |= shadow_me_mask;
5132 shadow_zero_check->rsvd_bits_mask[1][i] |= shadow_me_mask;
5133 shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_value;
5134 shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_value;
5135 }
5136
5137 }
5138
5139 static inline bool boot_cpu_is_amd(void)
5140 {
5141 WARN_ON_ONCE(!tdp_enabled);
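	/* With TDP enabled, only EPT (Intel) provides an execute bit in SPTEs. */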
5142 return shadow_x_mask == 0;
5143 }
5144
5145 /*
5146 * The direct page table on the host: use as many MMU features as
5147 * possible; however, KVM currently does not do execution-protection.
5148 */
5149 static void reset_tdp_shadow_zero_bits_mask(struct kvm_mmu *context)
5150 {
5151 struct rsvd_bits_validate *shadow_zero_check;
5152 int i;
5153
5154 shadow_zero_check = &context->shadow_zero_check;
5155
5156 if (boot_cpu_is_amd())
5157 __reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
5158 context->root_role.level, true,
5159 boot_cpu_has(X86_FEATURE_GBPAGES),
5160 false, true);
5161 else
5162 __reset_rsvds_bits_mask_ept(shadow_zero_check,
5163 reserved_hpa_bits(), false,
5164 max_huge_page_level);
5165
5166 if (!shadow_me_mask)
5167 return;
5168
5169 for (i = context->root_role.level; --i >= 0;) {
5170 shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
5171 shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
5172 }
5173 }
5174
5175 /*
5176 * Same as the comments in reset_shadow_zero_bits_mask(), except this
5177 * is the shadow page table for an Intel nested guest.
5178 */
5179 static void
5180 reset_ept_shadow_zero_bits_mask(struct kvm_mmu *context, bool execonly)
5181 {
5182 __reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
5183 reserved_hpa_bits(), execonly,
5184 max_huge_page_level);
5185 }
5186
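/*
 * BYTE_MASK(access) expands a 3-bit ACC_*_MASK into an 8-bit mask with one
 * bit per possible UWX permission combination (0..7) that includes the given
 * access type, e.g. BYTE_MASK(ACC_EXEC_MASK) sets every odd-numbered bit
 * since the execute bit is bit 0 of the combination index.
 */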
5187 #define BYTE_MASK(access) \
5188 ((1 & (access) ? 2 : 0) | \
5189 (2 & (access) ? 4 : 0) | \
5190 (3 & (access) ? 8 : 0) | \
5191 (4 & (access) ? 16 : 0) | \
5192 (5 & (access) ? 32 : 0) | \
5193 (6 & (access) ? 64 : 0) | \
5194 (7 & (access) ? 128 : 0))
5195
5196
5197 static void update_permission_bitmask(struct kvm_mmu *mmu, bool ept)
5198 {
5199 unsigned byte;
5200
5201 const u8 x = BYTE_MASK(ACC_EXEC_MASK);
5202 const u8 w = BYTE_MASK(ACC_WRITE_MASK);
5203 const u8 u = BYTE_MASK(ACC_USER_MASK);
5204
5205 bool cr4_smep = is_cr4_smep(mmu);
5206 bool cr4_smap = is_cr4_smap(mmu);
5207 bool cr0_wp = is_cr0_wp(mmu);
5208 bool efer_nx = is_efer_nx(mmu);
5209
5210 for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
5211 unsigned pfec = byte << 1;
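		/*
		 * The permissions table is indexed by the page fault error
		 * code shifted right by one (the low P bit isn't used), so
		 * reconstruct the PFEC that corresponds to this table entry.
		 */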
5212
5213 /*
5214 * Each "*f" variable has a 1 bit for each UWX value
5215 * that causes a fault with the given PFEC.
5216 */
5217
5218 /* Faults from writes to non-writable pages */
5219 u8 wf = (pfec & PFERR_WRITE_MASK) ? (u8)~w : 0;
5220 /* Faults from user mode accesses to supervisor pages */
5221 u8 uf = (pfec & PFERR_USER_MASK) ? (u8)~u : 0;
5222 /* Faults from fetches of non-executable pages */
5223 u8 ff = (pfec & PFERR_FETCH_MASK) ? (u8)~x : 0;
5224 /* Faults from kernel mode fetches of user pages */
5225 u8 smepf = 0;
5226 /* Faults from kernel mode accesses of user pages */
5227 u8 smapf = 0;
5228
5229 if (!ept) {
5230 /* Faults from kernel mode accesses to user pages */
5231 u8 kf = (pfec & PFERR_USER_MASK) ? 0 : u;
5232
5233 /* Not really needed: !nx will cause pte.nx to fault */
5234 if (!efer_nx)
5235 ff = 0;
5236
5237 /* Allow supervisor writes if !cr0.wp */
5238 if (!cr0_wp)
5239 wf = (pfec & PFERR_USER_MASK) ? wf : 0;
5240
5241 /* Disallow supervisor fetches of user code if cr4.smep */
5242 if (cr4_smep)
5243 smepf = (pfec & PFERR_FETCH_MASK) ? kf : 0;
5244
5245 /*
5246 * SMAP: kernel-mode data accesses from user-mode
5247 * mappings should fault. A fault is considered
5248 * as a SMAP violation if all of the following
5249 * conditions are true:
5250 * - X86_CR4_SMAP is set in CR4
5251 * - A user page is accessed
5252 * - The access is not a fetch
5253 * - The access is supervisor mode
5254 * - If implicit supervisor access or X86_EFLAGS_AC is clear
5255 *
5256 * Here, we cover the first four conditions.
5257 * The fifth is computed dynamically in permission_fault();
5258 * PFERR_RSVD_MASK bit will be set in PFEC if the access is
5259 * *not* subject to SMAP restrictions.
5260 */
5261 if (cr4_smap)
5262 smapf = (pfec & (PFERR_RSVD_MASK|PFERR_FETCH_MASK)) ? 0 : kf;
5263 }
5264
5265 mmu->permissions[byte] = ff | uf | wf | smepf | smapf;
5266 }
5267 }
5268
5269 /*
5270 * PKU is an additional mechanism by which the paging controls access to
5271 * user-mode addresses based on the value in the PKRU register. Protection
5272 * key violations are reported through a bit in the page fault error code.
5273 * Unlike other bits of the error code, the PK bit is not known at the
5274 * call site of e.g. gva_to_gpa; it must be computed directly in
5275 * permission_fault based on two bits of PKRU, on some machine state (CR4,
5276 * CR0, EFER, CPL), and on other bits of the error code and the page tables.
5277 *
5278 * In particular the following conditions come from the error code, the
5279 * page tables and the machine state:
5280 * - PK is always zero unless CR4.PKE=1 and EFER.LMA=1
5281 * - PK is always zero if RSVD=1 (reserved bit set) or F=1 (instruction fetch)
5282 * - PK is always zero if U=0 in the page tables
5283 * - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access.
5284 *
5285 * The PKRU bitmask caches the result of these four conditions. The error
5286 * code (minus the P bit) and the page table's U bit form an index into the
5287 * PKRU bitmask. Two bits of the PKRU bitmask are then extracted and ANDed
5288 * with the two bits of the PKRU register corresponding to the protection key.
5289 * For the first three conditions above the bits will be 00, thus masking
5290 * away both AD and WD. For all reads or if the last condition holds, WD
5291 * only will be masked away.
5292 */
5293 static void update_pkru_bitmask(struct kvm_mmu *mmu)
5294 {
5295 unsigned bit;
5296 bool wp;
5297
5298 mmu->pkru_mask = 0;
5299
5300 if (!is_cr4_pke(mmu))
5301 return;
5302
5303 wp = is_cr0_wp(mmu);
5304
5305 for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) {
5306 unsigned pfec, pkey_bits;
5307 bool check_pkey, check_write, ff, uf, wf, pte_user;
5308
5309 pfec = bit << 1;
5310 ff = pfec & PFERR_FETCH_MASK;
5311 uf = pfec & PFERR_USER_MASK;
5312 wf = pfec & PFERR_WRITE_MASK;
5313
5314 /* PFEC.RSVD is replaced by ACC_USER_MASK. */
5315 pte_user = pfec & PFERR_RSVD_MASK;
5316
5317 /*
5318 * Only need to check the access which is not an
5319 * instruction fetch and is to a user page.
5320 */
5321 check_pkey = (!ff && pte_user);
5322 /*
5323 * write access is controlled by PKRU if it is a
5324 * user access or CR0.WP = 1.
5325 */
5326 check_write = check_pkey && wf && (uf || wp);
5327
5328 /* PKRU.AD stops both read and write access. */
5329 pkey_bits = !!check_pkey;
5330 /* PKRU.WD stops write access. */
5331 pkey_bits |= (!!check_write) << 1;
5332
5333 mmu->pkru_mask |= (pkey_bits & 3) << pfec;
5334 }
5335 }
5336
5337 static void reset_guest_paging_metadata(struct kvm_vcpu *vcpu,
5338 struct kvm_mmu *mmu)
5339 {
5340 if (!is_cr0_pg(mmu))
5341 return;
5342
5343 reset_guest_rsvds_bits_mask(vcpu, mmu);
5344 update_permission_bitmask(mmu, false);
5345 update_pkru_bitmask(mmu);
5346 }
5347
5348 static void paging64_init_context(struct kvm_mmu *context)
5349 {
5350 context->page_fault = paging64_page_fault;
5351 context->gva_to_gpa = paging64_gva_to_gpa;
5352 context->sync_spte = paging64_sync_spte;
5353 }
5354
5355 static void paging32_init_context(struct kvm_mmu *context)
5356 {
5357 context->page_fault = paging32_page_fault;
5358 context->gva_to_gpa = paging32_gva_to_gpa;
5359 context->sync_spte = paging32_sync_spte;
5360 }
5361
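/*
 * Compute the vCPU's "CPU role", i.e. the paging configuration the guest
 * itself sees (CR0/CR4/EFER plus SMM/guest mode), which is tracked
 * separately from the root_role describing the page tables KVM builds.
 */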
5362 static union kvm_cpu_role kvm_calc_cpu_role(struct kvm_vcpu *vcpu,
5363 const struct kvm_mmu_role_regs *regs)
5364 {
5365 union kvm_cpu_role role = {0};
5366
5367 role.base.access = ACC_ALL;
5368 role.base.smm = is_smm(vcpu);
5369 role.base.guest_mode = is_guest_mode(vcpu);
5370 role.ext.valid = 1;
5371
5372 if (!____is_cr0_pg(regs)) {
5373 role.base.direct = 1;
5374 return role;
5375 }
5376
5377 role.base.efer_nx = ____is_efer_nx(regs);
5378 role.base.cr0_wp = ____is_cr0_wp(regs);
5379 role.base.smep_andnot_wp = ____is_cr4_smep(regs) && !____is_cr0_wp(regs);
5380 role.base.smap_andnot_wp = ____is_cr4_smap(regs) && !____is_cr0_wp(regs);
5381 role.base.has_4_byte_gpte = !____is_cr4_pae(regs);
5382
5383 if (____is_efer_lma(regs))
5384 role.base.level = ____is_cr4_la57(regs) ? PT64_ROOT_5LEVEL
5385 : PT64_ROOT_4LEVEL;
5386 else if (____is_cr4_pae(regs))
5387 role.base.level = PT32E_ROOT_LEVEL;
5388 else
5389 role.base.level = PT32_ROOT_LEVEL;
5390
5391 role.ext.cr4_smep = ____is_cr4_smep(regs);
5392 role.ext.cr4_smap = ____is_cr4_smap(regs);
5393 role.ext.cr4_pse = ____is_cr4_pse(regs);
5394
5395 /* PKEY and LA57 are active iff long mode is active. */
5396 role.ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs);
5397 role.ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs);
5398 role.ext.efer_lma = ____is_efer_lma(regs);
5399 return role;
5400 }
5401
5402 void __kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
5403 struct kvm_mmu *mmu)
5404 {
5405 const bool cr0_wp = kvm_is_cr0_bit_set(vcpu, X86_CR0_WP);
5406
5407 BUILD_BUG_ON((KVM_MMU_CR0_ROLE_BITS & KVM_POSSIBLE_CR0_GUEST_BITS) != X86_CR0_WP);
5408 BUILD_BUG_ON((KVM_MMU_CR4_ROLE_BITS & KVM_POSSIBLE_CR4_GUEST_BITS));
5409
5410 if (is_cr0_wp(mmu) == cr0_wp)
5411 return;
5412
5413 mmu->cpu_role.base.cr0_wp = cr0_wp;
5414 reset_guest_paging_metadata(vcpu, mmu);
5415 }
5416
5417 static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
5418 {
5419 /* tdp_root_level is the architecture-forced level; use it if nonzero */
5420 if (tdp_root_level)
5421 return tdp_root_level;
5422
5423 /* Use 5-level TDP if and only if it's useful/necessary. */
5424 if (max_tdp_level == 5 && cpuid_maxphyaddr(vcpu) <= 48)
5425 return 4;
5426
5427 return max_tdp_level;
5428 }
5429
5430 u8 kvm_mmu_get_max_tdp_level(void)
5431 {
5432 return tdp_root_level ? tdp_root_level : max_tdp_level;
5433 }
5434
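/*
 * With TDP the hardware page tables translate GPA->HPA and do not depend on
 * the guest's CR0/CR4/EFER paging settings, so the root role is essentially
 * hard-coded; only SMM, guest_mode, A/D support and the TDP root level vary.
 */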
5435 static union kvm_mmu_page_role
5436 kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
5437 union kvm_cpu_role cpu_role)
5438 {
5439 union kvm_mmu_page_role role = {0};
5440
5441 role.access = ACC_ALL;
5442 role.cr0_wp = true;
5443 role.efer_nx = true;
5444 role.smm = cpu_role.base.smm;
5445 role.guest_mode = cpu_role.base.guest_mode;
5446 role.ad_disabled = !kvm_ad_enabled;
5447 role.level = kvm_mmu_get_tdp_level(vcpu);
5448 role.direct = true;
5449 role.has_4_byte_gpte = false;
5450
5451 return role;
5452 }
5453
5454 static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
5455 union kvm_cpu_role cpu_role)
5456 {
5457 struct kvm_mmu *context = &vcpu->arch.root_mmu;
5458 union kvm_mmu_page_role root_role = kvm_calc_tdp_mmu_root_page_role(vcpu, cpu_role);
5459
5460 if (cpu_role.as_u64 == context->cpu_role.as_u64 &&
5461 root_role.word == context->root_role.word)
5462 return;
5463
5464 context->cpu_role.as_u64 = cpu_role.as_u64;
5465 context->root_role.word = root_role.word;
5466 context->page_fault = kvm_tdp_page_fault;
5467 context->sync_spte = NULL;
5468 context->get_guest_pgd = get_guest_cr3;
5469 context->get_pdptr = kvm_pdptr_read;
5470 context->inject_page_fault = kvm_inject_page_fault;
5471
5472 if (!is_cr0_pg(context))
5473 context->gva_to_gpa = nonpaging_gva_to_gpa;
5474 else if (is_cr4_pae(context))
5475 context->gva_to_gpa = paging64_gva_to_gpa;
5476 else
5477 context->gva_to_gpa = paging32_gva_to_gpa;
5478
5479 reset_guest_paging_metadata(vcpu, context);
5480 reset_tdp_shadow_zero_bits_mask(context);
5481 }
5482
5483 static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
5484 union kvm_cpu_role cpu_role,
5485 union kvm_mmu_page_role root_role)
5486 {
5487 if (cpu_role.as_u64 == context->cpu_role.as_u64 &&
5488 root_role.word == context->root_role.word)
5489 return;
5490
5491 context->cpu_role.as_u64 = cpu_role.as_u64;
5492 context->root_role.word = root_role.word;
5493
5494 if (!is_cr0_pg(context))
5495 nonpaging_init_context(context);
5496 else if (is_cr4_pae(context))
5497 paging64_init_context(context);
5498 else
5499 paging32_init_context(context);
5500
5501 reset_guest_paging_metadata(vcpu, context);
5502 reset_shadow_zero_bits_mask(vcpu, context);
5503 }
5504
5505 static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
5506 union kvm_cpu_role cpu_role)
5507 {
5508 struct kvm_mmu *context = &vcpu->arch.root_mmu;
5509 union kvm_mmu_page_role root_role;
5510
5511 root_role = cpu_role.base;
5512
5513 /* KVM uses PAE paging whenever the guest isn't using 64-bit paging. */
5514 root_role.level = max_t(u32, root_role.level, PT32E_ROOT_LEVEL);
5515
5516 /*
5517 * KVM forces EFER.NX=1 when TDP is disabled, reflect it in the MMU role.
5518 * KVM uses NX when TDP is disabled to handle a variety of scenarios,
5519 * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and
5520 * to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0.
5521 * The iTLB multi-hit workaround can be toggled at any time, so assume
5522 * NX can be used by any non-nested shadow MMU to avoid having to reset
5523 * MMU contexts.
5524 */
5525 root_role.efer_nx = true;
5526
5527 shadow_mmu_init_context(vcpu, context, cpu_role, root_role);
5528 }
5529
5530 void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
5531 unsigned long cr4, u64 efer, gpa_t nested_cr3)
5532 {
5533 struct kvm_mmu *context = &vcpu->arch.guest_mmu;
5534 struct kvm_mmu_role_regs regs = {
5535 .cr0 = cr0,
5536 .cr4 = cr4 & ~X86_CR4_PKE,
5537 .efer = efer,
5538 };
5539 union kvm_cpu_role cpu_role = kvm_calc_cpu_role(vcpu, &regs);
5540 union kvm_mmu_page_role root_role;
5541
5542 /* NPT requires CR0.PG=1. */
5543 WARN_ON_ONCE(cpu_role.base.direct || !cpu_role.base.guest_mode);
5544
5545 root_role = cpu_role.base;
5546 root_role.level = kvm_mmu_get_tdp_level(vcpu);
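	/*
	 * If KVM must use 5-level NPT to shadow L1's 4-level NPT, the extra
	 * top level acts as a passthrough level with no corresponding guest
	 * PTE.
	 */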
5547 if (root_role.level == PT64_ROOT_5LEVEL &&
5548 cpu_role.base.level == PT64_ROOT_4LEVEL)
5549 root_role.passthrough = 1;
5550
5551 shadow_mmu_init_context(vcpu, context, cpu_role, root_role);
5552 kvm_mmu_new_pgd(vcpu, nested_cr3);
5553 }
5554 EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
5555
5556 static union kvm_cpu_role
5557 kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
5558 bool execonly, u8 level)
5559 {
5560 union kvm_cpu_role role = {0};
5561
5562 /*
5563 * KVM does not support SMM transfer monitors, and consequently does not
5564 * support the "entry to SMM" control either. role.base.smm is always 0.
5565 */
5566 WARN_ON_ONCE(is_smm(vcpu));
5567 role.base.level = level;
5568 role.base.has_4_byte_gpte = false;
5569 role.base.direct = false;
5570 role.base.ad_disabled = !accessed_dirty;
5571 role.base.guest_mode = true;
5572 role.base.access = ACC_ALL;
5573
5574 role.ext.word = 0;
5575 role.ext.execonly = execonly;
5576 role.ext.valid = 1;
5577
5578 return role;
5579 }
5580
5581 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
5582 int huge_page_level, bool accessed_dirty,
5583 gpa_t new_eptp)
5584 {
5585 struct kvm_mmu *context = &vcpu->arch.guest_mmu;
5586 u8 level = vmx_eptp_page_walk_level(new_eptp);
5587 union kvm_cpu_role new_mode =
5588 kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
5589 execonly, level);
5590
5591 if (new_mode.as_u64 != context->cpu_role.as_u64) {
5592 /* EPT, and thus nested EPT, does not consume CR0, CR4, nor EFER. */
5593 context->cpu_role.as_u64 = new_mode.as_u64;
5594 context->root_role.word = new_mode.base.word;
5595
5596 context->page_fault = ept_page_fault;
5597 context->gva_to_gpa = ept_gva_to_gpa;
5598 context->sync_spte = ept_sync_spte;
5599
5600 update_permission_bitmask(context, true);
5601 context->pkru_mask = 0;
5602 reset_rsvds_bits_mask_ept(vcpu, context, execonly, huge_page_level);
5603 reset_ept_shadow_zero_bits_mask(context, execonly);
5604 }
5605
5606 kvm_mmu_new_pgd(vcpu, new_eptp);
5607 }
5608 EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
5609
5610 static void init_kvm_softmmu(struct kvm_vcpu *vcpu,
5611 union kvm_cpu_role cpu_role)
5612 {
5613 struct kvm_mmu *context = &vcpu->arch.root_mmu;
5614
5615 kvm_init_shadow_mmu(vcpu, cpu_role);
5616
5617 context->get_guest_pgd = get_guest_cr3;
5618 context->get_pdptr = kvm_pdptr_read;
5619 context->inject_page_fault = kvm_inject_page_fault;
5620 }
5621
5622 static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu,
5623 union kvm_cpu_role new_mode)
5624 {
5625 struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
5626
5627 if (new_mode.as_u64 == g_context->cpu_role.as_u64)
5628 return;
5629
5630 g_context->cpu_role.as_u64 = new_mode.as_u64;
5631 g_context->get_guest_pgd = get_guest_cr3;
5632 g_context->get_pdptr = kvm_pdptr_read;
5633 g_context->inject_page_fault = kvm_inject_page_fault;
5634
5635 /*
5636 * L2 page tables are never shadowed, so there is no need to sync
5637 * SPTEs.
5638 */
5639 g_context->sync_spte = NULL;
5640
5641 /*
5642 * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using
5643 * L1's nested page tables (e.g. EPT12). The nested translation
5644 * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using
5645 * L2's page tables as the first level of translation and L1's
5646 * nested page tables as the second level of translation. Basically
5647 * the gva_to_gpa functions between mmu and nested_mmu are swapped.
5648 */
5649 if (!is_paging(vcpu))
5650 g_context->gva_to_gpa = nonpaging_gva_to_gpa;
5651 else if (is_long_mode(vcpu))
5652 g_context->gva_to_gpa = paging64_gva_to_gpa;
5653 else if (is_pae(vcpu))
5654 g_context->gva_to_gpa = paging64_gva_to_gpa;
5655 else
5656 g_context->gva_to_gpa = paging32_gva_to_gpa;
5657
5658 reset_guest_paging_metadata(vcpu, g_context);
5659 }
5660
5661 void kvm_init_mmu(struct kvm_vcpu *vcpu)
5662 {
5663 struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
5664 union kvm_cpu_role cpu_role = kvm_calc_cpu_role(vcpu, &regs);
5665
5666 if (mmu_is_nested(vcpu))
5667 init_kvm_nested_mmu(vcpu, cpu_role);
5668 else if (tdp_enabled)
5669 init_kvm_tdp_mmu(vcpu, cpu_role);
5670 else
5671 init_kvm_softmmu(vcpu, cpu_role);
5672 }
5673 EXPORT_SYMBOL_GPL(kvm_init_mmu);
5674
5675 void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
5676 {
5677 /*
5678 * Invalidate all MMU roles to force them to reinitialize as CPUID
5679 * information is factored into reserved bit calculations.
5680 *
5681 * Correctly handling multiple vCPU models with respect to paging and
5682 * physical address properties in a single VM would require tracking
5683 * all relevant CPUID information in kvm_mmu_page_role. That is very
5684 * undesirable as it would increase the memory requirements for
5685 * gfn_write_track (see struct kvm_mmu_page_role comments). For now
5686 * that problem is swept under the rug; KVM's CPUID API is horrific and
5687 * it's all but impossible to solve it without introducing a new API.
5688 */
5689 vcpu->arch.root_mmu.root_role.invalid = 1;
5690 vcpu->arch.guest_mmu.root_role.invalid = 1;
5691 vcpu->arch.nested_mmu.root_role.invalid = 1;
5692 vcpu->arch.root_mmu.cpu_role.ext.valid = 0;
5693 vcpu->arch.guest_mmu.cpu_role.ext.valid = 0;
5694 vcpu->arch.nested_mmu.cpu_role.ext.valid = 0;
5695 kvm_mmu_reset_context(vcpu);
5696
5697 /*
5698 * Changing guest CPUID after KVM_RUN is forbidden, see the comment in
5699 * kvm_arch_vcpu_ioctl().
5700 */
5701 KVM_BUG_ON(kvm_vcpu_has_run(vcpu), vcpu->kvm);
5702 }
5703
5704 void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
5705 {
5706 kvm_mmu_unload(vcpu);
5707 kvm_init_mmu(vcpu);
5708 }
5709 EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
5710
5711 int kvm_mmu_load(struct kvm_vcpu *vcpu)
5712 {
5713 int r;
5714
5715 r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->root_role.direct);
5716 if (r)
5717 goto out;
5718 r = mmu_alloc_special_roots(vcpu);
5719 if (r)
5720 goto out;
5721 if (vcpu->arch.mmu->root_role.direct)
5722 r = mmu_alloc_direct_roots(vcpu);
5723 else
5724 r = mmu_alloc_shadow_roots(vcpu);
5725 if (r)
5726 goto out;
5727
5728 kvm_mmu_sync_roots(vcpu);
5729
5730 kvm_mmu_load_pgd(vcpu);
5731
5732 /*
5733 * Flush any TLB entries for the new root, the provenance of the root
5734 * is unknown. Even if KVM ensures there are no stale TLB entries
5735 * for a freed root, in theory another hypervisor could have left
5736 * stale entries. Flushing on alloc also allows KVM to skip the TLB
5737 * flush when freeing a root (see kvm_tdp_mmu_put_root()).
5738 */
5739 kvm_x86_call(flush_tlb_current)(vcpu);
5740 out:
5741 return r;
5742 }
5743
5744 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
5745 {
5746 struct kvm *kvm = vcpu->kvm;
5747
5748 kvm_mmu_free_roots(kvm, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);
5749 WARN_ON_ONCE(VALID_PAGE(vcpu->arch.root_mmu.root.hpa));
5750 kvm_mmu_free_roots(kvm, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
5751 WARN_ON_ONCE(VALID_PAGE(vcpu->arch.guest_mmu.root.hpa));
5752 vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
5753 }
5754
5755 static bool is_obsolete_root(struct kvm *kvm, hpa_t root_hpa)
5756 {
5757 struct kvm_mmu_page *sp;
5758
5759 if (!VALID_PAGE(root_hpa))
5760 return false;
5761
5762 /*
5763 * When freeing obsolete roots, treat roots as obsolete if they don't
5764 * have an associated shadow page, as it's impossible to determine if
5765 * such roots are fresh or stale. This does mean KVM will get false
5766 * positives and free roots that don't strictly need to be freed, but
5767 * such false positives are relatively rare:
5768 *
5769 * (a) only PAE paging and nested NPT have roots without shadow pages
5770 * (or any shadow paging flavor with a dummy root, see note below)
5771 * (b) remote reloads due to a memslot update obsoletes _all_ roots
5772 * (c) KVM doesn't track previous roots for PAE paging, and the guest
5773 * is unlikely to zap an in-use PGD.
5774 *
5775 * Note! Dummy roots are unique in that they are obsoleted by memslot
5776 * _creation_! See also FNAME(fetch).
5777 */
5778 sp = root_to_sp(root_hpa);
5779 return !sp || is_obsolete_sp(kvm, sp);
5780 }
5781
5782 static void __kvm_mmu_free_obsolete_roots(struct kvm *kvm, struct kvm_mmu *mmu)
5783 {
5784 unsigned long roots_to_free = 0;
5785 int i;
5786
5787 if (is_obsolete_root(kvm, mmu->root.hpa))
5788 roots_to_free |= KVM_MMU_ROOT_CURRENT;
5789
5790 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
5791 if (is_obsolete_root(kvm, mmu->prev_roots[i].hpa))
5792 roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
5793 }
5794
5795 if (roots_to_free)
5796 kvm_mmu_free_roots(kvm, mmu, roots_to_free);
5797 }
5798
5799 void kvm_mmu_free_obsolete_roots(struct kvm_vcpu *vcpu)
5800 {
5801 __kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.root_mmu);
5802 __kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.guest_mmu);
5803 }
5804
5805 static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
5806 int *bytes)
5807 {
5808 u64 gentry = 0;
5809 int r;
5810
5811 /*
5812 * Assume that the pte write is on a page table of the same type
5813 * as the current vcpu paging mode, since we update the sptes only
5814 * when they have the same mode.
5815 */
5816 if (is_pae(vcpu) && *bytes == 4) {
5817 /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
5818 *gpa &= ~(gpa_t)7;
5819 *bytes = 8;
5820 }
5821
5822 if (*bytes == 4 || *bytes == 8) {
5823 r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes);
5824 if (r)
5825 gentry = 0;
5826 }
5827
5828 return gentry;
5829 }
5830
5831 /*
5832 * If we're seeing too many writes to a page, it may no longer be a page table,
5833 * or we may be forking, in which case it is better to unmap the page.
5834 */
5835 static bool detect_write_flooding(struct kvm_mmu_page *sp)
5836 {
5837 /*
5838 * Skip write-flooding detection for an sp whose level is 1, because it
5839 * can become unsync, in which case the guest page is not write-protected.
5840 */
5841 if (sp->role.level == PG_LEVEL_4K)
5842 return false;
5843
5844 atomic_inc(&sp->write_flooding_count);
5845 return atomic_read(&sp->write_flooding_count) >= 3;
5846 }
5847
5848 /*
5849 * Misaligned accesses are too much trouble to fix up; also, they usually
5850 * indicate a page is not used as a page table.
5851 */
5852 static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
5853 int bytes)
5854 {
5855 unsigned offset, pte_size, misaligned;
5856
5857 offset = offset_in_page(gpa);
5858 pte_size = sp->role.has_4_byte_gpte ? 4 : 8;
5859
5860 /*
5861 * Sometimes the OS writes only the last byte to update status
5862 * bits; for example, Linux uses the andb instruction in clear_bit().
5863 */
5864 if (!(offset & (pte_size - 1)) && bytes == 1)
5865 return false;
5866
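	/*
	 * The XOR below is nonzero if the first and last bytes of the write
	 * fall into different GPTE-sized slots, i.e. the write straddles a
	 * GPTE boundary.
	 */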
5867 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
5868 misaligned |= bytes < 4;
5869
5870 return misaligned;
5871 }
5872
5873 static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
5874 {
5875 unsigned page_offset, quadrant;
5876 u64 *spte;
5877 int level;
5878
5879 page_offset = offset_in_page(gpa);
5880 level = sp->role.level;
5881 *nspte = 1;
5882 if (sp->role.has_4_byte_gpte) {
5883 page_offset <<= 1; /* 32->64 */
5884 /*
5885 * A 32-bit pde maps 4MB while the shadow pdes map
5886 * only 2MB. So we need to double the offset again
5887 * and zap two pdes instead of one.
5888 */
5889 if (level == PT32_ROOT_LEVEL) {
5890 page_offset &= ~7; /* kill rounding error */
5891 page_offset <<= 1;
5892 *nspte = 2;
5893 }
5894 quadrant = page_offset >> PAGE_SHIFT;
5895 page_offset &= ~PAGE_MASK;
5896 if (quadrant != sp->role.quadrant)
5897 return NULL;
5898 }
5899
5900 spte = &sp->spt[page_offset / sizeof(*spte)];
5901 return spte;
5902 }
5903
5904 void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
5905 int bytes)
5906 {
5907 gfn_t gfn = gpa >> PAGE_SHIFT;
5908 struct kvm_mmu_page *sp;
5909 LIST_HEAD(invalid_list);
5910 u64 entry, gentry, *spte;
5911 int npte;
5912 bool flush = false;
5913
5914 /*
5915 * When emulating guest writes, ensure the written value is visible to
5916 * any task that is handling page faults before checking whether or not
5917 * KVM is shadowing a guest PTE. This ensures either KVM will create
5918 * the correct SPTE in the page fault handler, or this task will see
5919 * a non-zero indirect_shadow_pages. Pairs with the smp_mb() in
5920 * account_shadowed().
5921 */
5922 smp_mb();
5923 if (!vcpu->kvm->arch.indirect_shadow_pages)
5924 return;
5925
5926 write_lock(&vcpu->kvm->mmu_lock);
5927
5928 gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);
5929
5930 ++vcpu->kvm->stat.mmu_pte_write;
5931
5932 for_each_gfn_valid_sp_with_gptes(vcpu->kvm, sp, gfn) {
5933 if (detect_write_misaligned(sp, gpa, bytes) ||
5934 detect_write_flooding(sp)) {
5935 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
5936 ++vcpu->kvm->stat.mmu_flooded;
5937 continue;
5938 }
5939
5940 spte = get_written_sptes(sp, gpa, &npte);
5941 if (!spte)
5942 continue;
5943
5944 while (npte--) {
5945 entry = *spte;
5946 mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL);
5947 if (gentry && sp->role.level != PG_LEVEL_4K)
5948 ++vcpu->kvm->stat.mmu_pde_zapped;
5949 if (is_shadow_present_pte(entry))
5950 flush = true;
5951 ++spte;
5952 }
5953 }
5954 kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush);
5955 write_unlock(&vcpu->kvm->mmu_lock);
5956 }
5957
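/*
 * Check for a write #NPF that occurred while the CPU was walking the guest's
 * own page tables, as indicated by PFERR_GUEST_PAGE_MASK in the error code
 * (e.g. due to hardware A/D-bit updates during the walk).
 */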
5958 static bool is_write_to_guest_page_table(u64 error_code)
5959 {
5960 const u64 mask = PFERR_GUEST_PAGE_MASK | PFERR_WRITE_MASK | PFERR_PRESENT_MASK;
5961
5962 return (error_code & mask) == mask;
5963 }
5964
5965 static int kvm_mmu_write_protect_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
5966 u64 error_code, int *emulation_type)
5967 {
5968 bool direct = vcpu->arch.mmu->root_role.direct;
5969
5970 /*
5971 * Do not try to unprotect and retry if the vCPU re-faulted on the same
5972 * RIP with the same address that was previously unprotected, as doing
5973 * so will likely put the vCPU into an infinite loop. E.g. if the vCPU uses
5974 * a non-page-table modifying instruction on the PDE that points to the
5975 * instruction, then unprotecting the gfn will unmap the instruction's
5976 * code, i.e. make it impossible for the instruction to ever complete.
5977 */
5978 if (vcpu->arch.last_retry_eip == kvm_rip_read(vcpu) &&
5979 vcpu->arch.last_retry_addr == cr2_or_gpa)
5980 return RET_PF_EMULATE;
5981
5982 /*
5983 * Reset the unprotect+retry values that guard against infinite loops.
5984 * The values will be refreshed if KVM explicitly unprotects a gfn and
5985 * retries, in all other cases it's safe to retry in the future even if
5986 * the next page fault happens on the same RIP+address.
5987 */
5988 vcpu->arch.last_retry_eip = 0;
5989 vcpu->arch.last_retry_addr = 0;
5990
5991 /*
5992 * It should be impossible to reach this point with an MMIO cache hit,
5993 * as RET_PF_WRITE_PROTECTED is returned if and only if there's a valid,
5994 * writable memslot, and creating a memslot should invalidate the MMIO
5995 * cache by way of changing the memslot generation. WARN and disallow
5996 * retry if MMIO is detected, as retrying MMIO emulation is pointless
5997 * and could put the vCPU into an infinite loop because the processor
5998 * will keep faulting on the non-existent MMIO address.
5999 */
6000 if (WARN_ON_ONCE(mmio_info_in_cache(vcpu, cr2_or_gpa, direct)))
6001 return RET_PF_EMULATE;
6002
6003 /*
6004 * Before emulating the instruction, check to see if the access was due
6005 * to a read-only violation while the CPU was walking non-nested NPT
6006 * page tables, i.e. for a direct MMU, for _guest_ page tables in L1.
6007 * If L1 is sharing (a subset of) its page tables with L2, e.g. by
6008 * having nCR3 share lower level page tables with hCR3, then when KVM
6009 * (L0) write-protects the nested NPTs, i.e. npt12 entries, KVM is also
6010 * unknowingly write-protecting L1's guest page tables, which KVM isn't
6011 * shadowing.
6012 *
6013 * Because the CPU (by default) walks NPT page tables using a write
6014 * access (to ensure the CPU can do A/D updates), page walks in L1 can
6015 * trigger write faults for the above case even when L1 isn't modifying
6016 * PTEs. As a result, KVM will unnecessarily emulate (or at least, try
6017 * to emulate) an excessive number of L1 instructions; because L1's MMU
6018 * isn't shadowed by KVM, there is no need to write-protect L1's gPTEs
6019 * and thus no need to emulate in order to guarantee forward progress.
6020 *
6021 * Try to unprotect the gfn, i.e. zap any shadow pages, so that L1 can
6022 * proceed without triggering emulation. If one or more shadow pages
6023 * was zapped, skip emulation and resume L1 to let it natively execute
6024 * the instruction. If no shadow pages were zapped, then the write-
6025 * fault is due to something else entirely, i.e. KVM needs to emulate,
6026 * as resuming the guest will put it into an infinite loop.
6027 *
6028 * Note, this code also applies to Intel CPUs, even though it is *very*
6029 * unlikely that an L1 will share its page tables (IA32/PAE/paging64
6030 * format) with L2's page tables (EPT format).
6031 *
6032 * For indirect MMUs, i.e. if KVM is shadowing the current MMU, try to
6033 * unprotect the gfn and retry if an event is awaiting reinjection. If
6034 * KVM emulates multiple instructions before completing event injection,
6035 * the event could be delayed beyond what is architecturally allowed,
6036 * e.g. KVM could inject an IRQ after the TPR has been raised.
6037 */
6038 if (((direct && is_write_to_guest_page_table(error_code)) ||
6039 (!direct && kvm_event_needs_reinjection(vcpu))) &&
6040 kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa))
6041 return RET_PF_RETRY;
6042
6043 /*
6044 * The gfn is write-protected, but if KVM detects it's emulating an
6045 * instruction that is unlikely to be used to modify page tables, or if
6046 * emulation fails, KVM can try to unprotect the gfn and let the CPU
6047 * re-execute the instruction that caused the page fault. Do not allow
6048 * retrying an instruction from a nested guest as KVM is only explicitly
6049 * shadowing L1's page tables, i.e. unprotecting something for L1 isn't
6050 * going to magically fix whatever issue caused L2 to fail.
6051 */
6052 if (!is_guest_mode(vcpu))
6053 *emulation_type |= EMULTYPE_ALLOW_RETRY_PF;
6054
6055 return RET_PF_EMULATE;
6056 }
6057
6058 int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
6059 void *insn, int insn_len)
6060 {
6061 int r, emulation_type = EMULTYPE_PF;
6062 bool direct = vcpu->arch.mmu->root_role.direct;
6063
6064 if (WARN_ON_ONCE(!VALID_PAGE(vcpu->arch.mmu->root.hpa)))
6065 return RET_PF_RETRY;
6066
6067 /*
6068 * Except for reserved faults (emulated MMIO is shared-only), set the
6069 * PFERR_PRIVATE_ACCESS flag for software-protected VMs based on the gfn's
6070 * current attributes, which are the source of truth for such VMs. Note,
6071 * this is wrong for nested MMUs as the GPA is an L2 GPA, but KVM doesn't
6072 * currently support nested virtualization (among many other things)
6073 * for software-protected VMs.
6074 */
6075 if (IS_ENABLED(CONFIG_KVM_SW_PROTECTED_VM) &&
6076 !(error_code & PFERR_RSVD_MASK) &&
6077 vcpu->kvm->arch.vm_type == KVM_X86_SW_PROTECTED_VM &&
6078 kvm_mem_is_private(vcpu->kvm, gpa_to_gfn(cr2_or_gpa)))
6079 error_code |= PFERR_PRIVATE_ACCESS;
6080
6081 r = RET_PF_INVALID;
6082 if (unlikely(error_code & PFERR_RSVD_MASK)) {
6083 if (WARN_ON_ONCE(error_code & PFERR_PRIVATE_ACCESS))
6084 return -EFAULT;
6085
6086 r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct);
6087 if (r == RET_PF_EMULATE)
6088 goto emulate;
6089 }
6090
6091 if (r == RET_PF_INVALID) {
6092 vcpu->stat.pf_taken++;
6093
6094 r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa, error_code, false,
6095 &emulation_type, NULL);
6096 if (KVM_BUG_ON(r == RET_PF_INVALID, vcpu->kvm))
6097 return -EIO;
6098 }
6099
6100 if (r < 0)
6101 return r;
6102
6103 if (r == RET_PF_WRITE_PROTECTED)
6104 r = kvm_mmu_write_protect_fault(vcpu, cr2_or_gpa, error_code,
6105 &emulation_type);
6106
6107 if (r == RET_PF_FIXED)
6108 vcpu->stat.pf_fixed++;
6109 else if (r == RET_PF_EMULATE)
6110 vcpu->stat.pf_emulate++;
6111 else if (r == RET_PF_SPURIOUS)
6112 vcpu->stat.pf_spurious++;
6113
6114 /*
6115 * None of handle_mmio_page_fault(), kvm_mmu_do_page_fault(), or
6116 * kvm_mmu_write_protect_fault() return RET_PF_CONTINUE.
6117 * kvm_mmu_do_page_fault() only uses RET_PF_CONTINUE internally to
6118 * indicate continuing the page fault handling until the final
6119 * page table mapping phase.
6120 */
6121 WARN_ON_ONCE(r == RET_PF_CONTINUE);
6122 if (r != RET_PF_EMULATE)
6123 return r;
6124
6125 emulate:
6126 return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn,
6127 insn_len);
6128 }
6129 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
6130
6131 void kvm_mmu_print_sptes(struct kvm_vcpu *vcpu, gpa_t gpa, const char *msg)
6132 {
6133 u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
6134 int root_level, leaf, level;
6135
6136 leaf = get_sptes_lockless(vcpu, gpa, sptes, &root_level);
6137 if (unlikely(leaf < 0))
6138 return;
6139
6140 pr_err("%s %llx", msg, gpa);
6141 for (level = root_level; level >= leaf; level--)
6142 pr_cont(", spte[%d] = 0x%llx", level, sptes[level]);
6143 pr_cont("\n");
6144 }
6145 EXPORT_SYMBOL_GPL(kvm_mmu_print_sptes);
6146
6147 static void __kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
6148 u64 addr, hpa_t root_hpa)
6149 {
6150 struct kvm_shadow_walk_iterator iterator;
6151
6152 vcpu_clear_mmio_info(vcpu, addr);
6153
6154 /*
6155 * Walking and synchronizing SPTEs both assume they are operating in
6156 * the context of the current MMU, and would need to be reworked if
6157 * this is ever used to sync the guest_mmu, e.g. to emulate INVEPT.
6158 */
6159 if (WARN_ON_ONCE(mmu != vcpu->arch.mmu))
6160 return;
6161
6162 if (!VALID_PAGE(root_hpa))
6163 return;
6164
6165 write_lock(&vcpu->kvm->mmu_lock);
6166 for_each_shadow_entry_using_root(vcpu, root_hpa, addr, iterator) {
6167 struct kvm_mmu_page *sp = sptep_to_sp(iterator.sptep);
6168
6169 if (sp->unsync) {
6170 int ret = kvm_sync_spte(vcpu, sp, iterator.index);
6171
6172 if (ret < 0)
6173 mmu_page_zap_pte(vcpu->kvm, sp, iterator.sptep, NULL);
6174 if (ret)
6175 kvm_flush_remote_tlbs_sptep(vcpu->kvm, iterator.sptep);
6176 }
6177
6178 if (!sp->unsync_children)
6179 break;
6180 }
6181 write_unlock(&vcpu->kvm->mmu_lock);
6182 }
6183
6184 void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
6185 u64 addr, unsigned long roots)
6186 {
6187 int i;
6188
6189 WARN_ON_ONCE(roots & ~KVM_MMU_ROOTS_ALL);
6190
6191 /* It's actually a GPA for vcpu->arch.guest_mmu. */
6192 if (mmu != &vcpu->arch.guest_mmu) {
6193 /* INVLPG on a non-canonical address is a NOP according to the SDM. */
6194 if (is_noncanonical_invlpg_address(addr, vcpu))
6195 return;
6196
6197 kvm_x86_call(flush_tlb_gva)(vcpu, addr);
6198 }
6199
6200 if (!mmu->sync_spte)
6201 return;
6202
6203 if (roots & KVM_MMU_ROOT_CURRENT)
6204 __kvm_mmu_invalidate_addr(vcpu, mmu, addr, mmu->root.hpa);
6205
6206 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
6207 if (roots & KVM_MMU_ROOT_PREVIOUS(i))
6208 __kvm_mmu_invalidate_addr(vcpu, mmu, addr, mmu->prev_roots[i].hpa);
6209 }
6210 }
6211 EXPORT_SYMBOL_GPL(kvm_mmu_invalidate_addr);
6212
6213 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
6214 {
6215 /*
6216 * INVLPG is required to invalidate any global mappings for the VA,
6217 * irrespective of PCID. Blindly sync all roots as it would take
6218 * roughly the same amount of work/time to determine whether any of the
6219 * previous roots have a global mapping.
6220 *
6221 * Mappings not reachable via the current or previous cached roots will
6222 * be synced when switching to that new cr3, so nothing needs to be
6223 * done here for them.
6224 */
6225 kvm_mmu_invalidate_addr(vcpu, vcpu->arch.walk_mmu, gva, KVM_MMU_ROOTS_ALL);
6226 ++vcpu->stat.invlpg;
6227 }
6228 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
6229
6230
6231 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
6232 {
6233 struct kvm_mmu *mmu = vcpu->arch.mmu;
6234 unsigned long roots = 0;
6235 uint i;
6236
6237 if (pcid == kvm_get_active_pcid(vcpu))
6238 roots |= KVM_MMU_ROOT_CURRENT;
6239
6240 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
6241 if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
6242 pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd))
6243 roots |= KVM_MMU_ROOT_PREVIOUS(i);
6244 }
6245
6246 if (roots)
6247 kvm_mmu_invalidate_addr(vcpu, mmu, gva, roots);
6248 ++vcpu->stat.invlpg;
6249
6250 /*
6251 * Mappings not reachable via the current cr3 or the prev_roots will be
6252 * synced when switching to that cr3, so nothing needs to be done here
6253 * for them.
6254 */
6255 }
6256
6257 void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
6258 int tdp_max_root_level, int tdp_huge_page_level)
6259 {
6260 tdp_enabled = enable_tdp;
6261 tdp_root_level = tdp_forced_root_level;
6262 max_tdp_level = tdp_max_root_level;
6263
6264 #ifdef CONFIG_X86_64
6265 tdp_mmu_enabled = tdp_mmu_allowed && tdp_enabled;
6266 #endif
6267 /*
6268 * max_huge_page_level reflects KVM's MMU capabilities irrespective
6269 * of kernel support, e.g. KVM may be capable of using 1GB pages when
6270 * the kernel is not. But, KVM never creates a page size greater than
6271 * what is used by the kernel for any given HVA, i.e. the kernel's
6272 * capabilities are ultimately consulted by kvm_mmu_hugepage_adjust().
6273 */
6274 if (tdp_enabled)
6275 max_huge_page_level = tdp_huge_page_level;
6276 else if (boot_cpu_has(X86_FEATURE_GBPAGES))
6277 max_huge_page_level = PG_LEVEL_1G;
6278 else
6279 max_huge_page_level = PG_LEVEL_2M;
6280 }
6281 EXPORT_SYMBOL_GPL(kvm_configure_mmu);
6282
6283 static void free_mmu_pages(struct kvm_mmu *mmu)
6284 {
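	/* Re-encrypt the PAE root page that __kvm_mmu_create() decrypted. */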
6285 if (!tdp_enabled && mmu->pae_root)
6286 set_memory_encrypted((unsigned long)mmu->pae_root, 1);
6287 free_page((unsigned long)mmu->pae_root);
6288 free_page((unsigned long)mmu->pml4_root);
6289 free_page((unsigned long)mmu->pml5_root);
6290 }
6291
6292 static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
6293 {
6294 struct page *page;
6295 int i;
6296
6297 mmu->root.hpa = INVALID_PAGE;
6298 mmu->root.pgd = 0;
6299 mmu->mirror_root_hpa = INVALID_PAGE;
6300 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
6301 mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
6302
6303 /* vcpu->arch.guest_mmu isn't used when !tdp_enabled. */
6304 if (!tdp_enabled && mmu == &vcpu->arch.guest_mmu)
6305 return 0;
6306
6307 /*
6308 * When using PAE paging, the four PDPTEs are treated as 'root' pages,
6309 * while the PDP table is a per-vCPU construct that's allocated at MMU
6310 * creation. When emulating 32-bit mode, cr3 is only 32 bits even on
6311 * x86_64. Therefore we need to allocate the PDP table in the first
6312 * 4GB of memory, which happens to fit the DMA32 zone. TDP paging
6313 * generally doesn't use PAE paging and can skip allocating the PDP
6314 * table. The main exception, handled here, is SVM's 32-bit NPT. The
6315 * other exception is for shadowing L1's 32-bit or PAE NPT on 64-bit
6316 * KVM; that horror is handled on-demand by mmu_alloc_special_roots().
6317 */
6318 if (tdp_enabled && kvm_mmu_get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
6319 return 0;
6320
6321 page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);
6322 if (!page)
6323 return -ENOMEM;
6324
6325 mmu->pae_root = page_address(page);
6326
6327 /*
6328 * CR3 is only 32 bits when PAE paging is used, thus it's impossible to
6329 * get the CPU to treat the PDPTEs as encrypted. Decrypt the page so
6330 * that KVM's writes and the CPU's reads get along. Note, this is
6331 * only necessary when using shadow paging, as 64-bit NPT can get at
6332 * the C-bit even when shadowing 32-bit NPT, and SME isn't supported
6333 * by 32-bit kernels (when KVM itself uses 32-bit NPT).
6334 */
6335 if (!tdp_enabled)
6336 set_memory_decrypted((unsigned long)mmu->pae_root, 1);
6337 else
6338 WARN_ON_ONCE(shadow_me_value);
6339
6340 for (i = 0; i < 4; ++i)
6341 mmu->pae_root[i] = INVALID_PAE_ROOT;
6342
6343 return 0;
6344 }
6345
6346 int kvm_mmu_create(struct kvm_vcpu *vcpu)
6347 {
6348 int ret;
6349
6350 vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache;
6351 vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO;
6352
6353 vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
6354 vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;
6355
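	/*
	 * Shadow page tables must be initialized with the non-present SPTE
	 * value; if that value is zero, plain zero-fill allocation suffices.
	 */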
6356 vcpu->arch.mmu_shadow_page_cache.init_value =
6357 SHADOW_NONPRESENT_VALUE;
6358 if (!vcpu->arch.mmu_shadow_page_cache.init_value)
6359 vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
6360
6361 vcpu->arch.mmu = &vcpu->arch.root_mmu;
6362 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
6363
6364 ret = __kvm_mmu_create(vcpu, &vcpu->arch.guest_mmu);
6365 if (ret)
6366 return ret;
6367
6368 ret = __kvm_mmu_create(vcpu, &vcpu->arch.root_mmu);
6369 if (ret)
6370 goto fail_allocate_root;
6371
6372 return ret;
6373 fail_allocate_root:
6374 free_mmu_pages(&vcpu->arch.guest_mmu);
6375 return ret;
6376 }
6377
6378 #define BATCH_ZAP_PAGES 10
6379 static void kvm_zap_obsolete_pages(struct kvm *kvm)
6380 {
6381 struct kvm_mmu_page *sp, *node;
6382 int nr_zapped, batch = 0;
6383 LIST_HEAD(invalid_list);
6384 bool unstable;
6385
6386 lockdep_assert_held(&kvm->slots_lock);
6387
6388 restart:
6389 list_for_each_entry_safe_reverse(sp, node,
6390 &kvm->arch.active_mmu_pages, link) {
6391 /*
6392 * No obsolete valid page exists before a newly created page
6393 * since active_mmu_pages is a FIFO list.
6394 */
6395 if (!is_obsolete_sp(kvm, sp))
6396 break;
6397
6398 /*
6399 * Invalid pages should never land back on the list of active
6400 * pages. Skip the bogus page, otherwise we'll get stuck in an
6401 * infinite loop if the page gets put back on the list (again).
6402 */
6403 if (WARN_ON_ONCE(sp->role.invalid))
6404 continue;
6405
6406 /*
6407 * No need to flush the TLB since we're only zapping shadow
6408 * pages with an obsolete generation number and all vCPUS have
6409 * loaded a new root, i.e. the shadow pages being zapped cannot
6410 * be in active use by the guest.
6411 */
6412 if (batch >= BATCH_ZAP_PAGES &&
6413 cond_resched_rwlock_write(&kvm->mmu_lock)) {
6414 batch = 0;
6415 goto restart;
6416 }
6417
6418 unstable = __kvm_mmu_prepare_zap_page(kvm, sp,
6419 &invalid_list, &nr_zapped);
6420 batch += nr_zapped;
6421
6422 if (unstable)
6423 goto restart;
6424 }
6425
6426 /*
6427 * Kick all vCPUs (via remote TLB flush) before freeing the page tables
6428 * to ensure KVM is not in the middle of a lockless shadow page table
6429 * walk, which may reference the pages. The remote TLB flush itself is
6430 * not required and is simply a convenient way to kick vCPUs as needed.
6431 * KVM performs a local TLB flush when allocating a new root (see
6432 * kvm_mmu_load()), and the reload in the caller ensures no vCPUs are
6433 * running with an obsolete MMU.
6434 */
6435 kvm_mmu_commit_zap_page(kvm, &invalid_list);
6436 }
6437
6438 /*
6439 * Fast invalidate all shadow pages and use lock-break technique
6440 * to zap obsolete pages.
6441 *
6442 * It's required when a memslot is being deleted or the VM is being
6443 * destroyed; in these cases, we must ensure that the KVM MMU does not
6444 * use any resource of the slot being deleted (or of any slot, when the
6445 * VM is destroyed) after calling this function.
6446 */
6447 static void kvm_mmu_zap_all_fast(struct kvm *kvm)
6448 {
6449 lockdep_assert_held(&kvm->slots_lock);
6450
6451 write_lock(&kvm->mmu_lock);
6452 trace_kvm_mmu_zap_all_fast(kvm);
6453
6454 /*
6455 * Toggle mmu_valid_gen between '0' and '1'. Because slots_lock is
6456 * held for the entire duration of zapping obsolete pages, it's
6457 * impossible for there to be multiple invalid generations associated
6458 * with *valid* shadow pages at any given time, i.e. there is exactly
6459 * one valid generation and (at most) one invalid generation.
6460 */
6461 kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1;
6462
6463 /*
6464 * In order to ensure all vCPUs drop their soon-to-be invalid roots,
6465 * invalidating TDP MMU roots must be done while holding mmu_lock for
6466 * write and in the same critical section as making the reload request,
6467 * e.g. before kvm_zap_obsolete_pages() could drop mmu_lock and yield.
6468 */
6469 if (tdp_mmu_enabled) {
6470 /*
6471 * External page tables don't support fast zapping, therefore
6472 * their mirrors must be invalidated separately by the caller.
6473 */
6474 kvm_tdp_mmu_invalidate_roots(kvm, KVM_DIRECT_ROOTS);
6475 }
6476
6477 /*
6478 * Notify all vcpus to reload their shadow page tables and flush TLBs.
6479 * Then all vcpus will switch to the new shadow page table with the new
6480 * mmu_valid_gen.
6481 *
6482 * Note: we need to do this under the protection of mmu_lock,
6483 * otherwise, a vcpu could purge a shadow page but miss the tlb flush.
6484 */
6485 kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_FREE_OBSOLETE_ROOTS);
6486
6487 kvm_zap_obsolete_pages(kvm);
6488
6489 write_unlock(&kvm->mmu_lock);
6490
6491 /*
6492 * Zap the invalidated TDP MMU roots, all SPTEs must be dropped before
6493 * returning to the caller, e.g. if the zap is in response to a memslot
6494 * deletion, mmu_notifier callbacks will be unable to reach the SPTEs
6495 * associated with the deleted memslot once the update completes, and
6496 * deferring the zap until the final reference to the root is put would
6497 * lead to use-after-free.
6498 */
6499 if (tdp_mmu_enabled)
6500 kvm_tdp_mmu_zap_invalidated_roots(kvm, true);
6501 }
6502
6503 void kvm_mmu_init_vm(struct kvm *kvm)
6504 {
6505 kvm->arch.shadow_mmio_value = shadow_mmio_value;
6506 INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
6507 INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages);
6508 spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
6509
6510 if (tdp_mmu_enabled)
6511 kvm_mmu_init_tdp_mmu(kvm);
6512
6513 kvm->arch.split_page_header_cache.kmem_cache = mmu_page_header_cache;
6514 kvm->arch.split_page_header_cache.gfp_zero = __GFP_ZERO;
6515
6516 kvm->arch.split_shadow_page_cache.gfp_zero = __GFP_ZERO;
6517
6518 kvm->arch.split_desc_cache.kmem_cache = pte_list_desc_cache;
6519 kvm->arch.split_desc_cache.gfp_zero = __GFP_ZERO;
6520 }
6521
6522 static void mmu_free_vm_memory_caches(struct kvm *kvm)
6523 {
6524 kvm_mmu_free_memory_cache(&kvm->arch.split_desc_cache);
6525 kvm_mmu_free_memory_cache(&kvm->arch.split_page_header_cache);
6526 kvm_mmu_free_memory_cache(&kvm->arch.split_shadow_page_cache);
6527 }
6528
6529 void kvm_mmu_uninit_vm(struct kvm *kvm)
6530 {
6531 if (tdp_mmu_enabled)
6532 kvm_mmu_uninit_tdp_mmu(kvm);
6533
6534 mmu_free_vm_memory_caches(kvm);
6535 }
6536
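/*
 * Zap rmap-based (shadow MMU) SPTEs for GFNs in [gfn_start, gfn_end) across
 * all address spaces and memslots. Returns true if a remote TLB flush is
 * needed.
 */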
6537 static bool kvm_rmap_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
6538 {
6539 const struct kvm_memory_slot *memslot;
6540 struct kvm_memslots *slots;
6541 struct kvm_memslot_iter iter;
6542 bool flush = false;
6543 gfn_t start, end;
6544 int i;
6545
6546 if (!kvm_memslots_have_rmaps(kvm))
6547 return flush;
6548
6549 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
6550 slots = __kvm_memslots(kvm, i);
6551
6552 kvm_for_each_memslot_in_gfn_range(&iter, slots, gfn_start, gfn_end) {
6553 memslot = iter.slot;
6554 start = max(gfn_start, memslot->base_gfn);
6555 end = min(gfn_end, memslot->base_gfn + memslot->npages);
6556 if (WARN_ON_ONCE(start >= end))
6557 continue;
6558
6559 flush = __kvm_rmap_zap_gfn_range(kvm, memslot, start,
6560 end, true, flush);
6561 }
6562 }
6563
6564 return flush;
6565 }
6566
6567 /*
6568 * Invalidate (zap) SPTEs that cover GFNs in the range [gfn_start, gfn_end),
6569 * i.e. gfn_end is exclusive.
6570 */
6571 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
6572 {
6573 bool flush;
6574
6575 if (WARN_ON_ONCE(gfn_end <= gfn_start))
6576 return;
6577
6578 write_lock(&kvm->mmu_lock);
6579
6580 kvm_mmu_invalidate_begin(kvm);
6581
6582 kvm_mmu_invalidate_range_add(kvm, gfn_start, gfn_end);
6583
6584 flush = kvm_rmap_zap_gfn_range(kvm, gfn_start, gfn_end);
6585
6586 if (tdp_mmu_enabled)
6587 flush = kvm_tdp_mmu_zap_leafs(kvm, gfn_start, gfn_end, flush);
6588
6589 if (flush)
6590 kvm_flush_remote_tlbs_range(kvm, gfn_start, gfn_end - gfn_start);
6591
6592 kvm_mmu_invalidate_end(kvm);
6593
6594 write_unlock(&kvm->mmu_lock);
6595 }
EXPORT_SYMBOL_GPL(kvm_zap_gfn_range);
6596
6597 static bool slot_rmap_write_protect(struct kvm *kvm,
6598 struct kvm_rmap_head *rmap_head,
6599 const struct kvm_memory_slot *slot)
6600 {
6601 return rmap_write_protect(rmap_head, false);
6602 }
6603
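/*
 * Write-protect all SPTEs that map the memslot at start_level and above,
 * e.g. to prepare for dirty logging. The shadow MMU is walked with mmu_lock
 * held for write, the TDP MMU with mmu_lock held for read.
 */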
6604 void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
6605 const struct kvm_memory_slot *memslot,
6606 int start_level)
6607 {
6608 if (kvm_memslots_have_rmaps(kvm)) {
6609 write_lock(&kvm->mmu_lock);
6610 walk_slot_rmaps(kvm, memslot, slot_rmap_write_protect,
6611 start_level, KVM_MAX_HUGEPAGE_LEVEL, false);
6612 write_unlock(&kvm->mmu_lock);
6613 }
6614
6615 if (tdp_mmu_enabled) {
6616 read_lock(&kvm->mmu_lock);
6617 kvm_tdp_mmu_wrprot_slot(kvm, memslot, start_level);
6618 read_unlock(&kvm->mmu_lock);
6619 }
6620 }
6621
6622 static inline bool need_topup(struct kvm_mmu_memory_cache *cache, int min)
6623 {
6624 return kvm_mmu_memory_cache_nr_free_objects(cache) < min;
6625 }
6626
6627 static bool need_topup_split_caches_or_resched(struct kvm *kvm)
6628 {
6629 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock))
6630 return true;
6631
6632 /*
6633 * In the worst case, SPLIT_DESC_CACHE_MIN_NR_OBJECTS descriptors are needed
6634 * to split a single huge page. Calculating how many are actually needed
6635 * is possible but not worth the complexity.
6636 */
6637 return need_topup(&kvm->arch.split_desc_cache, SPLIT_DESC_CACHE_MIN_NR_OBJECTS) ||
6638 need_topup(&kvm->arch.split_page_header_cache, 1) ||
6639 need_topup(&kvm->arch.split_shadow_page_cache, 1);
6640 }
6641
6642 static int topup_split_caches(struct kvm *kvm)
6643 {
6644 /*
6645 * Allocating rmap list entries when splitting huge pages for nested
6646 * MMUs is uncommon as KVM needs to use a list if and only if there is
6647 * more than one rmap entry for a gfn, i.e. requires an L1 gfn to be
6648 * aliased by multiple L2 gfns and/or from multiple nested roots with
6649 * different roles. Aliasing gfns when using TDP is atypical for VMMs;
6650 * a few gfns are often aliased during boot, e.g. when remapping BIOS,
6651 * but aliasing rarely occurs post-boot or for many gfns. If there is
6652 * only one rmap entry, rmap->val points directly at that one entry and
6653 * doesn't need to allocate a list. Buffer the cache by the default
6654 * capacity so that KVM doesn't have to drop mmu_lock to topup if KVM
6655 * encounters an aliased gfn or two.
6656 */
6657 const int capacity = SPLIT_DESC_CACHE_MIN_NR_OBJECTS +
6658 KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE;
6659 int r;
6660
6661 lockdep_assert_held(&kvm->slots_lock);
6662
6663 r = __kvm_mmu_topup_memory_cache(&kvm->arch.split_desc_cache, capacity,
6664 SPLIT_DESC_CACHE_MIN_NR_OBJECTS);
6665 if (r)
6666 return r;
6667
6668 r = kvm_mmu_topup_memory_cache(&kvm->arch.split_page_header_cache, 1);
6669 if (r)
6670 return r;
6671
6672 return kvm_mmu_topup_memory_cache(&kvm->arch.split_shadow_page_cache, 1);
6673 }
6674
6675 static struct kvm_mmu_page *shadow_mmu_get_sp_for_split(struct kvm *kvm, u64 *huge_sptep)
6676 {
6677 struct kvm_mmu_page *huge_sp = sptep_to_sp(huge_sptep);
6678 struct shadow_page_caches caches = {};
6679 union kvm_mmu_page_role role;
6680 unsigned int access;
6681 gfn_t gfn;
6682
6683 gfn = kvm_mmu_page_get_gfn(huge_sp, spte_index(huge_sptep));
6684 access = kvm_mmu_page_get_access(huge_sp, spte_index(huge_sptep));
6685
6686 /*
6687 * Note, huge page splitting always uses direct shadow pages, regardless
6688 * of whether the huge page itself is mapped by a direct or indirect
6689 * shadow page, since the huge page region itself is being directly
6690 * mapped with smaller pages.
6691 */
6692 role = kvm_mmu_child_role(huge_sptep, /*direct=*/true, access);
6693
6694 /* Direct SPs do not require a shadowed_info_cache. */
6695 caches.page_header_cache = &kvm->arch.split_page_header_cache;
6696 caches.shadow_page_cache = &kvm->arch.split_shadow_page_cache;
6697
6698 /* Safe to pass NULL for vCPU since requesting a direct SP. */
6699 return __kvm_mmu_get_shadow_page(kvm, NULL, &caches, gfn, role);
6700 }
6701
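/*
 * Split the huge page mapped by @huge_sptep: populate a lower-level shadow
 * page whose small SPTEs map the same GFN range with the same access, then
 * link it in place of the huge SPTE.
 */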
6702 static void shadow_mmu_split_huge_page(struct kvm *kvm,
6703 const struct kvm_memory_slot *slot,
6704 u64 *huge_sptep)
6705
6706 {
6707 struct kvm_mmu_memory_cache *cache = &kvm->arch.split_desc_cache;
6708 u64 huge_spte = READ_ONCE(*huge_sptep);
6709 struct kvm_mmu_page *sp;
6710 bool flush = false;
6711 u64 *sptep, spte;
6712 gfn_t gfn;
6713 int index;
6714
6715 sp = shadow_mmu_get_sp_for_split(kvm, huge_sptep);
6716
6717 for (index = 0; index < SPTE_ENT_PER_PAGE; index++) {
6718 sptep = &sp->spt[index];
6719 gfn = kvm_mmu_page_get_gfn(sp, index);
6720
6721 /*
6722 * The SP may already have populated SPTEs, e.g. if this huge
6723 * page is aliased by multiple sptes with the same access
6724 * permissions. These entries are guaranteed to map the same
6725 * gfn-to-pfn translation since the SP is direct, so no need to
6726 * modify them.
6727 *
6728 * However, if a given SPTE points to a lower level page table,
6729 * that lower level page table may only be partially populated.
6730 * Installing such SPTEs would effectively unmap a portion of the
6731 * huge page. Unmapping guest memory always requires a TLB flush
6732 * since a subsequent operation on the unmapped regions would
6733 * fail to detect the need to flush.
6734 */
6735 if (is_shadow_present_pte(*sptep)) {
6736 flush |= !is_last_spte(*sptep, sp->role.level);
6737 continue;
6738 }
6739
6740 spte = make_small_spte(kvm, huge_spte, sp->role, index);
6741 mmu_spte_set(sptep, spte);
6742 __rmap_add(kvm, cache, slot, sptep, gfn, sp->role.access);
6743 }
6744
6745 __link_shadow_page(kvm, cache, huge_sptep, sp, flush);
6746 }
6747
6748 static int shadow_mmu_try_split_huge_page(struct kvm *kvm,
6749 const struct kvm_memory_slot *slot,
6750 u64 *huge_sptep)
6751 {
6752 struct kvm_mmu_page *huge_sp = sptep_to_sp(huge_sptep);
6753 int level, r = 0;
6754 gfn_t gfn;
6755 u64 spte;
6756
6757 /* Grab information for the tracepoint before dropping the MMU lock. */
6758 gfn = kvm_mmu_page_get_gfn(huge_sp, spte_index(huge_sptep));
6759 level = huge_sp->role.level;
6760 spte = *huge_sptep;
6761
6762 if (kvm_mmu_available_pages(kvm) <= KVM_MIN_FREE_MMU_PAGES) {
6763 r = -ENOSPC;
6764 goto out;
6765 }
6766
6767 if (need_topup_split_caches_or_resched(kvm)) {
6768 write_unlock(&kvm->mmu_lock);
6769 cond_resched();
6770 /*
6771 * If the topup succeeds, return -EAGAIN to indicate that the
6772 * rmap iterator should be restarted because the MMU lock was
6773 * dropped.
6774 */
6775 r = topup_split_caches(kvm) ?: -EAGAIN;
6776 write_lock(&kvm->mmu_lock);
6777 goto out;
6778 }
6779
6780 shadow_mmu_split_huge_page(kvm, slot, huge_sptep);
6781
6782 out:
6783 trace_kvm_mmu_split_huge_page(gfn, spte, level, r);
6784 return r;
6785 }
6786
6787 static bool shadow_mmu_try_split_huge_pages(struct kvm *kvm,
6788 struct kvm_rmap_head *rmap_head,
6789 const struct kvm_memory_slot *slot)
6790 {
6791 struct rmap_iterator iter;
6792 struct kvm_mmu_page *sp;
6793 u64 *huge_sptep;
6794 int r;
6795
6796 restart:
6797 for_each_rmap_spte(rmap_head, &iter, huge_sptep) {
6798 sp = sptep_to_sp(huge_sptep);
6799
6800 /* TDP MMU is enabled, so rmap only contains nested MMU SPs. */
6801 if (WARN_ON_ONCE(!sp->role.guest_mode))
6802 continue;
6803
6804 /* The rmaps should never contain non-leaf SPTEs. */
6805 if (WARN_ON_ONCE(!is_large_pte(*huge_sptep)))
6806 continue;
6807
6808 /* SPs with level > PG_LEVEL_4K should never be unsync. */
6809 if (WARN_ON_ONCE(sp->unsync))
6810 continue;
6811
6812 /* Don't bother splitting huge pages on invalid SPs. */
6813 if (sp->role.invalid)
6814 continue;
6815
6816 r = shadow_mmu_try_split_huge_page(kvm, slot, huge_sptep);
6817
6818 /*
6819 * The split succeeded or needs to be retried because the MMU
6820 * lock was dropped. Either way, restart the iterator to get it
6821 * back into a consistent state.
6822 */
6823 if (!r || r == -EAGAIN)
6824 goto restart;
6825
6826 /* The split failed and shouldn't be retried (e.g. -ENOMEM). */
6827 break;
6828 }
6829
6830 return false;
6831 }
6832
6833 static void kvm_shadow_mmu_try_split_huge_pages(struct kvm *kvm,
6834 const struct kvm_memory_slot *slot,
6835 gfn_t start, gfn_t end,
6836 int target_level)
6837 {
6838 int level;
6839
6840 /*
6841 * Split huge pages starting with KVM_MAX_HUGEPAGE_LEVEL and working
6842 * down to the target level. This ensures pages are recursively split
6843 * all the way to the target level. There's no need to split pages
6844 * already at the target level.
6845 */
6846 for (level = KVM_MAX_HUGEPAGE_LEVEL; level > target_level; level--)
6847 __walk_slot_rmaps(kvm, slot, shadow_mmu_try_split_huge_pages,
6848 level, level, start, end - 1, true, true, false);
6849 }
6850
6851 /* Must be called with the mmu_lock held in write-mode. */
6852 void kvm_mmu_try_split_huge_pages(struct kvm *kvm,
6853 const struct kvm_memory_slot *memslot,
6854 u64 start, u64 end,
6855 int target_level)
6856 {
6857 if (!tdp_mmu_enabled)
6858 return;
6859
6860 if (kvm_memslots_have_rmaps(kvm))
6861 kvm_shadow_mmu_try_split_huge_pages(kvm, memslot, start, end, target_level);
6862
6863 kvm_tdp_mmu_try_split_huge_pages(kvm, memslot, start, end, target_level, false);
6864
6865 /*
6866 * A TLB flush is unnecessary at this point for the same reasons as in
6867 * kvm_mmu_slot_try_split_huge_pages().
6868 */
6869 }
6870
6871 void kvm_mmu_slot_try_split_huge_pages(struct kvm *kvm,
6872 const struct kvm_memory_slot *memslot,
6873 int target_level)
6874 {
6875 u64 start = memslot->base_gfn;
6876 u64 end = start + memslot->npages;
6877
6878 if (!tdp_mmu_enabled)
6879 return;
6880
6881 if (kvm_memslots_have_rmaps(kvm)) {
6882 write_lock(&kvm->mmu_lock);
6883 kvm_shadow_mmu_try_split_huge_pages(kvm, memslot, start, end, target_level);
6884 write_unlock(&kvm->mmu_lock);
6885 }
6886
6887 read_lock(&kvm->mmu_lock);
6888 kvm_tdp_mmu_try_split_huge_pages(kvm, memslot, start, end, target_level, true);
6889 read_unlock(&kvm->mmu_lock);
6890
6891 /*
6892 * No TLB flush is necessary here. KVM will flush TLBs after
6893 * write-protecting and/or clearing dirty on the newly split SPTEs to
6894 * ensure that guest writes are reflected in the dirty log before the
6895 * ioctl to enable dirty logging on this memslot completes. Since the
6896 * split SPTEs retain the write and dirty bits of the huge SPTE, it is
6897 * safe for KVM to decide if a TLB flush is necessary based on the split
6898 * SPTEs.
6899 */
6900 }
6901
6902 static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
6903 struct kvm_rmap_head *rmap_head,
6904 const struct kvm_memory_slot *slot)
6905 {
6906 u64 *sptep;
6907 struct rmap_iterator iter;
6908 int need_tlb_flush = 0;
6909 struct kvm_mmu_page *sp;
6910
6911 restart:
6912 for_each_rmap_spte(rmap_head, &iter, sptep) {
6913 sp = sptep_to_sp(sptep);
6914
6915 /*
6916 * KVM cannot create a huge mapping for an indirect shadow page, which
6917 * is found on the last rmap level (level = 1) when not using TDP; such
6918 * shadow pages are kept in sync with the guest's page tables, and the
6919 * guest is using 4K mappings if the indirect SP has level = 1.
6921 */
6922 if (sp->role.direct &&
6923 sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn)) {
6924 kvm_zap_one_rmap_spte(kvm, rmap_head, sptep);
6925
6926 if (kvm_available_flush_remote_tlbs_range())
6927 kvm_flush_remote_tlbs_sptep(kvm, sptep);
6928 else
6929 need_tlb_flush = 1;
6930
6931 goto restart;
6932 }
6933 }
6934
6935 return need_tlb_flush;
6936 }
6938
6939 static void kvm_rmap_zap_collapsible_sptes(struct kvm *kvm,
6940 const struct kvm_memory_slot *slot)
6941 {
6942 /*
6943 * Note, use KVM_MAX_HUGEPAGE_LEVEL - 1 since there's no need to zap
6944 * pages that are already mapped at the maximum hugepage level.
6945 */
6946 if (walk_slot_rmaps(kvm, slot, kvm_mmu_zap_collapsible_spte,
6947 PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL - 1, true))
6948 kvm_flush_remote_tlbs_memslot(kvm, slot);
6949 }
6950
6951 void kvm_mmu_recover_huge_pages(struct kvm *kvm,
6952 const struct kvm_memory_slot *slot)
6953 {
6954 if (kvm_memslots_have_rmaps(kvm)) {
6955 write_lock(&kvm->mmu_lock);
6956 kvm_rmap_zap_collapsible_sptes(kvm, slot);
6957 write_unlock(&kvm->mmu_lock);
6958 }
6959
6960 if (tdp_mmu_enabled) {
6961 read_lock(&kvm->mmu_lock);
6962 kvm_tdp_mmu_recover_huge_pages(kvm, slot);
6963 read_unlock(&kvm->mmu_lock);
6964 }
6965 }
6966
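/*
 * Clear the dirty status of all leaf SPTEs that map the memslot, e.g. when
 * harvesting the dirty log. The caller is responsible for the subsequent
 * TLB flush (see the comment at the end of this function).
 */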
6967 void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
6968 const struct kvm_memory_slot *memslot)
6969 {
6970 if (kvm_memslots_have_rmaps(kvm)) {
6971 write_lock(&kvm->mmu_lock);
6972 /*
6973 * Clear dirty bits only on 4k SPTEs since the legacy MMU only
6974 * supports dirty logging at a 4K granularity.
6975 */
6976 walk_slot_rmaps_4k(kvm, memslot, __rmap_clear_dirty, false);
6977 write_unlock(&kvm->mmu_lock);
6978 }
6979
6980 if (tdp_mmu_enabled) {
6981 read_lock(&kvm->mmu_lock);
6982 kvm_tdp_mmu_clear_dirty_slot(kvm, memslot);
6983 read_unlock(&kvm->mmu_lock);
6984 }
6985
6986 /*
6987 * The caller will flush the TLBs after this function returns.
6988 *
6989 * It's also safe to flush TLBs out of mmu lock here as currently this
6990 * function is only used for dirty logging, in which case flushing TLB
6991 * out of mmu lock also guarantees no dirty pages will be lost in
6992 * dirty_bitmap.
6993 */
6994 }
6995
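/*
 * Zap every shadow page in the VM, for both the shadow MMU and the TDP MMU.
 * Used by kvm_arch_flush_shadow_all() when all guest mappings must go away.
 */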
6996 static void kvm_mmu_zap_all(struct kvm *kvm)
6997 {
6998 struct kvm_mmu_page *sp, *node;
6999 LIST_HEAD(invalid_list);
7000 int ign;
7001
7002 write_lock(&kvm->mmu_lock);
7003 restart:
7004 list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
7005 if (WARN_ON_ONCE(sp->role.invalid))
7006 continue;
7007 if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
7008 goto restart;
7009 if (cond_resched_rwlock_write(&kvm->mmu_lock))
7010 goto restart;
7011 }
7012
7013 kvm_mmu_commit_zap_page(kvm, &invalid_list);
7014
7015 if (tdp_mmu_enabled)
7016 kvm_tdp_mmu_zap_all(kvm);
7017
7018 write_unlock(&kvm->mmu_lock);
7019 }
7020
7021 void kvm_arch_flush_shadow_all(struct kvm *kvm)
7022 {
7023 kvm_mmu_zap_all(kvm);
7024 }
7025
7026 static void kvm_mmu_zap_memslot_pages_and_flush(struct kvm *kvm,
7027 struct kvm_memory_slot *slot,
7028 bool flush)
7029 {
7030 LIST_HEAD(invalid_list);
7031 unsigned long i;
7032
7033 if (list_empty(&kvm->arch.active_mmu_pages))
7034 goto out_flush;
7035
7036 /*
7037 * Since accounting information is stored in struct kvm_arch_memory_slot,
7038 * all MMU pages that are shadowing guest PTEs must be zapped before the
7039 * memslot is deleted, as freeing such pages after the memslot is freed
7040 * will result in use-after-free, e.g. in unaccount_shadowed().
7041 */
7042 for (i = 0; i < slot->npages; i++) {
7043 struct kvm_mmu_page *sp;
7044 gfn_t gfn = slot->base_gfn + i;
7045
7046 for_each_gfn_valid_sp_with_gptes(kvm, sp, gfn)
7047 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
7048
7049 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
7050 kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
7051 flush = false;
7052 cond_resched_rwlock_write(&kvm->mmu_lock);
7053 }
7054 }
7055
7056 out_flush:
7057 kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
7058 }
7059
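/*
 * Zap only the SPTEs and shadow pages associated with the given memslot:
 * unmap its GFN range and then zap any shadow pages still shadowing guest
 * PTEs in that range.
 */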
7060 static void kvm_mmu_zap_memslot(struct kvm *kvm,
7061 struct kvm_memory_slot *slot)
7062 {
7063 struct kvm_gfn_range range = {
7064 .slot = slot,
7065 .start = slot->base_gfn,
7066 .end = slot->base_gfn + slot->npages,
7067 .may_block = true,
7068 };
7069 bool flush;
7070
7071 write_lock(&kvm->mmu_lock);
7072 flush = kvm_unmap_gfn_range(kvm, &range);
7073 kvm_mmu_zap_memslot_pages_and_flush(kvm, slot, flush);
7074 write_unlock(&kvm->mmu_lock);
7075 }
7076
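/*
 * Returns true if deleting a memslot should zap *all* shadow pages, i.e. if
 * the VM is a default VM and the KVM_X86_QUIRK_SLOT_ZAP_ALL quirk is enabled.
 */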
7077 static inline bool kvm_memslot_flush_zap_all(struct kvm *kvm)
7078 {
7079 return kvm->arch.vm_type == KVM_X86_DEFAULT_VM &&
7080 kvm_check_has_quirk(kvm, KVM_X86_QUIRK_SLOT_ZAP_ALL);
7081 }
7082
7083 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
7084 struct kvm_memory_slot *slot)
7085 {
7086 if (kvm_memslot_flush_zap_all(kvm))
7087 kvm_mmu_zap_all_fast(kvm);
7088 else
7089 kvm_mmu_zap_memslot(kvm, slot);
7090 }
7091
7092 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
7093 {
7094 WARN_ON_ONCE(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
7095
7096 gen &= MMIO_SPTE_GEN_MASK;
7097
7098 /*
7099 * Generation numbers are incremented in multiples of the number of
7100 * address spaces in order to provide unique generations across all
7101 * address spaces. Strip what is effectively the address space
7102 * modifier prior to checking for a wrap of the MMIO generation so
7103 * that a wrap in any address space is detected.
7104 */
7105 gen &= ~((u64)kvm_arch_nr_memslot_as_ids(kvm) - 1);
7106
7107 /*
7108 * The very rare case: if the MMIO generation number has wrapped,
7109 * zap all shadow pages.
7110 */
7111 if (unlikely(gen == 0)) {
7112 kvm_debug_ratelimited("zapping shadow pages for mmio generation wraparound\n");
7113 kvm_mmu_zap_all_fast(kvm);
7114 }
7115 }
7116
7117 static void mmu_destroy_caches(void)
7118 {
7119 kmem_cache_destroy(pte_list_desc_cache);
7120 kmem_cache_destroy(mmu_page_header_cache);
7121 }
7122
7123 static void kvm_wake_nx_recovery_thread(struct kvm *kvm)
7124 {
7125 /*
7126 * The NX recovery thread is spawned on-demand at the first KVM_RUN and
7127 * may not be valid even though the VM is globally visible. Do nothing,
7128 * as such a VM can't have any possible NX huge pages.
7129 */
7130 struct vhost_task *nx_thread = READ_ONCE(kvm->arch.nx_huge_page_recovery_thread);
7131
7132 if (nx_thread)
7133 vhost_task_wake(nx_thread);
7134 }
7135
7136 static int get_nx_huge_pages(char *buffer, const struct kernel_param *kp)
7137 {
7138 if (nx_hugepage_mitigation_hard_disabled)
7139 return sysfs_emit(buffer, "never\n");
7140
7141 return param_get_bool(buffer, kp);
7142 }
7143
7144 static bool get_nx_auto_mode(void)
7145 {
7146 /* Return true when CPU has the bug, and mitigations are ON */
7147 return boot_cpu_has_bug(X86_BUG_ITLB_MULTIHIT) && !cpu_mitigations_off();
7148 }
7149
7150 static void __set_nx_huge_pages(bool val)
7151 {
7152 nx_huge_pages = itlb_multihit_kvm_mitigation = val;
7153 }
7154
7155 static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
7156 {
7157 bool old_val = nx_huge_pages;
7158 bool new_val;
7159
7160 if (nx_hugepage_mitigation_hard_disabled)
7161 return -EPERM;
7162
7163 /* In "auto" mode deploy workaround only if CPU has the bug. */
7164 if (sysfs_streq(val, "off")) {
7165 new_val = 0;
7166 } else if (sysfs_streq(val, "force")) {
7167 new_val = 1;
7168 } else if (sysfs_streq(val, "auto")) {
7169 new_val = get_nx_auto_mode();
7170 } else if (sysfs_streq(val, "never")) {
7171 new_val = 0;
7172
7173 mutex_lock(&kvm_lock);
7174 if (!list_empty(&vm_list)) {
7175 mutex_unlock(&kvm_lock);
7176 return -EBUSY;
7177 }
7178 nx_hugepage_mitigation_hard_disabled = true;
7179 mutex_unlock(&kvm_lock);
7180 } else if (kstrtobool(val, &new_val) < 0) {
7181 return -EINVAL;
7182 }
7183
7184 __set_nx_huge_pages(new_val);
7185
7186 if (new_val != old_val) {
7187 struct kvm *kvm;
7188
7189 mutex_lock(&kvm_lock);
7190
7191 list_for_each_entry(kvm, &vm_list, vm_list) {
7192 mutex_lock(&kvm->slots_lock);
7193 kvm_mmu_zap_all_fast(kvm);
7194 mutex_unlock(&kvm->slots_lock);
7195
7196 kvm_wake_nx_recovery_thread(kvm);
7197 }
7198 mutex_unlock(&kvm_lock);
7199 }
7200
7201 return 0;
7202 }
7203
7204 /*
7205 * nx_huge_pages needs to be resolved to true/false when kvm.ko is loaded, as
7206 * its default value of -1 is technically undefined behavior for a boolean.
7207 * Forward the module init call to SPTE code so that it too can handle module
7208 * params that need to be resolved/snapshot.
7209 */
7210 void __init kvm_mmu_x86_module_init(void)
7211 {
7212 if (nx_huge_pages == -1)
7213 __set_nx_huge_pages(get_nx_auto_mode());
7214
7215 /*
7216 * Snapshot userspace's desire to enable the TDP MMU. Whether or not the
7217 * TDP MMU is actually enabled is determined in kvm_configure_mmu()
7218 * when the vendor module is loaded.
7219 */
7220 tdp_mmu_allowed = tdp_mmu_enabled;
7221
7222 kvm_mmu_spte_module_init();
7223 }
7224
7225 /*
7226 * The bulk of the MMU initialization is deferred until the vendor module is
7227 * loaded as many of the masks/values may be modified by VMX or SVM, i.e. need
7228 * to be reset when a potentially different vendor module is loaded.
7229 */
7230 int kvm_mmu_vendor_module_init(void)
7231 {
7232 int ret = -ENOMEM;
7233
7234 /*
7235 * MMU roles use union aliasing which is, generally speaking, an
7236 * undefined behavior. However, we supposedly know how compilers behave
7237 * and the current status quo is unlikely to change. Guardians below are
7238 * supposed to let us know if the assumption becomes false.
7239 */
7240 BUILD_BUG_ON(sizeof(union kvm_mmu_page_role) != sizeof(u32));
7241 BUILD_BUG_ON(sizeof(union kvm_mmu_extended_role) != sizeof(u32));
7242 BUILD_BUG_ON(sizeof(union kvm_cpu_role) != sizeof(u64));
7243
7244 kvm_mmu_reset_all_pte_masks();
7245
7246 pte_list_desc_cache = KMEM_CACHE(pte_list_desc, SLAB_ACCOUNT);
7247 if (!pte_list_desc_cache)
7248 goto out;
7249
7250 mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
7251 sizeof(struct kvm_mmu_page),
7252 0, SLAB_ACCOUNT, NULL);
7253 if (!mmu_page_header_cache)
7254 goto out;
7255
7256 return 0;
7257
7258 out:
7259 mmu_destroy_caches();
7260 return ret;
7261 }
7262
7263 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
7264 {
7265 kvm_mmu_unload(vcpu);
7266 if (tdp_mmu_enabled) {
7267 read_lock(&vcpu->kvm->mmu_lock);
7268 mmu_free_root_page(vcpu->kvm, &vcpu->arch.mmu->mirror_root_hpa,
7269 NULL);
7270 read_unlock(&vcpu->kvm->mmu_lock);
7271 }
7272 free_mmu_pages(&vcpu->arch.root_mmu);
7273 free_mmu_pages(&vcpu->arch.guest_mmu);
7274 mmu_free_memory_caches(vcpu);
7275 }
7276
7277 void kvm_mmu_vendor_module_exit(void)
7278 {
7279 mmu_destroy_caches();
7280 }
7281
7282 /*
7283 * Calculate the effective recovery period, accounting for '0' meaning "let KVM
7284 * select a halving time of 1 hour". Returns true if recovery is enabled.
7285 */
7286 static bool calc_nx_huge_pages_recovery_period(uint *period)
7287 {
7288 /*
7289 * Use READ_ONCE() to get the params; this may be called outside of the
7290 * param setters, e.g. by the recovery worker to compute its next timeout.
7291 */
7292 bool enabled = READ_ONCE(nx_huge_pages);
7293 uint ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
7294
7295 if (!enabled || !ratio)
7296 return false;
7297
7298 *period = READ_ONCE(nx_huge_pages_recovery_period_ms);
7299 if (!*period) {
7300 /* Make sure the period is not less than one second. */
7301 ratio = min(ratio, 3600u);
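/*
 * E.g. with the default ratio of 60 and no explicit period, this yields
 * 60 * 60 * 1000 / 60 = 60000 ms, i.e. one recovery run per minute.
 */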
7302 *period = 60 * 60 * 1000 / ratio;
7303 }
7304 return true;
7305 }
7306
7307 static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp)
7308 {
7309 bool was_recovery_enabled, is_recovery_enabled;
7310 uint old_period, new_period;
7311 int err;
7312
7313 if (nx_hugepage_mitigation_hard_disabled)
7314 return -EPERM;
7315
7316 was_recovery_enabled = calc_nx_huge_pages_recovery_period(&old_period);
7317
7318 err = param_set_uint(val, kp);
7319 if (err)
7320 return err;
7321
7322 is_recovery_enabled = calc_nx_huge_pages_recovery_period(&new_period);
7323
7324 if (is_recovery_enabled &&
7325 (!was_recovery_enabled || old_period > new_period)) {
7326 struct kvm *kvm;
7327
7328 mutex_lock(&kvm_lock);
7329
7330 list_for_each_entry(kvm, &vm_list, vm_list)
7331 kvm_wake_nx_recovery_thread(kvm);
7332
7333 mutex_unlock(&kvm_lock);
7334 }
7335
7336 return err;
7337 }
7338
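/*
 * Zap a batch of shadow pages that were forced non-huge by the NX huge page
 * mitigation (roughly nx_lpage_splits / nx_huge_pages_recovery_ratio per
 * run), so that the affected regions can be rebuilt as huge pages.
 */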
7339 static void kvm_recover_nx_huge_pages(struct kvm *kvm)
7340 {
7341 unsigned long nx_lpage_splits = kvm->stat.nx_lpage_splits;
7342 struct kvm_memory_slot *slot;
7343 int rcu_idx;
7344 struct kvm_mmu_page *sp;
7345 unsigned int ratio;
7346 LIST_HEAD(invalid_list);
7347 bool flush = false;
7348 ulong to_zap;
7349
7350 rcu_idx = srcu_read_lock(&kvm->srcu);
7351 write_lock(&kvm->mmu_lock);
7352
7353 /*
7354 * Zapping TDP MMU shadow pages, including the remote TLB flush, must
7355 * be done under RCU protection, because the pages are freed via RCU
7356 * callback.
7357 */
7358 rcu_read_lock();
7359
7360 ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
7361 to_zap = ratio ? DIV_ROUND_UP(nx_lpage_splits, ratio) : 0;
7362 for ( ; to_zap; --to_zap) {
7363 if (list_empty(&kvm->arch.possible_nx_huge_pages))
7364 break;
7365
7366 /*
7367 * We use a separate list instead of just using active_mmu_pages
7368 * because the number of shadow pages that can be replaced with an
7369 * NX huge page is expected to be relatively small compared to
7370 * the total number of shadow pages. And because the TDP MMU
7371 * doesn't use active_mmu_pages.
7372 */
7373 sp = list_first_entry(&kvm->arch.possible_nx_huge_pages,
7374 struct kvm_mmu_page,
7375 possible_nx_huge_page_link);
7376 WARN_ON_ONCE(!sp->nx_huge_page_disallowed);
7377 WARN_ON_ONCE(!sp->role.direct);
7378
7379 /*
7380 * Unaccount and do not attempt to recover any NX Huge Pages
7381 * that are being dirty tracked, as they would just be faulted
7382 * back in as 4KiB pages. The NX Huge Pages in this slot will be
7383 * recovered, along with all the other huge pages in the slot,
7384 * when dirty logging is disabled.
7385 *
7386 * Since gfn_to_memslot() is relatively expensive, it helps to
7387 * skip it if the test cannot possibly return true. On the
7388 * other hand, if any memslot has logging enabled, chances are
7389 * good that all of them do, in which case unaccount_nx_huge_page()
7390 * is much cheaper than zapping the page.
7391 *
7392 * If a memslot update is in progress, reading an incorrect value
7393 * of kvm->nr_memslots_dirty_logging is not a problem: if it is
7394 * becoming zero, gfn_to_memslot() will be done unnecessarily; if
7395 * it is becoming nonzero, the page will be zapped unnecessarily.
7396 * Either way, this only affects efficiency in racy situations,
7397 * and not correctness.
7398 */
7399 slot = NULL;
7400 if (atomic_read(&kvm->nr_memslots_dirty_logging)) {
7401 struct kvm_memslots *slots;
7402
7403 slots = kvm_memslots_for_spte_role(kvm, sp->role);
7404 slot = __gfn_to_memslot(slots, sp->gfn);
7405 WARN_ON_ONCE(!slot);
7406 }
7407
7408 if (slot && kvm_slot_dirty_track_enabled(slot))
7409 unaccount_nx_huge_page(kvm, sp);
7410 else if (is_tdp_mmu_page(sp))
7411 flush |= kvm_tdp_mmu_zap_sp(kvm, sp);
7412 else
7413 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
7414 WARN_ON_ONCE(sp->nx_huge_page_disallowed);
7415
7416 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
7417 kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
7418 rcu_read_unlock();
7419
7420 cond_resched_rwlock_write(&kvm->mmu_lock);
7421 flush = false;
7422
7423 rcu_read_lock();
7424 }
7425 }
7426 kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
7427
7428 rcu_read_unlock();
7429
7430 write_unlock(&kvm->mmu_lock);
7431 srcu_read_unlock(&kvm->srcu, rcu_idx);
7432 }
7433
7434 static void kvm_nx_huge_page_recovery_worker_kill(void *data)
7435 {
7436 }
7437
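/*
 * Body of the NX huge page recovery vhost_task: wait out the remainder of
 * the recovery period, then zap a batch of NX-disallowed huge pages.
 * Returns false when recovery is disabled, true otherwise.
 */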
7438 static bool kvm_nx_huge_page_recovery_worker(void *data)
7439 {
7440 struct kvm *kvm = data;
7441 bool enabled;
7442 uint period;
7443 long remaining_time;
7444
7445 enabled = calc_nx_huge_pages_recovery_period(&period);
7446 if (!enabled)
7447 return false;
7448
7449 remaining_time = kvm->arch.nx_huge_page_last + msecs_to_jiffies(period)
7450 - get_jiffies_64();
7451 if (remaining_time > 0) {
7452 schedule_timeout(remaining_time);
7453 /* check for signals and come back */
7454 return true;
7455 }
7456
7457 __set_current_state(TASK_RUNNING);
7458 kvm_recover_nx_huge_pages(kvm);
7459 kvm->arch.nx_huge_page_last = get_jiffies_64();
7460 return true;
7461 }
7462
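/*
 * Invoked via call_once() from kvm_mmu_post_init_vm(): create and start the
 * per-VM NX huge page recovery vhost_task, and only then make it visible.
 */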
7463 static int kvm_mmu_start_lpage_recovery(struct once *once)
7464 {
7465 struct kvm_arch *ka = container_of(once, struct kvm_arch, nx_once);
7466 struct kvm *kvm = container_of(ka, struct kvm, arch);
7467 struct vhost_task *nx_thread;
7468
7469 kvm->arch.nx_huge_page_last = get_jiffies_64();
7470 nx_thread = vhost_task_create(kvm_nx_huge_page_recovery_worker,
7471 kvm_nx_huge_page_recovery_worker_kill,
7472 kvm, "kvm-nx-lpage-recovery");
7473
7474 if (IS_ERR(nx_thread))
7475 return PTR_ERR(nx_thread);
7476
7477 vhost_task_start(nx_thread);
7478
7479 /* Make the task visible only once it is fully started. */
7480 WRITE_ONCE(kvm->arch.nx_huge_page_recovery_thread, nx_thread);
7481 return 0;
7482 }
7483
7484 int kvm_mmu_post_init_vm(struct kvm *kvm)
7485 {
7486 if (nx_hugepage_mitigation_hard_disabled)
7487 return 0;
7488
7489 return call_once(&kvm->arch.nx_once, kvm_mmu_start_lpage_recovery);
7490 }
7491
7492 void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
7493 {
7494 if (kvm->arch.nx_huge_page_recovery_thread)
7495 vhost_task_stop(kvm->arch.nx_huge_page_recovery_thread);
7496 }
7497
7498 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
7499 bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
7500 struct kvm_gfn_range *range)
7501 {
7502 /*
7503 * Zap SPTEs even if the slot can't be mapped PRIVATE. KVM x86 only
7504 * supports KVM_MEMORY_ATTRIBUTE_PRIVATE, and so it *seems* like KVM
7505 * can simply ignore such slots. But if userspace is making memory
7506 * PRIVATE, then KVM must prevent the guest from accessing the memory
7507 * as shared. And if userspace is making memory SHARED and this point
7508 * is reached, then at least one page within the range was previously
7509 * PRIVATE, i.e. the slot's possible hugepage ranges are changing.
7510 * Zapping SPTEs in this case ensures KVM will reassess whether or not
7511 * a hugepage can be used for affected ranges.
7512 */
7513 if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
7514 return false;
7515
7516 /* Unmap the old attribute page. */
7517 if (range->arg.attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE)
7518 range->attr_filter = KVM_FILTER_SHARED;
7519 else
7520 range->attr_filter = KVM_FILTER_PRIVATE;
7521
7522 return kvm_unmap_gfn_range(kvm, range);
7523 }
7524
7525 static bool hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
7526 int level)
7527 {
7528 return lpage_info_slot(gfn, slot, level)->disallow_lpage & KVM_LPAGE_MIXED_FLAG;
7529 }
7530
7531 static void hugepage_clear_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
7532 int level)
7533 {
7534 lpage_info_slot(gfn, slot, level)->disallow_lpage &= ~KVM_LPAGE_MIXED_FLAG;
7535 }
7536
7537 static void hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
7538 int level)
7539 {
7540 lpage_info_slot(gfn, slot, level)->disallow_lpage |= KVM_LPAGE_MIXED_FLAG;
7541 }
7542
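/*
 * Returns true if every GFN covered by the hugepage has exactly @attrs, by
 * checking attributes directly for 2M pages, and the next lower level's
 * "mixed" tracking plus attributes for larger pages.
 */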
7543 static bool hugepage_has_attrs(struct kvm *kvm, struct kvm_memory_slot *slot,
7544 gfn_t gfn, int level, unsigned long attrs)
7545 {
7546 const unsigned long start = gfn;
7547 const unsigned long end = start + KVM_PAGES_PER_HPAGE(level);
7548
7549 if (level == PG_LEVEL_2M)
7550 return kvm_range_has_memory_attributes(kvm, start, end, ~0, attrs);
7551
7552 for (gfn = start; gfn < end; gfn += KVM_PAGES_PER_HPAGE(level - 1)) {
7553 if (hugepage_test_mixed(slot, gfn, level - 1) ||
7554 attrs != kvm_get_memory_attributes(kvm, gfn))
7555 return false;
7556 }
7557 return true;
7558 }
7559
7560 bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
7561 struct kvm_gfn_range *range)
7562 {
7563 unsigned long attrs = range->arg.attributes;
7564 struct kvm_memory_slot *slot = range->slot;
7565 int level;
7566
7567 lockdep_assert_held_write(&kvm->mmu_lock);
7568 lockdep_assert_held(&kvm->slots_lock);
7569
7570 /*
7571 * Calculate which ranges can be mapped with hugepages even if the slot
7572 * can't map memory PRIVATE. KVM mustn't create a SHARED hugepage over
7573 * a range that has PRIVATE GFNs, and conversely converting a range to
7574 * SHARED may now allow hugepages.
7575 */
7576 if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
7577 return false;
7578
7579 /*
7580 * The sequence matters here: upper levels consume the result of lower
7581 * level's scanning.
7582 */
7583 for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
7584 gfn_t nr_pages = KVM_PAGES_PER_HPAGE(level);
7585 gfn_t gfn = gfn_round_for_level(range->start, level);
7586
7587 /* Process the head page if it straddles the range. */
7588 if (gfn != range->start || gfn + nr_pages > range->end) {
7589 /*
7590 * Skip mixed tracking if the aligned gfn isn't covered
7591 * by the memslot; KVM can't use a hugepage due to the
7592 * misaligned address regardless of memory attributes.
7593 */
7594 if (gfn >= slot->base_gfn &&
7595 gfn + nr_pages <= slot->base_gfn + slot->npages) {
7596 if (hugepage_has_attrs(kvm, slot, gfn, level, attrs))
7597 hugepage_clear_mixed(slot, gfn, level);
7598 else
7599 hugepage_set_mixed(slot, gfn, level);
7600 }
7601 gfn += nr_pages;
7602 }
7603
7604 /*
7605 * Pages entirely covered by the range are guaranteed to have
7606 * only the attributes which were just set.
7607 */
7608 for ( ; gfn + nr_pages <= range->end; gfn += nr_pages)
7609 hugepage_clear_mixed(slot, gfn, level);
7610
7611 /*
7612 * Process the last tail page if it straddles the range and is
7613 * contained by the memslot. Like the head page, KVM can't
7614 * create a hugepage if the slot size is misaligned.
7615 */
7616 if (gfn < range->end &&
7617 (gfn + nr_pages) <= (slot->base_gfn + slot->npages)) {
7618 if (hugepage_has_attrs(kvm, slot, gfn, level, attrs))
7619 hugepage_clear_mixed(slot, gfn, level);
7620 else
7621 hugepage_set_mixed(slot, gfn, level);
7622 }
7623 }
7624 return false;
7625 }
7626
7627 void kvm_mmu_init_memslot_memory_attributes(struct kvm *kvm,
7628 struct kvm_memory_slot *slot)
7629 {
7630 int level;
7631
7632 if (!kvm_arch_has_private_mem(kvm))
7633 return;
7634
7635 for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
7636 /*
7637 * Don't bother tracking mixed attributes for pages that can't
7638 * be huge due to alignment, i.e. process only pages that are
7639 * entirely contained by the memslot.
7640 */
7641 gfn_t end = gfn_round_for_level(slot->base_gfn + slot->npages, level);
7642 gfn_t start = gfn_round_for_level(slot->base_gfn, level);
7643 gfn_t nr_pages = KVM_PAGES_PER_HPAGE(level);
7644 gfn_t gfn;
7645
7646 if (start < slot->base_gfn)
7647 start += nr_pages;
7648
7649 /*
7650 * Unlike setting attributes, every potential hugepage needs to
7651 * be manually checked as the attributes may already be mixed.
7652 */
7653 for (gfn = start; gfn < end; gfn += nr_pages) {
7654 unsigned long attrs = kvm_get_memory_attributes(kvm, gfn);
7655
7656 if (hugepage_has_attrs(kvm, slot, gfn, level, attrs))
7657 hugepage_clear_mixed(slot, gfn, level);
7658 else
7659 hugepage_set_mixed(slot, gfn, level);
7660 }
7661 }
7662 }
7663 #endif
7664