1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Kernel-based Virtual Machine driver for Linux
4 *
5 * This module enables machines with Intel VT-x extensions to run virtual
6 * machines without emulation or binary translation.
7 *
8 * MMU support
9 *
10 * Copyright (C) 2006 Qumranet, Inc.
11 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
12 *
13 * Authors:
14 * Yaniv Kamay <yaniv@qumranet.com>
15 * Avi Kivity <avi@qumranet.com>
16 */
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19 #include "irq.h"
20 #include "ioapic.h"
21 #include "mmu.h"
22 #include "mmu_internal.h"
23 #include "tdp_mmu.h"
24 #include "x86.h"
25 #include "kvm_cache_regs.h"
26 #include "smm.h"
27 #include "kvm_emulate.h"
28 #include "page_track.h"
29 #include "cpuid.h"
30 #include "spte.h"
31
32 #include <linux/kvm_host.h>
33 #include <linux/types.h>
34 #include <linux/string.h>
35 #include <linux/mm.h>
36 #include <linux/highmem.h>
37 #include <linux/moduleparam.h>
38 #include <linux/export.h>
39 #include <linux/swap.h>
40 #include <linux/hugetlb.h>
41 #include <linux/compiler.h>
42 #include <linux/srcu.h>
43 #include <linux/slab.h>
44 #include <linux/sched/signal.h>
45 #include <linux/uaccess.h>
46 #include <linux/hash.h>
47 #include <linux/kern_levels.h>
48 #include <linux/kstrtox.h>
49 #include <linux/kthread.h>
50 #include <linux/wordpart.h>
51
52 #include <asm/page.h>
53 #include <asm/memtype.h>
54 #include <asm/cmpxchg.h>
55 #include <asm/io.h>
56 #include <asm/set_memory.h>
57 #include <asm/spec-ctrl.h>
58 #include <asm/vmx.h>
59
60 #include "trace.h"
61
62 static bool nx_hugepage_mitigation_hard_disabled;
63
64 int __read_mostly nx_huge_pages = -1;
65 static uint __read_mostly nx_huge_pages_recovery_period_ms;
66 #ifdef CONFIG_PREEMPT_RT
67 /* Recovery can cause latency spikes, disable it for PREEMPT_RT. */
68 static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
69 #else
70 static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
71 #endif
72
73 static int get_nx_huge_pages(char *buffer, const struct kernel_param *kp);
74 static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
75 static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp);
76
77 static const struct kernel_param_ops nx_huge_pages_ops = {
78 .set = set_nx_huge_pages,
79 .get = get_nx_huge_pages,
80 };
81
82 static const struct kernel_param_ops nx_huge_pages_recovery_param_ops = {
83 .set = set_nx_huge_pages_recovery_param,
84 .get = param_get_uint,
85 };
86
87 module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644);
88 __MODULE_PARM_TYPE(nx_huge_pages, "bool");
89 module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_param_ops,
90 &nx_huge_pages_recovery_ratio, 0644);
91 __MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint");
92 module_param_cb(nx_huge_pages_recovery_period_ms, &nx_huge_pages_recovery_param_ops,
93 &nx_huge_pages_recovery_period_ms, 0644);
94 __MODULE_PARM_TYPE(nx_huge_pages_recovery_period_ms, "uint");
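/*
 * Note, all three parameters above are registered with mode 0644, so they can
 * also be changed at runtime via sysfs, e.g. by writing to
 * /sys/module/kvm/parameters/nx_huge_pages or
 * /sys/module/kvm/parameters/nx_huge_pages_recovery_ratio.
 */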
95
96 static bool __read_mostly force_flush_and_sync_on_reuse;
97 module_param_named(flush_on_reuse, force_flush_and_sync_on_reuse, bool, 0644);
98
99 /*
100 * Setting this variable to true enables Two-Dimensional Paging (TDP),
101 * where the hardware walks 2 page tables:
102 * 1. the guest-virtual to guest-physical
103 * 2. while doing 1., it walks guest-physical to host-physical
104 * If the hardware supports TDP, we don't need to do shadow paging.
105 */
106 bool tdp_enabled = false;
107
108 static bool __ro_after_init tdp_mmu_allowed;
109
110 #ifdef CONFIG_X86_64
111 bool __read_mostly tdp_mmu_enabled = true;
112 module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0444);
113 #endif
114
115 static int max_huge_page_level __read_mostly;
116 static int tdp_root_level __read_mostly;
117 static int max_tdp_level __read_mostly;
118
119 #define PTE_PREFETCH_NUM 8
120
121 #include <trace/events/kvm.h>
122
123 /* make pte_list_desc fit well in cache lines */
124 #define PTE_LIST_EXT 14
125
126 /*
127 * struct pte_list_desc is the core data structure used to implement a custom
128 * list for tracking a set of related SPTEs, e.g. all the SPTEs that map a
129 * given GFN when used in the context of rmaps. Using a custom list allows KVM
130 * to optimize for the common case where many GFNs will have at most a handful
131 * of SPTEs pointing at them, i.e. allows packing multiple SPTEs into a small
132 * memory footprint, which in turn improves runtime performance by exploiting
133 * cache locality.
134 *
135 * A list is comprised of one or more pte_list_desc objects (descriptors).
136 * Each individual descriptor stores up to PTE_LIST_EXT SPTEs. If a descriptor
137 * is full and a new SPTE needs to be added, a new descriptor is allocated and
138 * becomes the head of the list. This means that, by definition, all tail
139 * descriptors are full.
140 *
141 * Note, the metadata fields are deliberately placed at the start of the
142 * structure to optimize the cacheline layout; accessing the descriptor will
143 * touch only a single cacheline so long as @spte_count<=6 (or if only the
144 * descriptor's metadata is accessed).
145 */
146 struct pte_list_desc {
147 struct pte_list_desc *more;
148 /* The number of PTEs stored in _this_ descriptor. */
149 u32 spte_count;
150 /* The number of PTEs stored in all tails of this descriptor. */
151 u32 tail_count;
152 u64 *sptes[PTE_LIST_EXT];
153 };
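/*
 * For reference, and assuming 64-bit pointers: the layout above is
 * 8 (more) + 4 (spte_count) + 4 (tail_count) + 14 * 8 (sptes) = 128 bytes,
 * i.e. exactly two 64-byte cachelines, with the 16 bytes of metadata and
 * sptes[0..5] sharing the first cacheline (hence the "@spte_count<=6" note
 * above).
 */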
154
155 struct kvm_shadow_walk_iterator {
156 u64 addr;
157 hpa_t shadow_addr;
158 u64 *sptep;
159 int level;
160 unsigned index;
161 };
162
163 #define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker) \
164 for (shadow_walk_init_using_root(&(_walker), (_vcpu), \
165 (_root), (_addr)); \
166 shadow_walk_okay(&(_walker)); \
167 shadow_walk_next(&(_walker)))
168
169 #define for_each_shadow_entry(_vcpu, _addr, _walker) \
170 for (shadow_walk_init(&(_walker), _vcpu, _addr); \
171 shadow_walk_okay(&(_walker)); \
172 shadow_walk_next(&(_walker)))
173
174 #define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte) \
175 for (shadow_walk_init(&(_walker), _vcpu, _addr); \
176 shadow_walk_okay(&(_walker)) && \
177 ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; }); \
178 __shadow_walk_next(&(_walker), spte))
179
180 static struct kmem_cache *pte_list_desc_cache;
181 struct kmem_cache *mmu_page_header_cache;
182 static struct percpu_counter kvm_total_used_mmu_pages;
183
184 static void mmu_spte_set(u64 *sptep, u64 spte);
185
186 struct kvm_mmu_role_regs {
187 const unsigned long cr0;
188 const unsigned long cr4;
189 const u64 efer;
190 };
191
192 #define CREATE_TRACE_POINTS
193 #include "mmutrace.h"
194
195 /*
196 * Yes, lots of underscores. They're a hint that you probably shouldn't be
197 * reading from the role_regs. Once the root_role is constructed, it becomes
198 * the single source of truth for the MMU's state.
199 */
200 #define BUILD_MMU_ROLE_REGS_ACCESSOR(reg, name, flag) \
201 static inline bool __maybe_unused \
202 ____is_##reg##_##name(const struct kvm_mmu_role_regs *regs) \
203 { \
204 return !!(regs->reg & flag); \
205 }
206 BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, pg, X86_CR0_PG);
207 BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, wp, X86_CR0_WP);
208 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pse, X86_CR4_PSE);
209 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pae, X86_CR4_PAE);
210 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smep, X86_CR4_SMEP);
211 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smap, X86_CR4_SMAP);
212 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pke, X86_CR4_PKE);
213 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, la57, X86_CR4_LA57);
214 BUILD_MMU_ROLE_REGS_ACCESSOR(efer, nx, EFER_NX);
215 BUILD_MMU_ROLE_REGS_ACCESSOR(efer, lma, EFER_LMA);
216
217 /*
218 * The MMU itself (with a valid role) is the single source of truth for the
219 * MMU. Do not use the regs used to build the MMU/role, nor the vCPU. The
220 * regs don't account for dependencies, e.g. clearing CR4 bits if CR0.PG=1,
221 * and the vCPU may be incorrect/irrelevant.
222 */
223 #define BUILD_MMU_ROLE_ACCESSOR(base_or_ext, reg, name) \
224 static inline bool __maybe_unused is_##reg##_##name(struct kvm_mmu *mmu) \
225 { \
226 return !!(mmu->cpu_role. base_or_ext . reg##_##name); \
227 }
228 BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp);
229 BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pse);
230 BUILD_MMU_ROLE_ACCESSOR(ext, cr4, smep);
231 BUILD_MMU_ROLE_ACCESSOR(ext, cr4, smap);
232 BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pke);
233 BUILD_MMU_ROLE_ACCESSOR(ext, cr4, la57);
234 BUILD_MMU_ROLE_ACCESSOR(base, efer, nx);
235 BUILD_MMU_ROLE_ACCESSOR(ext, efer, lma);
236
237 static inline bool is_cr0_pg(struct kvm_mmu *mmu)
238 {
239 return mmu->cpu_role.base.level > 0;
240 }
241
242 static inline bool is_cr4_pae(struct kvm_mmu *mmu)
243 {
244 return !mmu->cpu_role.base.has_4_byte_gpte;
245 }
246
247 static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
248 {
249 struct kvm_mmu_role_regs regs = {
250 .cr0 = kvm_read_cr0_bits(vcpu, KVM_MMU_CR0_ROLE_BITS),
251 .cr4 = kvm_read_cr4_bits(vcpu, KVM_MMU_CR4_ROLE_BITS),
252 .efer = vcpu->arch.efer,
253 };
254
255 return regs;
256 }
257
258 static unsigned long get_guest_cr3(struct kvm_vcpu *vcpu)
259 {
260 return kvm_read_cr3(vcpu);
261 }
262
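/*
 * Get the root of the guest's page tables for the given MMU.  The common
 * get_guest_cr3() case is open coded to avoid a retpoline-induced indirect
 * call when CONFIG_MITIGATION_RETPOLINE is enabled.
 */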
263 static inline unsigned long kvm_mmu_get_guest_pgd(struct kvm_vcpu *vcpu,
264 struct kvm_mmu *mmu)
265 {
266 if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) && mmu->get_guest_pgd == get_guest_cr3)
267 return kvm_read_cr3(vcpu);
268
269 return mmu->get_guest_pgd(vcpu);
270 }
271
272 static inline bool kvm_available_flush_remote_tlbs_range(void)
273 {
274 #if IS_ENABLED(CONFIG_HYPERV)
275 return kvm_x86_ops.flush_remote_tlbs_range;
276 #else
277 return false;
278 #endif
279 }
280
281 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index);
282
283 /* Flush the range of guest memory mapped by the given SPTE. */
284 static void kvm_flush_remote_tlbs_sptep(struct kvm *kvm, u64 *sptep)
285 {
286 struct kvm_mmu_page *sp = sptep_to_sp(sptep);
287 gfn_t gfn = kvm_mmu_page_get_gfn(sp, spte_index(sptep));
288
289 kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level);
290 }
291
292 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
293 unsigned int access)
294 {
295 u64 spte = make_mmio_spte(vcpu, gfn, access);
296
297 trace_mark_mmio_spte(sptep, gfn, spte);
298 mmu_spte_set(sptep, spte);
299 }
300
301 static gfn_t get_mmio_spte_gfn(u64 spte)
302 {
303 u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
304
305 gpa |= (spte >> SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)
306 & shadow_nonpresent_or_rsvd_mask;
307
308 return gpa >> PAGE_SHIFT;
309 }
310
311 static unsigned get_mmio_spte_access(u64 spte)
312 {
313 return spte & shadow_mmio_access_mask;
314 }
315
316 static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
317 {
318 u64 kvm_gen, spte_gen, gen;
319
320 gen = kvm_vcpu_memslots(vcpu)->generation;
321 if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
322 return false;
323
324 kvm_gen = gen & MMIO_SPTE_GEN_MASK;
325 spte_gen = get_mmio_spte_generation(spte);
326
327 trace_check_mmio_spte(spte, kvm_gen, spte_gen);
328 return likely(kvm_gen == spte_gen);
329 }
330
331 static int is_cpuid_PSE36(void)
332 {
333 return 1;
334 }
335
336 #ifdef CONFIG_X86_64
337 static void __set_spte(u64 *sptep, u64 spte)
338 {
339 KVM_MMU_WARN_ON(is_ept_ve_possible(spte));
340 WRITE_ONCE(*sptep, spte);
341 }
342
343 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
344 {
345 KVM_MMU_WARN_ON(is_ept_ve_possible(spte));
346 WRITE_ONCE(*sptep, spte);
347 }
348
349 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
350 {
351 KVM_MMU_WARN_ON(is_ept_ve_possible(spte));
352 return xchg(sptep, spte);
353 }
354
355 static u64 __get_spte_lockless(u64 *sptep)
356 {
357 return READ_ONCE(*sptep);
358 }
359 #else
360 union split_spte {
361 struct {
362 u32 spte_low;
363 u32 spte_high;
364 };
365 u64 spte;
366 };
367
368 static void count_spte_clear(u64 *sptep, u64 spte)
369 {
370 struct kvm_mmu_page *sp = sptep_to_sp(sptep);
371
372 if (is_shadow_present_pte(spte))
373 return;
374
375 /* Ensure the spte is completely set before we increase the count */
376 smp_wmb();
377 sp->clear_spte_count++;
378 }
379
380 static void __set_spte(u64 *sptep, u64 spte)
381 {
382 union split_spte *ssptep, sspte;
383
384 ssptep = (union split_spte *)sptep;
385 sspte = (union split_spte)spte;
386
387 ssptep->spte_high = sspte.spte_high;
388
389 /*
390 * If we map the spte from nonpresent to present, we should store
391 * the high bits first, then set the present bit, so the CPU cannot
392 * fetch this spte while we are setting it.
393 */
394 smp_wmb();
395
396 WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
397 }
398
399 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
400 {
401 union split_spte *ssptep, sspte;
402
403 ssptep = (union split_spte *)sptep;
404 sspte = (union split_spte)spte;
405
406 WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
407
408 /*
409 * If we map the spte from present to nonpresent, we should clear
410 * the present bit first so the vCPU cannot fetch the old high bits.
411 */
412 smp_wmb();
413
414 ssptep->spte_high = sspte.spte_high;
415 count_spte_clear(sptep, spte);
416 }
417
418 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
419 {
420 union split_spte *ssptep, sspte, orig;
421
422 ssptep = (union split_spte *)sptep;
423 sspte = (union split_spte)spte;
424
425 /* xchg acts as a barrier before the setting of the high bits */
426 orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
427 orig.spte_high = ssptep->spte_high;
428 ssptep->spte_high = sspte.spte_high;
429 count_spte_clear(sptep, spte);
430
431 return orig.spte;
432 }
433
434 /*
435 * The idea of using a lightweight way to get the spte on 32-bit x86 comes
436 * from gup_get_pte (mm/gup.c).
437 *
438 * An spte TLB flush may be pending, because flushes are coalesced and
439 * we are running outside of the MMU lock. Therefore
440 * we need to protect against in-progress updates of the spte.
441 *
442 * Reading the spte while an update is in progress may get the old value
443 * for the high part of the spte. The race is fine for a present->non-present
444 * change (because the high part of the spte is ignored for non-present spte),
445 * but for a present->present change we must reread the spte.
446 *
447 * All such changes are done in two steps (present->non-present and
448 * non-present->present), hence it is enough to count the number of
449 * present->non-present updates: if it changed while reading the spte,
450 * we might have hit the race. This is done using clear_spte_count.
451 */
452 static u64 __get_spte_lockless(u64 *sptep)
453 {
454 struct kvm_mmu_page *sp = sptep_to_sp(sptep);
455 union split_spte spte, *orig = (union split_spte *)sptep;
456 int count;
457
458 retry:
459 count = sp->clear_spte_count;
460 smp_rmb();
461
462 spte.spte_low = orig->spte_low;
463 smp_rmb();
464
465 spte.spte_high = orig->spte_high;
466 smp_rmb();
467
468 if (unlikely(spte.spte_low != orig->spte_low ||
469 count != sp->clear_spte_count))
470 goto retry;
471
472 return spte.spte;
473 }
474 #endif
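/*
 * Note, both the CONFIG_X86_64 and 32-bit variants above implement the same
 * four primitives (__set_spte(), __update_clear_spte_fast(),
 * __update_clear_spte_slow() and __get_spte_lockless()); the code below is
 * written purely against that common interface.
 */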
475
476 /* Rules for using mmu_spte_set:
477 * Set the sptep from nonpresent to present.
478 * Note: the sptep being assigned *must* be either not present
479 * or in a state where the hardware will not attempt to update
480 * the spte.
481 */
482 static void mmu_spte_set(u64 *sptep, u64 new_spte)
483 {
484 WARN_ON_ONCE(is_shadow_present_pte(*sptep));
485 __set_spte(sptep, new_spte);
486 }
487
488 /*
489 * Update the SPTE (excluding the PFN), but do not track changes in its
490 * accessed/dirty status.
491 */
492 static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
493 {
494 u64 old_spte = *sptep;
495
496 WARN_ON_ONCE(!is_shadow_present_pte(new_spte));
497 check_spte_writable_invariants(new_spte);
498
499 if (!is_shadow_present_pte(old_spte)) {
500 mmu_spte_set(sptep, new_spte);
501 return old_spte;
502 }
503
504 if (!spte_has_volatile_bits(old_spte))
505 __update_clear_spte_fast(sptep, new_spte);
506 else
507 old_spte = __update_clear_spte_slow(sptep, new_spte);
508
509 WARN_ON_ONCE(spte_to_pfn(old_spte) != spte_to_pfn(new_spte));
510
511 return old_spte;
512 }
513
514 /* Rules for using mmu_spte_update:
515 * Update the state bits; the mapped pfn is not changed.
516 *
517 * Whenever an MMU-writable SPTE is overwritten with a read-only SPTE, remote
518 * TLBs must be flushed. Otherwise rmap_write_protect will find a read-only
519 * spte, even though the writable spte might be cached on a CPU's TLB.
520 *
521 * Returns true if the TLB needs to be flushed
522 */
523 static bool mmu_spte_update(u64 *sptep, u64 new_spte)
524 {
525 bool flush = false;
526 u64 old_spte = mmu_spte_update_no_track(sptep, new_spte);
527
528 if (!is_shadow_present_pte(old_spte))
529 return false;
530
531 /*
532 * Updating the spte outside of mmu_lock is safe, since
533 * we always update it atomically; see the comments in
534 * spte_has_volatile_bits().
535 */
536 if (is_mmu_writable_spte(old_spte) &&
537 !is_writable_pte(new_spte))
538 flush = true;
539
540 /*
541 * Flush TLB when accessed/dirty states are changed in the page tables,
542 * to guarantee consistency between TLB and page tables.
543 */
544
545 if (is_accessed_spte(old_spte) && !is_accessed_spte(new_spte)) {
546 flush = true;
547 kvm_set_pfn_accessed(spte_to_pfn(old_spte));
548 }
549
550 if (is_dirty_spte(old_spte) && !is_dirty_spte(new_spte)) {
551 flush = true;
552 kvm_set_pfn_dirty(spte_to_pfn(old_spte));
553 }
554
555 return flush;
556 }
557
558 /*
559 * Rules for using mmu_spte_clear_track_bits:
560 * It sets the sptep from present to nonpresent and tracks the
561 * state bits; it is used to clear a last-level sptep.
562 * Returns the old PTE.
563 */
564 static u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
565 {
566 kvm_pfn_t pfn;
567 u64 old_spte = *sptep;
568 int level = sptep_to_sp(sptep)->role.level;
569 struct page *page;
570
571 if (!is_shadow_present_pte(old_spte) ||
572 !spte_has_volatile_bits(old_spte))
573 __update_clear_spte_fast(sptep, SHADOW_NONPRESENT_VALUE);
574 else
575 old_spte = __update_clear_spte_slow(sptep, SHADOW_NONPRESENT_VALUE);
576
577 if (!is_shadow_present_pte(old_spte))
578 return old_spte;
579
580 kvm_update_page_stats(kvm, level, -1);
581
582 pfn = spte_to_pfn(old_spte);
583
584 /*
585 * KVM doesn't hold a reference to any pages mapped into the guest, and
586 * instead uses the mmu_notifier to ensure that KVM unmaps any pages
587 * before they are reclaimed. Sanity check that, if the pfn is backed
588 * by a refcounted page, the refcount is elevated.
589 */
590 page = kvm_pfn_to_refcounted_page(pfn);
591 WARN_ON_ONCE(page && !page_count(page));
592
593 if (is_accessed_spte(old_spte))
594 kvm_set_pfn_accessed(pfn);
595
596 if (is_dirty_spte(old_spte))
597 kvm_set_pfn_dirty(pfn);
598
599 return old_spte;
600 }
601
602 /*
603 * Rules for using mmu_spte_clear_no_track:
604 * Directly clear the spte without tracking the state bits of the sptep;
605 * it is used to clear an upper-level (non-leaf) spte.
606 */
607 static void mmu_spte_clear_no_track(u64 *sptep)
608 {
609 __update_clear_spte_fast(sptep, SHADOW_NONPRESENT_VALUE);
610 }
611
612 static u64 mmu_spte_get_lockless(u64 *sptep)
613 {
614 return __get_spte_lockless(sptep);
615 }
616
617 /* Returns the Accessed status of the PTE and resets it at the same time. */
618 static bool mmu_spte_age(u64 *sptep)
619 {
620 u64 spte = mmu_spte_get_lockless(sptep);
621
622 if (!is_accessed_spte(spte))
623 return false;
624
625 if (spte_ad_enabled(spte)) {
626 clear_bit((ffs(shadow_accessed_mask) - 1),
627 (unsigned long *)sptep);
628 } else {
629 /*
630 * Capture the dirty status of the page, so that it doesn't get
631 * lost when the SPTE is marked for access tracking.
632 */
633 if (is_writable_pte(spte))
634 kvm_set_pfn_dirty(spte_to_pfn(spte));
635
636 spte = mark_spte_for_access_track(spte);
637 mmu_spte_update_no_track(sptep, spte);
638 }
639
640 return true;
641 }
642
643 static inline bool is_tdp_mmu_active(struct kvm_vcpu *vcpu)
644 {
645 return tdp_mmu_enabled && vcpu->arch.mmu->root_role.direct;
646 }
647
648 static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
649 {
650 if (is_tdp_mmu_active(vcpu)) {
651 kvm_tdp_mmu_walk_lockless_begin();
652 } else {
653 /*
654 * Prevent page table teardown by making any free-er wait during
655 * kvm_flush_remote_tlbs() IPI to all active vcpus.
656 */
657 local_irq_disable();
658
659 /*
660 * Make sure a following spte read is not reordered ahead of the write
661 * to vcpu->mode.
662 */
663 smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
664 }
665 }
666
667 static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
668 {
669 if (is_tdp_mmu_active(vcpu)) {
670 kvm_tdp_mmu_walk_lockless_end();
671 } else {
672 /*
673 * Make sure the write to vcpu->mode is not reordered in front of
674 * reads to sptes. If it does, kvm_mmu_commit_zap_page() can see us
675 * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
676 */
677 smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
678 local_irq_enable();
679 }
680 }
681
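/*
 * Top up the per-vCPU MMU memory caches so that fault handling and friends
 * can take objects from the caches instead of allocating memory while
 * holding mmu_lock.
 */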
682 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
683 {
684 int r;
685
686 /* 1 rmap, 1 parent PTE per level, and the prefetched rmaps. */
687 r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
688 1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
689 if (r)
690 return r;
691 r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
692 PT64_ROOT_MAX_LEVEL);
693 if (r)
694 return r;
695 if (maybe_indirect) {
696 r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadowed_info_cache,
697 PT64_ROOT_MAX_LEVEL);
698 if (r)
699 return r;
700 }
701 return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
702 PT64_ROOT_MAX_LEVEL);
703 }
704
705 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
706 {
707 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
708 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
709 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadowed_info_cache);
710 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
711 }
712
713 static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
714 {
715 kmem_cache_free(pte_list_desc_cache, pte_list_desc);
716 }
717
718 static bool sp_has_gptes(struct kvm_mmu_page *sp);
719
720 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
721 {
722 if (sp->role.passthrough)
723 return sp->gfn;
724
725 if (sp->shadowed_translation)
726 return sp->shadowed_translation[index] >> PAGE_SHIFT;
727
728 return sp->gfn + (index << ((sp->role.level - 1) * SPTE_LEVEL_BITS));
729 }
730
731 /*
732 * For leaf SPTEs, fetch the *guest* access permissions being shadowed. Note
733 * that the SPTE itself may have more constrained access permissions than
734 * what the guest enforces. For example, a guest may create an executable
735 * huge PTE but KVM may disallow execution to mitigate iTLB multihit.
736 */
737 static u32 kvm_mmu_page_get_access(struct kvm_mmu_page *sp, int index)
738 {
739 if (sp->shadowed_translation)
740 return sp->shadowed_translation[index] & ACC_ALL;
741
742 /*
743 * For direct MMUs (e.g. TDP or non-paging guests) or passthrough SPs,
744 * KVM is not shadowing any guest page tables, so the "guest access
745 * permissions" are just ACC_ALL.
746 *
747 * For direct SPs in indirect MMUs (shadow paging), i.e. when KVM
748 * is shadowing a guest huge page with small pages, the guest access
749 * permissions being shadowed are the access permissions of the huge
750 * page.
751 *
752 * In both cases, sp->role.access contains the correct access bits.
753 */
754 return sp->role.access;
755 }
756
757 static void kvm_mmu_page_set_translation(struct kvm_mmu_page *sp, int index,
758 gfn_t gfn, unsigned int access)
759 {
760 if (sp->shadowed_translation) {
761 sp->shadowed_translation[index] = (gfn << PAGE_SHIFT) | access;
762 return;
763 }
764
765 WARN_ONCE(access != kvm_mmu_page_get_access(sp, index),
766 "access mismatch under %s page %llx (expected %u, got %u)\n",
767 sp->role.passthrough ? "passthrough" : "direct",
768 sp->gfn, kvm_mmu_page_get_access(sp, index), access);
769
770 WARN_ONCE(gfn != kvm_mmu_page_get_gfn(sp, index),
771 "gfn mismatch under %s page %llx (expected %llx, got %llx)\n",
772 sp->role.passthrough ? "passthrough" : "direct",
773 sp->gfn, kvm_mmu_page_get_gfn(sp, index), gfn);
774 }
775
776 static void kvm_mmu_page_set_access(struct kvm_mmu_page *sp, int index,
777 unsigned int access)
778 {
779 gfn_t gfn = kvm_mmu_page_get_gfn(sp, index);
780
781 kvm_mmu_page_set_translation(sp, index, gfn, access);
782 }
783
784 /*
785 * Return the pointer to the large page information for a given gfn,
786 * handling slots that are not large page aligned.
787 */
788 static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
789 const struct kvm_memory_slot *slot, int level)
790 {
791 unsigned long idx;
792
793 idx = gfn_to_index(gfn, slot->base_gfn, level);
794 return &slot->arch.lpage_info[level - 2][idx];
795 }
796
797 /*
798 * The most significant bit in disallow_lpage tracks whether or not memory
799 * attributes are mixed, i.e. not identical for all gfns at the current level.
800 * The lower order bits are used to refcount other cases where a hugepage is
801 * disallowed, e.g. if KVM is shadowing a page table at the gfn.
802 */
803 #define KVM_LPAGE_MIXED_FLAG BIT(31)
804
805 static void update_gfn_disallow_lpage_count(const struct kvm_memory_slot *slot,
806 gfn_t gfn, int count)
807 {
808 struct kvm_lpage_info *linfo;
809 int old, i;
810
811 for (i = PG_LEVEL_2M; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
812 linfo = lpage_info_slot(gfn, slot, i);
813
814 old = linfo->disallow_lpage;
815 linfo->disallow_lpage += count;
816 WARN_ON_ONCE((old ^ linfo->disallow_lpage) & KVM_LPAGE_MIXED_FLAG);
817 }
818 }
819
820 void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
821 {
822 update_gfn_disallow_lpage_count(slot, gfn, 1);
823 }
824
825 void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
826 {
827 update_gfn_disallow_lpage_count(slot, gfn, -1);
828 }
829
830 static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
831 {
832 struct kvm_memslots *slots;
833 struct kvm_memory_slot *slot;
834 gfn_t gfn;
835
836 kvm->arch.indirect_shadow_pages++;
837 /*
838 * Ensure indirect_shadow_pages is elevated prior to re-reading guest
839 * child PTEs in FNAME(gpte_changed), i.e. guarantee either in-flight
840 * emulated writes are visible before re-reading guest PTEs, or that
841 * an emulated write will see the elevated count and acquire mmu_lock
842 * to update SPTEs. Pairs with the smp_mb() in kvm_mmu_track_write().
843 */
844 smp_mb();
845
846 gfn = sp->gfn;
847 slots = kvm_memslots_for_spte_role(kvm, sp->role);
848 slot = __gfn_to_memslot(slots, gfn);
849
850 /* Non-leaf shadow pages are kept read-only (write-tracked). */
851 if (sp->role.level > PG_LEVEL_4K)
852 return __kvm_write_track_add_gfn(kvm, slot, gfn);
853
854 kvm_mmu_gfn_disallow_lpage(slot, gfn);
855
856 if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K))
857 kvm_flush_remote_tlbs_gfn(kvm, gfn, PG_LEVEL_4K);
858 }
859
860 void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
861 {
862 /*
863 * If it's possible to replace the shadow page with an NX huge page,
864 * i.e. if the shadow page is the only thing currently preventing KVM
865 * from using a huge page, add the shadow page to the list of "to be
866 * zapped for NX recovery" pages. Note, the shadow page can already be
867 * on the list if KVM is reusing an existing shadow page, i.e. if KVM
868 * links a shadow page at multiple points.
869 */
870 if (!list_empty(&sp->possible_nx_huge_page_link))
871 return;
872
873 ++kvm->stat.nx_lpage_splits;
874 list_add_tail(&sp->possible_nx_huge_page_link,
875 &kvm->arch.possible_nx_huge_pages);
876 }
877
878 static void account_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp,
879 bool nx_huge_page_possible)
880 {
881 sp->nx_huge_page_disallowed = true;
882
883 if (nx_huge_page_possible)
884 track_possible_nx_huge_page(kvm, sp);
885 }
886
887 static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
888 {
889 struct kvm_memslots *slots;
890 struct kvm_memory_slot *slot;
891 gfn_t gfn;
892
893 kvm->arch.indirect_shadow_pages--;
894 gfn = sp->gfn;
895 slots = kvm_memslots_for_spte_role(kvm, sp->role);
896 slot = __gfn_to_memslot(slots, gfn);
897 if (sp->role.level > PG_LEVEL_4K)
898 return __kvm_write_track_remove_gfn(kvm, slot, gfn);
899
900 kvm_mmu_gfn_allow_lpage(slot, gfn);
901 }
902
903 void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
904 {
905 if (list_empty(&sp->possible_nx_huge_page_link))
906 return;
907
908 --kvm->stat.nx_lpage_splits;
909 list_del_init(&sp->possible_nx_huge_page_link);
910 }
911
912 static void unaccount_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
913 {
914 sp->nx_huge_page_disallowed = false;
915
916 untrack_possible_nx_huge_page(kvm, sp);
917 }
918
919 static struct kvm_memory_slot *gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu,
920 gfn_t gfn,
921 bool no_dirty_log)
922 {
923 struct kvm_memory_slot *slot;
924
925 slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
926 if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
927 return NULL;
928 if (no_dirty_log && kvm_slot_dirty_track_enabled(slot))
929 return NULL;
930
931 return slot;
932 }
933
934 /*
935 * About rmap_head encoding:
936 *
937 * If the bit zero of rmap_head->val is clear, then it points to the only spte
938 * in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct
939 * pte_list_desc containing more mappings.
940 */
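/*
 * For example (illustrative values only): a single-entry chain stores the
 * sptep directly, i.e. rmap_head->val == (unsigned long)sptep, whereas a
 * multi-entry chain stores a tagged pointer, i.e. rmap_head->val ==
 * (unsigned long)desc | 1, so that (rmap_head->val & ~1ul) recovers the
 * pte_list_desc.
 */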
941
942 /*
943 * Returns the number of pointers in the rmap chain, not counting the new one.
944 */
945 static int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte,
946 struct kvm_rmap_head *rmap_head)
947 {
948 struct pte_list_desc *desc;
949 int count = 0;
950
951 if (!rmap_head->val) {
952 rmap_head->val = (unsigned long)spte;
953 } else if (!(rmap_head->val & 1)) {
954 desc = kvm_mmu_memory_cache_alloc(cache);
955 desc->sptes[0] = (u64 *)rmap_head->val;
956 desc->sptes[1] = spte;
957 desc->spte_count = 2;
958 desc->tail_count = 0;
959 rmap_head->val = (unsigned long)desc | 1;
960 ++count;
961 } else {
962 desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
963 count = desc->tail_count + desc->spte_count;
964
965 /*
966 * If the previous head is full, allocate a new head descriptor
967 * as tail descriptors are always kept full.
968 */
969 if (desc->spte_count == PTE_LIST_EXT) {
970 desc = kvm_mmu_memory_cache_alloc(cache);
971 desc->more = (struct pte_list_desc *)(rmap_head->val & ~1ul);
972 desc->spte_count = 0;
973 desc->tail_count = count;
974 rmap_head->val = (unsigned long)desc | 1;
975 }
976 desc->sptes[desc->spte_count++] = spte;
977 }
978 return count;
979 }
980
981 static void pte_list_desc_remove_entry(struct kvm *kvm,
982 struct kvm_rmap_head *rmap_head,
983 struct pte_list_desc *desc, int i)
984 {
985 struct pte_list_desc *head_desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
986 int j = head_desc->spte_count - 1;
987
988 /*
989 * The head descriptor should never be empty. A new head is added only
990 * when adding an entry and the previous head is full, and heads are
991 * removed (this flow) when they become empty.
992 */
993 KVM_BUG_ON_DATA_CORRUPTION(j < 0, kvm);
994
995 /*
996 * Replace the to-be-freed SPTE with the last valid entry from the head
997 * descriptor to ensure that tail descriptors are full at all times.
998 * Note, this also means that tail_count is stable for each descriptor.
999 */
1000 desc->sptes[i] = head_desc->sptes[j];
1001 head_desc->sptes[j] = NULL;
1002 head_desc->spte_count--;
1003 if (head_desc->spte_count)
1004 return;
1005
1006 /*
1007 * The head descriptor is empty. If there are no tail descriptors,
1008 * nullify the rmap head to mark the list as empty, else point the rmap
1009 * head at the next descriptor, i.e. the new head.
1010 */
1011 if (!head_desc->more)
1012 rmap_head->val = 0;
1013 else
1014 rmap_head->val = (unsigned long)head_desc->more | 1;
1015 mmu_free_pte_list_desc(head_desc);
1016 }
1017
1018 static void pte_list_remove(struct kvm *kvm, u64 *spte,
1019 struct kvm_rmap_head *rmap_head)
1020 {
1021 struct pte_list_desc *desc;
1022 int i;
1023
1024 if (KVM_BUG_ON_DATA_CORRUPTION(!rmap_head->val, kvm))
1025 return;
1026
1027 if (!(rmap_head->val & 1)) {
1028 if (KVM_BUG_ON_DATA_CORRUPTION((u64 *)rmap_head->val != spte, kvm))
1029 return;
1030
1031 rmap_head->val = 0;
1032 } else {
1033 desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
1034 while (desc) {
1035 for (i = 0; i < desc->spte_count; ++i) {
1036 if (desc->sptes[i] == spte) {
1037 pte_list_desc_remove_entry(kvm, rmap_head,
1038 desc, i);
1039 return;
1040 }
1041 }
1042 desc = desc->more;
1043 }
1044
1045 KVM_BUG_ON_DATA_CORRUPTION(true, kvm);
1046 }
1047 }
1048
1049 static void kvm_zap_one_rmap_spte(struct kvm *kvm,
1050 struct kvm_rmap_head *rmap_head, u64 *sptep)
1051 {
1052 mmu_spte_clear_track_bits(kvm, sptep);
1053 pte_list_remove(kvm, sptep, rmap_head);
1054 }
1055
1056 /* Return true if at least one SPTE was zapped, false otherwise */
1057 static bool kvm_zap_all_rmap_sptes(struct kvm *kvm,
1058 struct kvm_rmap_head *rmap_head)
1059 {
1060 struct pte_list_desc *desc, *next;
1061 int i;
1062
1063 if (!rmap_head->val)
1064 return false;
1065
1066 if (!(rmap_head->val & 1)) {
1067 mmu_spte_clear_track_bits(kvm, (u64 *)rmap_head->val);
1068 goto out;
1069 }
1070
1071 desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
1072
1073 for (; desc; desc = next) {
1074 for (i = 0; i < desc->spte_count; i++)
1075 mmu_spte_clear_track_bits(kvm, desc->sptes[i]);
1076 next = desc->more;
1077 mmu_free_pte_list_desc(desc);
1078 }
1079 out:
1080 /* rmap_head is meaningless now, remember to reset it */
1081 rmap_head->val = 0;
1082 return true;
1083 }
1084
1085 unsigned int pte_list_count(struct kvm_rmap_head *rmap_head)
1086 {
1087 struct pte_list_desc *desc;
1088
1089 if (!rmap_head->val)
1090 return 0;
1091 else if (!(rmap_head->val & 1))
1092 return 1;
1093
1094 desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
1095 return desc->tail_count + desc->spte_count;
1096 }
1097
1098 static struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level,
1099 const struct kvm_memory_slot *slot)
1100 {
1101 unsigned long idx;
1102
1103 idx = gfn_to_index(gfn, slot->base_gfn, level);
1104 return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
1105 }
1106
1107 static void rmap_remove(struct kvm *kvm, u64 *spte)
1108 {
1109 struct kvm_memslots *slots;
1110 struct kvm_memory_slot *slot;
1111 struct kvm_mmu_page *sp;
1112 gfn_t gfn;
1113 struct kvm_rmap_head *rmap_head;
1114
1115 sp = sptep_to_sp(spte);
1116 gfn = kvm_mmu_page_get_gfn(sp, spte_index(spte));
1117
1118 /*
1119 * Unlike rmap_add, rmap_remove does not run in the context of a vCPU
1120 * so we have to determine which memslots to use based on context
1121 * information in sp->role.
1122 */
1123 slots = kvm_memslots_for_spte_role(kvm, sp->role);
1124
1125 slot = __gfn_to_memslot(slots, gfn);
1126 rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
1127
1128 pte_list_remove(kvm, spte, rmap_head);
1129 }
1130
1131 /*
1132 * Used by the following functions to iterate through the sptes linked by a
1133 * rmap. All fields are private and not assumed to be used outside.
1134 */
1135 struct rmap_iterator {
1136 /* private fields */
1137 struct pte_list_desc *desc; /* holds the sptep if not NULL */
1138 int pos; /* index of the sptep */
1139 };
1140
1141 /*
1142 * Iteration must be started by this function. This should also be used after
1143 * removing/dropping sptes from the rmap link because in such cases the
1144 * information in the iterator may not be valid.
1145 *
1146 * Returns sptep if found, NULL otherwise.
1147 */
1148 static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
1149 struct rmap_iterator *iter)
1150 {
1151 u64 *sptep;
1152
1153 if (!rmap_head->val)
1154 return NULL;
1155
1156 if (!(rmap_head->val & 1)) {
1157 iter->desc = NULL;
1158 sptep = (u64 *)rmap_head->val;
1159 goto out;
1160 }
1161
1162 iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
1163 iter->pos = 0;
1164 sptep = iter->desc->sptes[iter->pos];
1165 out:
1166 BUG_ON(!is_shadow_present_pte(*sptep));
1167 return sptep;
1168 }
1169
1170 /*
1171 * Must be used with a valid iterator: e.g. after rmap_get_first().
1172 *
1173 * Returns sptep if found, NULL otherwise.
1174 */
1175 static u64 *rmap_get_next(struct rmap_iterator *iter)
1176 {
1177 u64 *sptep;
1178
1179 if (iter->desc) {
1180 if (iter->pos < PTE_LIST_EXT - 1) {
1181 ++iter->pos;
1182 sptep = iter->desc->sptes[iter->pos];
1183 if (sptep)
1184 goto out;
1185 }
1186
1187 iter->desc = iter->desc->more;
1188
1189 if (iter->desc) {
1190 iter->pos = 0;
1191 /* desc->sptes[0] cannot be NULL */
1192 sptep = iter->desc->sptes[iter->pos];
1193 goto out;
1194 }
1195 }
1196
1197 return NULL;
1198 out:
1199 BUG_ON(!is_shadow_present_pte(*sptep));
1200 return sptep;
1201 }
1202
1203 #define for_each_rmap_spte(_rmap_head_, _iter_, _spte_) \
1204 for (_spte_ = rmap_get_first(_rmap_head_, _iter_); \
1205 _spte_; _spte_ = rmap_get_next(_iter_))
1206
1207 static void drop_spte(struct kvm *kvm, u64 *sptep)
1208 {
1209 u64 old_spte = mmu_spte_clear_track_bits(kvm, sptep);
1210
1211 if (is_shadow_present_pte(old_spte))
1212 rmap_remove(kvm, sptep);
1213 }
1214
1215 static void drop_large_spte(struct kvm *kvm, u64 *sptep, bool flush)
1216 {
1217 struct kvm_mmu_page *sp;
1218
1219 sp = sptep_to_sp(sptep);
1220 WARN_ON_ONCE(sp->role.level == PG_LEVEL_4K);
1221
1222 drop_spte(kvm, sptep);
1223
1224 if (flush)
1225 kvm_flush_remote_tlbs_sptep(kvm, sptep);
1226 }
1227
1228 /*
1229 * Write-protect on the specified @sptep, @pt_protect indicates whether
1230 * spte write-protection is caused by protecting shadow page table.
1231 *
1232 * Note: write protection differs between dirty logging and spte
1233 * protection:
1234 * - for dirty logging, the spte can be set to writable at anytime if
1235 * its dirty bitmap is properly set.
1236 * - for spte protection, the spte can be writable only after unsync-ing
1237 * shadow page.
1238 *
1239 * Return true if the TLB needs to be flushed.
1240 */
1241 static bool spte_write_protect(u64 *sptep, bool pt_protect)
1242 {
1243 u64 spte = *sptep;
1244
1245 if (!is_writable_pte(spte) &&
1246 !(pt_protect && is_mmu_writable_spte(spte)))
1247 return false;
1248
1249 if (pt_protect)
1250 spte &= ~shadow_mmu_writable_mask;
1251 spte = spte & ~PT_WRITABLE_MASK;
1252
1253 return mmu_spte_update(sptep, spte);
1254 }
1255
1256 static bool rmap_write_protect(struct kvm_rmap_head *rmap_head,
1257 bool pt_protect)
1258 {
1259 u64 *sptep;
1260 struct rmap_iterator iter;
1261 bool flush = false;
1262
1263 for_each_rmap_spte(rmap_head, &iter, sptep)
1264 flush |= spte_write_protect(sptep, pt_protect);
1265
1266 return flush;
1267 }
1268
1269 static bool spte_clear_dirty(u64 *sptep)
1270 {
1271 u64 spte = *sptep;
1272
1273 KVM_MMU_WARN_ON(!spte_ad_enabled(spte));
1274 spte &= ~shadow_dirty_mask;
1275 return mmu_spte_update(sptep, spte);
1276 }
1277
1278 static bool spte_wrprot_for_clear_dirty(u64 *sptep)
1279 {
1280 bool was_writable = test_and_clear_bit(PT_WRITABLE_SHIFT,
1281 (unsigned long *)sptep);
1282 if (was_writable && !spte_ad_enabled(*sptep))
1283 kvm_set_pfn_dirty(spte_to_pfn(*sptep));
1284
1285 return was_writable;
1286 }
1287
1288 /*
1289 * Gets the GFN ready for another round of dirty logging by clearing the
1290 * - D bit on ad-enabled SPTEs, and
1291 * - W bit on ad-disabled SPTEs.
1292 * Returns true iff any D or W bits were cleared.
1293 */
1294 static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1295 const struct kvm_memory_slot *slot)
1296 {
1297 u64 *sptep;
1298 struct rmap_iterator iter;
1299 bool flush = false;
1300
1301 for_each_rmap_spte(rmap_head, &iter, sptep)
1302 if (spte_ad_need_write_protect(*sptep))
1303 flush |= spte_wrprot_for_clear_dirty(sptep);
1304 else
1305 flush |= spte_clear_dirty(sptep);
1306
1307 return flush;
1308 }
1309
1310 /**
1311 * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
1312 * @kvm: kvm instance
1313 * @slot: slot to protect
1314 * @gfn_offset: start of the BITS_PER_LONG pages we care about
1315 * @mask: indicates which pages we should protect
1316 *
1317 * Used when we do not need to care about huge page mappings.
1318 */
1319 static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
1320 struct kvm_memory_slot *slot,
1321 gfn_t gfn_offset, unsigned long mask)
1322 {
1323 struct kvm_rmap_head *rmap_head;
1324
1325 if (tdp_mmu_enabled)
1326 kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
1327 slot->base_gfn + gfn_offset, mask, true);
1328
1329 if (!kvm_memslots_have_rmaps(kvm))
1330 return;
1331
1332 while (mask) {
1333 rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
1334 PG_LEVEL_4K, slot);
1335 rmap_write_protect(rmap_head, false);
1336
1337 /* clear the first set bit */
1338 mask &= mask - 1;
1339 }
1340 }
1341
1342 /**
1343 * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages, or write
1344 * protect the page if the D-bit isn't supported.
1345 * @kvm: kvm instance
1346 * @slot: slot to clear D-bit
1347 * @gfn_offset: start of the BITS_PER_LONG pages we care about
1348 * @mask: indicates which pages we should clear D-bit
1349 *
1350 * Used for PML to re-log the dirty GPAs after userspace has queried dirty_bitmap.
1351 */
1352 static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1353 struct kvm_memory_slot *slot,
1354 gfn_t gfn_offset, unsigned long mask)
1355 {
1356 struct kvm_rmap_head *rmap_head;
1357
1358 if (tdp_mmu_enabled)
1359 kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
1360 slot->base_gfn + gfn_offset, mask, false);
1361
1362 if (!kvm_memslots_have_rmaps(kvm))
1363 return;
1364
1365 while (mask) {
1366 rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
1367 PG_LEVEL_4K, slot);
1368 __rmap_clear_dirty(kvm, rmap_head, slot);
1369
1370 /* clear the first set bit */
1371 mask &= mask - 1;
1372 }
1373 }
1374
1375 /**
1376 * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
1377 * PT level pages.
1378 *
1379 * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
1380 * enable dirty logging for them.
1381 *
1382 * We need to care about huge page mappings: e.g. during dirty logging we may
1383 * have such mappings.
1384 */
1385 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1386 struct kvm_memory_slot *slot,
1387 gfn_t gfn_offset, unsigned long mask)
1388 {
1389 /*
1390 * Huge pages are NOT write protected when we start dirty logging in
1391 * initially-all-set mode; must write protect them here so that they
1392 * are split to 4K on the first write.
1393 *
1394 * The gfn_offset is guaranteed to be aligned to 64, but the base_gfn
1395 * of memslot has no such restriction, so the range can cross two large
1396 * pages.
1397 */
1398 if (kvm_dirty_log_manual_protect_and_init_set(kvm)) {
1399 gfn_t start = slot->base_gfn + gfn_offset + __ffs(mask);
1400 gfn_t end = slot->base_gfn + gfn_offset + __fls(mask);
1401
1402 if (READ_ONCE(eager_page_split))
1403 kvm_mmu_try_split_huge_pages(kvm, slot, start, end + 1, PG_LEVEL_4K);
1404
1405 kvm_mmu_slot_gfn_write_protect(kvm, slot, start, PG_LEVEL_2M);
1406
1407 /* Cross two large pages? */
1408 if (ALIGN(start << PAGE_SHIFT, PMD_SIZE) !=
1409 ALIGN(end << PAGE_SHIFT, PMD_SIZE))
1410 kvm_mmu_slot_gfn_write_protect(kvm, slot, end,
1411 PG_LEVEL_2M);
1412 }
1413
1414 /* Now handle 4K PTEs. */
1415 if (kvm_x86_ops.cpu_dirty_log_size)
1416 kvm_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset, mask);
1417 else
1418 kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
1419 }
1420
1421 int kvm_cpu_dirty_log_size(void)
1422 {
1423 return kvm_x86_ops.cpu_dirty_log_size;
1424 }
1425
1426 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
1427 struct kvm_memory_slot *slot, u64 gfn,
1428 int min_level)
1429 {
1430 struct kvm_rmap_head *rmap_head;
1431 int i;
1432 bool write_protected = false;
1433
1434 if (kvm_memslots_have_rmaps(kvm)) {
1435 for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
1436 rmap_head = gfn_to_rmap(gfn, i, slot);
1437 write_protected |= rmap_write_protect(rmap_head, true);
1438 }
1439 }
1440
1441 if (tdp_mmu_enabled)
1442 write_protected |=
1443 kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, min_level);
1444
1445 return write_protected;
1446 }
1447
1448 static bool kvm_vcpu_write_protect_gfn(struct kvm_vcpu *vcpu, u64 gfn)
1449 {
1450 struct kvm_memory_slot *slot;
1451
1452 slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1453 return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K);
1454 }
1455
1456 static bool __kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1457 const struct kvm_memory_slot *slot)
1458 {
1459 return kvm_zap_all_rmap_sptes(kvm, rmap_head);
1460 }
1461
1462 static bool kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1463 struct kvm_memory_slot *slot, gfn_t gfn, int level)
1464 {
1465 return __kvm_zap_rmap(kvm, rmap_head, slot);
1466 }
1467
1468 struct slot_rmap_walk_iterator {
1469 /* input fields. */
1470 const struct kvm_memory_slot *slot;
1471 gfn_t start_gfn;
1472 gfn_t end_gfn;
1473 int start_level;
1474 int end_level;
1475
1476 /* output fields. */
1477 gfn_t gfn;
1478 struct kvm_rmap_head *rmap;
1479 int level;
1480
1481 /* private field. */
1482 struct kvm_rmap_head *end_rmap;
1483 };
1484
1485 static void rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator,
1486 int level)
1487 {
1488 iterator->level = level;
1489 iterator->gfn = iterator->start_gfn;
1490 iterator->rmap = gfn_to_rmap(iterator->gfn, level, iterator->slot);
1491 iterator->end_rmap = gfn_to_rmap(iterator->end_gfn, level, iterator->slot);
1492 }
1493
1494 static void slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
1495 const struct kvm_memory_slot *slot,
1496 int start_level, int end_level,
1497 gfn_t start_gfn, gfn_t end_gfn)
1498 {
1499 iterator->slot = slot;
1500 iterator->start_level = start_level;
1501 iterator->end_level = end_level;
1502 iterator->start_gfn = start_gfn;
1503 iterator->end_gfn = end_gfn;
1504
1505 rmap_walk_init_level(iterator, iterator->start_level);
1506 }
1507
1508 static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
1509 {
1510 return !!iterator->rmap;
1511 }
1512
1513 static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
1514 {
1515 while (++iterator->rmap <= iterator->end_rmap) {
1516 iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level));
1517
1518 if (iterator->rmap->val)
1519 return;
1520 }
1521
1522 if (++iterator->level > iterator->end_level) {
1523 iterator->rmap = NULL;
1524 return;
1525 }
1526
1527 rmap_walk_init_level(iterator, iterator->level);
1528 }
1529
1530 #define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_, \
1531 _start_gfn, _end_gfn, _iter_) \
1532 for (slot_rmap_walk_init(_iter_, _slot_, _start_level_, \
1533 _end_level_, _start_gfn, _end_gfn); \
1534 slot_rmap_walk_okay(_iter_); \
1535 slot_rmap_walk_next(_iter_))
1536
1537 typedef bool (*rmap_handler_t)(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1538 struct kvm_memory_slot *slot, gfn_t gfn,
1539 int level);
1540
1541 static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm,
1542 struct kvm_gfn_range *range,
1543 rmap_handler_t handler)
1544 {
1545 struct slot_rmap_walk_iterator iterator;
1546 bool ret = false;
1547
1548 for_each_slot_rmap_range(range->slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
1549 range->start, range->end - 1, &iterator)
1550 ret |= handler(kvm, iterator.rmap, range->slot, iterator.gfn,
1551 iterator.level);
1552
1553 return ret;
1554 }
1555
1556 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1557 {
1558 bool flush = false;
1559
1560 if (kvm_memslots_have_rmaps(kvm))
1561 flush = kvm_handle_gfn_range(kvm, range, kvm_zap_rmap);
1562
1563 if (tdp_mmu_enabled)
1564 flush = kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
1565
1566 if (kvm_x86_ops.set_apic_access_page_addr &&
1567 range->slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT)
1568 kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
1569
1570 return flush;
1571 }
1572
1573 static bool kvm_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1574 struct kvm_memory_slot *slot, gfn_t gfn, int level)
1575 {
1576 u64 *sptep;
1577 struct rmap_iterator iter;
1578 int young = 0;
1579
1580 for_each_rmap_spte(rmap_head, &iter, sptep)
1581 young |= mmu_spte_age(sptep);
1582
1583 return young;
1584 }
1585
1586 static bool kvm_test_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1587 struct kvm_memory_slot *slot, gfn_t gfn, int level)
1588 {
1589 u64 *sptep;
1590 struct rmap_iterator iter;
1591
1592 for_each_rmap_spte(rmap_head, &iter, sptep)
1593 if (is_accessed_spte(*sptep))
1594 return true;
1595 return false;
1596 }
1597
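/*
 * If a single rmap chain grows beyond this many SPTEs, __rmap_add() zaps the
 * entire chain (and flushes remote TLBs) rather than letting it grow without
 * bound.
 */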
1598 #define RMAP_RECYCLE_THRESHOLD 1000
1599
1600 static void __rmap_add(struct kvm *kvm,
1601 struct kvm_mmu_memory_cache *cache,
1602 const struct kvm_memory_slot *slot,
1603 u64 *spte, gfn_t gfn, unsigned int access)
1604 {
1605 struct kvm_mmu_page *sp;
1606 struct kvm_rmap_head *rmap_head;
1607 int rmap_count;
1608
1609 sp = sptep_to_sp(spte);
1610 kvm_mmu_page_set_translation(sp, spte_index(spte), gfn, access);
1611 kvm_update_page_stats(kvm, sp->role.level, 1);
1612
1613 rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
1614 rmap_count = pte_list_add(cache, spte, rmap_head);
1615
1616 if (rmap_count > kvm->stat.max_mmu_rmap_size)
1617 kvm->stat.max_mmu_rmap_size = rmap_count;
1618 if (rmap_count > RMAP_RECYCLE_THRESHOLD) {
1619 kvm_zap_all_rmap_sptes(kvm, rmap_head);
1620 kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level);
1621 }
1622 }
1623
1624 static void rmap_add(struct kvm_vcpu *vcpu, const struct kvm_memory_slot *slot,
1625 u64 *spte, gfn_t gfn, unsigned int access)
1626 {
1627 struct kvm_mmu_memory_cache *cache = &vcpu->arch.mmu_pte_list_desc_cache;
1628
1629 __rmap_add(vcpu->kvm, cache, slot, spte, gfn, access);
1630 }
1631
1632 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1633 {
1634 bool young = false;
1635
1636 if (kvm_memslots_have_rmaps(kvm))
1637 young = kvm_handle_gfn_range(kvm, range, kvm_age_rmap);
1638
1639 if (tdp_mmu_enabled)
1640 young |= kvm_tdp_mmu_age_gfn_range(kvm, range);
1641
1642 return young;
1643 }
1644
1645 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1646 {
1647 bool young = false;
1648
1649 if (kvm_memslots_have_rmaps(kvm))
1650 young = kvm_handle_gfn_range(kvm, range, kvm_test_age_rmap);
1651
1652 if (tdp_mmu_enabled)
1653 young |= kvm_tdp_mmu_test_age_gfn(kvm, range);
1654
1655 return young;
1656 }
1657
1658 static void kvm_mmu_check_sptes_at_free(struct kvm_mmu_page *sp)
1659 {
1660 #ifdef CONFIG_KVM_PROVE_MMU
1661 int i;
1662
1663 for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
1664 if (KVM_MMU_WARN_ON(is_shadow_present_pte(sp->spt[i])))
1665 pr_err_ratelimited("SPTE %llx (@ %p) for gfn %llx shadow-present at free",
1666 sp->spt[i], &sp->spt[i],
1667 kvm_mmu_page_get_gfn(sp, i));
1668 }
1669 #endif
1670 }
1671
1672 /*
1673 * This value is the sum of all of the kvm instances'
1674 * kvm->arch.n_used_mmu_pages values. We need a global,
1675 * aggregate version in order to make the slab shrinker
1676 * faster.
1677 */
1678 static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, long nr)
1679 {
1680 kvm->arch.n_used_mmu_pages += nr;
1681 percpu_counter_add(&kvm_total_used_mmu_pages, nr);
1682 }
1683
1684 static void kvm_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1685 {
1686 kvm_mod_used_mmu_pages(kvm, +1);
1687 kvm_account_pgtable_pages((void *)sp->spt, +1);
1688 }
1689
1690 static void kvm_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1691 {
1692 kvm_mod_used_mmu_pages(kvm, -1);
1693 kvm_account_pgtable_pages((void *)sp->spt, -1);
1694 }
1695
1696 static void kvm_mmu_free_shadow_page(struct kvm_mmu_page *sp)
1697 {
1698 kvm_mmu_check_sptes_at_free(sp);
1699
1700 hlist_del(&sp->hash_link);
1701 list_del(&sp->link);
1702 free_page((unsigned long)sp->spt);
1703 free_page((unsigned long)sp->shadowed_translation);
1704 kmem_cache_free(mmu_page_header_cache, sp);
1705 }
1706
1707 static unsigned kvm_page_table_hashfn(gfn_t gfn)
1708 {
1709 return hash_64(gfn, KVM_MMU_HASH_SHIFT);
1710 }
1711
1712 static void mmu_page_add_parent_pte(struct kvm_mmu_memory_cache *cache,
1713 struct kvm_mmu_page *sp, u64 *parent_pte)
1714 {
1715 if (!parent_pte)
1716 return;
1717
1718 pte_list_add(cache, parent_pte, &sp->parent_ptes);
1719 }
1720
1721 static void mmu_page_remove_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
1722 u64 *parent_pte)
1723 {
1724 pte_list_remove(kvm, parent_pte, &sp->parent_ptes);
1725 }
1726
1727 static void drop_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
1728 u64 *parent_pte)
1729 {
1730 mmu_page_remove_parent_pte(kvm, sp, parent_pte);
1731 mmu_spte_clear_no_track(parent_pte);
1732 }
1733
1734 static void mark_unsync(u64 *spte);
1735 static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
1736 {
1737 u64 *sptep;
1738 struct rmap_iterator iter;
1739
1740 for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) {
1741 mark_unsync(sptep);
1742 }
1743 }
1744
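/*
 * Mark the SPTE's slot in the containing shadow page's unsync_child_bitmap
 * and, on the first 0->1 transition of unsync_children, propagate the marking
 * up the parent chain so that the unsync walk can later find the unsync leaf.
 */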
1745 static void mark_unsync(u64 *spte)
1746 {
1747 struct kvm_mmu_page *sp;
1748
1749 sp = sptep_to_sp(spte);
1750 if (__test_and_set_bit(spte_index(spte), sp->unsync_child_bitmap))
1751 return;
1752 if (sp->unsync_children++)
1753 return;
1754 kvm_mmu_mark_parents_unsync(sp);
1755 }
1756
1757 #define KVM_PAGE_ARRAY_NR 16
1758
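/*
 * Fixed-size batch of shadow pages gathered by the unsync walk.  When the
 * array fills up, __mmu_unsync_walk() bails with -ENOSPC; the caller
 * processes the partial batch it already has and then restarts the walk.
 */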
1759 struct kvm_mmu_pages {
1760 struct mmu_page_and_offset {
1761 struct kvm_mmu_page *sp;
1762 unsigned int idx;
1763 } page[KVM_PAGE_ARRAY_NR];
1764 unsigned int nr;
1765 };
1766
1767 static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
1768 int idx)
1769 {
1770 int i;
1771
1772 if (sp->unsync)
1773 for (i=0; i < pvec->nr; i++)
1774 if (pvec->page[i].sp == sp)
1775 return 0;
1776
1777 pvec->page[pvec->nr].sp = sp;
1778 pvec->page[pvec->nr].idx = idx;
1779 pvec->nr++;
1780 return (pvec->nr == KVM_PAGE_ARRAY_NR);
1781 }
1782
1783 static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx)
1784 {
1785 --sp->unsync_children;
1786 WARN_ON_ONCE((int)sp->unsync_children < 0);
1787 __clear_bit(idx, sp->unsync_child_bitmap);
1788 }
1789
1790 static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
1791 struct kvm_mmu_pages *pvec)
1792 {
1793 int i, ret, nr_unsync_leaf = 0;
1794
1795 for_each_set_bit(i, sp->unsync_child_bitmap, 512) {
1796 struct kvm_mmu_page *child;
1797 u64 ent = sp->spt[i];
1798
1799 if (!is_shadow_present_pte(ent) || is_large_pte(ent)) {
1800 clear_unsync_child_bit(sp, i);
1801 continue;
1802 }
1803
1804 child = spte_to_child_sp(ent);
1805
1806 if (child->unsync_children) {
1807 if (mmu_pages_add(pvec, child, i))
1808 return -ENOSPC;
1809
1810 ret = __mmu_unsync_walk(child, pvec);
1811 if (!ret) {
1812 clear_unsync_child_bit(sp, i);
1813 continue;
1814 } else if (ret > 0) {
1815 nr_unsync_leaf += ret;
1816 } else
1817 return ret;
1818 } else if (child->unsync) {
1819 nr_unsync_leaf++;
1820 if (mmu_pages_add(pvec, child, i))
1821 return -ENOSPC;
1822 } else
1823 clear_unsync_child_bit(sp, i);
1824 }
1825
1826 return nr_unsync_leaf;
1827 }
1828
1829 #define INVALID_INDEX (-1)
1830
1831 static int mmu_unsync_walk(struct kvm_mmu_page *sp,
1832 struct kvm_mmu_pages *pvec)
1833 {
1834 pvec->nr = 0;
1835 if (!sp->unsync_children)
1836 return 0;
1837
1838 mmu_pages_add(pvec, sp, INVALID_INDEX);
1839 return __mmu_unsync_walk(sp, pvec);
1840 }
1841
1842 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1843 {
1844 WARN_ON_ONCE(!sp->unsync);
1845 trace_kvm_mmu_sync_page(sp);
1846 sp->unsync = 0;
1847 --kvm->stat.mmu_unsync;
1848 }
1849
1850 static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
1851 struct list_head *invalid_list);
1852 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
1853 struct list_head *invalid_list);
1854
1855 static bool sp_has_gptes(struct kvm_mmu_page *sp)
1856 {
1857 if (sp->role.direct)
1858 return false;
1859
1860 if (sp->role.passthrough)
1861 return false;
1862
1863 return true;
1864 }
1865
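/*
 * Iterate over the shadow pages in a hash bucket, skipping obsolete entries.
 * The _with_gptes variant additionally filters on the gfn and on pages that
 * actually shadow guest PTEs, i.e. excludes direct and passthrough pages.
 */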
1866 #define for_each_valid_sp(_kvm, _sp, _list) \
1867 hlist_for_each_entry(_sp, _list, hash_link) \
1868 if (is_obsolete_sp((_kvm), (_sp))) { \
1869 } else
1870
1871 #define for_each_gfn_valid_sp_with_gptes(_kvm, _sp, _gfn) \
1872 for_each_valid_sp(_kvm, _sp, \
1873 &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)]) \
1874 if ((_sp)->gfn != (_gfn) || !sp_has_gptes(_sp)) {} else
1875
1876 static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
1877 {
1878 union kvm_mmu_page_role root_role = vcpu->arch.mmu->root_role;
1879
1880 /*
1881 * Ignore various flags when verifying that it's safe to sync a shadow
1882 * page using the current MMU context.
1883 *
1884 * - level: not part of the overall MMU role and will never match as the MMU's
1885 * level tracks the root level
1886 * - access: updated based on the new guest PTE
1887 * - quadrant: not part of the overall MMU role (similar to level)
1888 */
1889 const union kvm_mmu_page_role sync_role_ign = {
1890 .level = 0xf,
1891 .access = 0x7,
1892 .quadrant = 0x3,
1893 .passthrough = 0x1,
1894 };
1895
1896 /*
1897 * Direct pages can never be unsync, and KVM should never attempt to
1898 * sync a shadow page for a different MMU context, e.g. if the role
1899 * differs then the memslot lookup (SMM vs. non-SMM) will be bogus, the
1900 * reserved bits checks will be wrong, etc...
1901 */
1902 if (WARN_ON_ONCE(sp->role.direct || !vcpu->arch.mmu->sync_spte ||
1903 (sp->role.word ^ root_role.word) & ~sync_role_ign.word))
1904 return false;
1905
1906 return true;
1907 }
1908
1909 static int kvm_sync_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int i)
1910 {
1911 /* sp->spt[i] has initial value of shadow page table allocation */
1912 if (sp->spt[i] == SHADOW_NONPRESENT_VALUE)
1913 return 0;
1914
1915 return vcpu->arch.mmu->sync_spte(vcpu, sp, i);
1916 }
1917
1918 static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
1919 {
1920 int flush = 0;
1921 int i;
1922
1923 if (!kvm_sync_page_check(vcpu, sp))
1924 return -1;
1925
1926 for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
1927 int ret = kvm_sync_spte(vcpu, sp, i);
1928
1929 if (ret < 0)
1930 return -1;
1931 flush |= ret;
1932 }
1933
1934 /*
1935 * Note, any flush is purely for KVM's correctness, e.g. when dropping
1936 * an existing SPTE or clearing W/A/D bits to ensure an mmu_notifier
1937 * unmap or dirty logging event doesn't fail to flush. The guest is
1938 * responsible for flushing the TLB to ensure any changes in protection
1939 * bits are recognized, i.e. until the guest flushes or page faults on
1940 * a relevant address, KVM is architecturally allowed to let vCPUs use
1941 * cached translations with the old protection bits.
1942 */
1943 return flush;
1944 }
1945
1946 static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
1947 struct list_head *invalid_list)
1948 {
1949 int ret = __kvm_sync_page(vcpu, sp);
1950
1951 if (ret < 0)
1952 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
1953 return ret;
1954 }
1955
1956 static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
1957 struct list_head *invalid_list,
1958 bool remote_flush)
1959 {
1960 if (!remote_flush && list_empty(invalid_list))
1961 return false;
1962
1963 if (!list_empty(invalid_list))
1964 kvm_mmu_commit_zap_page(kvm, invalid_list);
1965 else
1966 kvm_flush_remote_tlbs(kvm);
1967 return true;
1968 }
1969
1970 static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
1971 {
1972 if (sp->role.invalid)
1973 return true;
1974
1975 /* TDP MMU pages do not use the MMU generation. */
1976 return !is_tdp_mmu_page(sp) &&
1977 unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
1978 }
1979
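/*
 * Records the chain of parent shadow pages (and the child index within each)
 * visited while walking unsync children, so that the matching bits in each
 * parent's unsync_child_bitmap can be cleared once a leaf has been synced.
 */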
1980 struct mmu_page_path {
1981 struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL];
1982 unsigned int idx[PT64_ROOT_MAX_LEVEL];
1983 };
1984
1985 #define for_each_sp(pvec, sp, parents, i) \
1986 for (i = mmu_pages_first(&pvec, &parents); \
1987 i < pvec.nr && ({ sp = pvec.page[i].sp; 1;}); \
1988 i = mmu_pages_next(&pvec, &parents, i))
1989
1990 static int mmu_pages_next(struct kvm_mmu_pages *pvec,
1991 struct mmu_page_path *parents,
1992 int i)
1993 {
1994 int n;
1995
1996 for (n = i+1; n < pvec->nr; n++) {
1997 struct kvm_mmu_page *sp = pvec->page[n].sp;
1998 unsigned idx = pvec->page[n].idx;
1999 int level = sp->role.level;
2000
2001 parents->idx[level-1] = idx;
2002 if (level == PG_LEVEL_4K)
2003 break;
2004
2005 parents->parent[level-2] = sp;
2006 }
2007
2008 return n;
2009 }
2010
2011 static int mmu_pages_first(struct kvm_mmu_pages *pvec,
2012 struct mmu_page_path *parents)
2013 {
2014 struct kvm_mmu_page *sp;
2015 int level;
2016
2017 if (pvec->nr == 0)
2018 return 0;
2019
2020 WARN_ON_ONCE(pvec->page[0].idx != INVALID_INDEX);
2021
2022 sp = pvec->page[0].sp;
2023 level = sp->role.level;
2024 WARN_ON_ONCE(level == PG_LEVEL_4K);
2025
2026 parents->parent[level-2] = sp;
2027
2028 /* Also set up a sentinel. Further entries in pvec are all
2029 * children of sp, so this element is never overwritten.
2030 */
2031 parents->parent[level-1] = NULL;
2032 return mmu_pages_next(pvec, parents, 0);
2033 }
2034
2035 static void mmu_pages_clear_parents(struct mmu_page_path *parents)
2036 {
2037 struct kvm_mmu_page *sp;
2038 unsigned int level = 0;
2039
2040 do {
2041 unsigned int idx = parents->idx[level];
2042 sp = parents->parent[level];
2043 if (!sp)
2044 return;
2045
2046 WARN_ON_ONCE(idx == INVALID_INDEX);
2047 clear_unsync_child_bit(sp, idx);
2048 level++;
2049 } while (!sp->unsync_children);
2050 }
2051
2052 static int mmu_sync_children(struct kvm_vcpu *vcpu,
2053 struct kvm_mmu_page *parent, bool can_yield)
2054 {
2055 int i;
2056 struct kvm_mmu_page *sp;
2057 struct mmu_page_path parents;
2058 struct kvm_mmu_pages pages;
2059 LIST_HEAD(invalid_list);
2060 bool flush = false;
2061
2062 while (mmu_unsync_walk(parent, &pages)) {
2063 bool protected = false;
2064
2065 for_each_sp(pages, sp, parents, i)
2066 protected |= kvm_vcpu_write_protect_gfn(vcpu, sp->gfn);
2067
2068 if (protected) {
2069 kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, true);
2070 flush = false;
2071 }
2072
2073 for_each_sp(pages, sp, parents, i) {
2074 kvm_unlink_unsync_page(vcpu->kvm, sp);
2075 flush |= kvm_sync_page(vcpu, sp, &invalid_list) > 0;
2076 mmu_pages_clear_parents(&parents);
2077 }
2078 if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
2079 kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush);
2080 if (!can_yield) {
2081 kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
2082 return -EINTR;
2083 }
2084
2085 cond_resched_rwlock_write(&vcpu->kvm->mmu_lock);
2086 flush = false;
2087 }
2088 }
2089
2090 kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush);
2091 return 0;
2092 }
2093
2094 static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
2095 {
2096 atomic_set(&sp->write_flooding_count, 0);
2097 }
2098
2099 static void clear_sp_write_flooding_count(u64 *spte)
2100 {
2101 __clear_sp_write_flooding_count(sptep_to_sp(spte));
2102 }
2103
2104 /*
2105 * The vCPU is required when finding indirect shadow pages; the shadow
2106 * page may already exist and syncing it needs the vCPU pointer in
2107 * order to read guest page tables. Direct shadow pages are never
2108 * unsync, thus @vcpu can be NULL if @role.direct is true.
2109 */
2110 static struct kvm_mmu_page *kvm_mmu_find_shadow_page(struct kvm *kvm,
2111 struct kvm_vcpu *vcpu,
2112 gfn_t gfn,
2113 struct hlist_head *sp_list,
2114 union kvm_mmu_page_role role)
2115 {
2116 struct kvm_mmu_page *sp;
2117 int ret;
2118 int collisions = 0;
2119 LIST_HEAD(invalid_list);
2120
2121 for_each_valid_sp(kvm, sp, sp_list) {
2122 if (sp->gfn != gfn) {
2123 collisions++;
2124 continue;
2125 }
2126
2127 if (sp->role.word != role.word) {
2128 /*
2129 * If the guest is creating an upper-level page, zap
2130 * unsync pages for the same gfn. While it's possible
2131 * the guest is using recursive page tables, in all
2132 * likelihood the guest has stopped using the unsync
2133 * page and is installing a completely unrelated page.
2134 * Unsync pages must not be left as is, because the new
2135 * upper-level page will be write-protected.
2136 */
2137 if (role.level > PG_LEVEL_4K && sp->unsync)
2138 kvm_mmu_prepare_zap_page(kvm, sp,
2139 &invalid_list);
2140 continue;
2141 }
2142
2143 /* unsync and write-flooding only apply to indirect SPs. */
2144 if (sp->role.direct)
2145 goto out;
2146
2147 if (sp->unsync) {
2148 if (KVM_BUG_ON(!vcpu, kvm))
2149 break;
2150
2151 /*
2152 * The page is good, but is stale. kvm_sync_page does
2153 * get the latest guest state, but (unlike mmu_unsync_children)
2154 * it doesn't write-protect the page or mark it synchronized!
2155 * This way the validity of the mapping is ensured, but the
2156 * overhead of write protection is not incurred until the
2157 * guest invalidates the TLB mapping. This allows multiple
2158 * SPs for a single gfn to be unsync.
2159 *
2160 * If the sync fails, the page is zapped. If so, break
2161 * in order to rebuild it.
2162 */
2163 ret = kvm_sync_page(vcpu, sp, &invalid_list);
2164 if (ret < 0)
2165 break;
2166
2167 WARN_ON_ONCE(!list_empty(&invalid_list));
2168 if (ret > 0)
2169 kvm_flush_remote_tlbs(kvm);
2170 }
2171
2172 __clear_sp_write_flooding_count(sp);
2173
2174 goto out;
2175 }
2176
2177 sp = NULL;
2178 ++kvm->stat.mmu_cache_miss;
2179
2180 out:
2181 kvm_mmu_commit_zap_page(kvm, &invalid_list);
2182
2183 if (collisions > kvm->stat.max_mmu_page_hash_collisions)
2184 kvm->stat.max_mmu_page_hash_collisions = collisions;
2185 return sp;
2186 }
2187
2188 /* Caches used when allocating a new shadow page. */
2189 struct shadow_page_caches {
2190 struct kvm_mmu_memory_cache *page_header_cache;
2191 struct kvm_mmu_memory_cache *shadow_page_cache;
2192 struct kvm_mmu_memory_cache *shadowed_info_cache;
2193 };
2194
2195 static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm *kvm,
2196 struct shadow_page_caches *caches,
2197 gfn_t gfn,
2198 struct hlist_head *sp_list,
2199 union kvm_mmu_page_role role)
2200 {
2201 struct kvm_mmu_page *sp;
2202
2203 sp = kvm_mmu_memory_cache_alloc(caches->page_header_cache);
2204 sp->spt = kvm_mmu_memory_cache_alloc(caches->shadow_page_cache);
2205 if (!role.direct && role.level <= KVM_MAX_HUGEPAGE_LEVEL)
2206 sp->shadowed_translation = kvm_mmu_memory_cache_alloc(caches->shadowed_info_cache);
2207
2208 set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
2209
2210 INIT_LIST_HEAD(&sp->possible_nx_huge_page_link);
2211
2212 /*
2213 * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
2214 * depends on valid pages being added to the head of the list. See
2215 * comments in kvm_zap_obsolete_pages().
2216 */
2217 sp->mmu_valid_gen = kvm->arch.mmu_valid_gen;
2218 list_add(&sp->link, &kvm->arch.active_mmu_pages);
2219 kvm_account_mmu_page(kvm, sp);
2220
2221 sp->gfn = gfn;
2222 sp->role = role;
2223 hlist_add_head(&sp->hash_link, sp_list);
2224 if (sp_has_gptes(sp))
2225 account_shadowed(kvm, sp);
2226
2227 return sp;
2228 }
2229
2230 /* Note, @vcpu may be NULL if @role.direct is true; see kvm_mmu_find_shadow_page. */
2231 static struct kvm_mmu_page *__kvm_mmu_get_shadow_page(struct kvm *kvm,
2232 struct kvm_vcpu *vcpu,
2233 struct shadow_page_caches *caches,
2234 gfn_t gfn,
2235 union kvm_mmu_page_role role)
2236 {
2237 struct hlist_head *sp_list;
2238 struct kvm_mmu_page *sp;
2239 bool created = false;
2240
2241 sp_list = &kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
2242
2243 sp = kvm_mmu_find_shadow_page(kvm, vcpu, gfn, sp_list, role);
2244 if (!sp) {
2245 created = true;
2246 sp = kvm_mmu_alloc_shadow_page(kvm, caches, gfn, sp_list, role);
2247 }
2248
2249 trace_kvm_mmu_get_page(sp, created);
2250 return sp;
2251 }
2252
2253 static struct kvm_mmu_page *kvm_mmu_get_shadow_page(struct kvm_vcpu *vcpu,
2254 gfn_t gfn,
2255 union kvm_mmu_page_role role)
2256 {
2257 struct shadow_page_caches caches = {
2258 .page_header_cache = &vcpu->arch.mmu_page_header_cache,
2259 .shadow_page_cache = &vcpu->arch.mmu_shadow_page_cache,
2260 .shadowed_info_cache = &vcpu->arch.mmu_shadowed_info_cache,
2261 };
2262
2263 return __kvm_mmu_get_shadow_page(vcpu->kvm, vcpu, &caches, gfn, role);
2264 }
2265
2266 static union kvm_mmu_page_role kvm_mmu_child_role(u64 *sptep, bool direct,
2267 unsigned int access)
2268 {
2269 struct kvm_mmu_page *parent_sp = sptep_to_sp(sptep);
2270 union kvm_mmu_page_role role;
2271
2272 role = parent_sp->role;
2273 role.level--;
2274 role.access = access;
2275 role.direct = direct;
2276 role.passthrough = 0;
2277
2278 /*
2279 * If the guest has 4-byte PTEs then that means it's using 32-bit,
2280 * 2-level, non-PAE paging. KVM shadows such guests with PAE paging
2281 * (i.e. 8-byte PTEs). The difference in PTE size means that KVM must
2282 * shadow each guest page table with multiple shadow page tables, which
2283 * requires extra bookkeeping in the role.
2284 *
2285 * Specifically, to shadow the guest's page directory (which covers a
2286 * 4GiB address space), KVM uses 4 PAE page directories, each mapping
2287 * 1GiB of the address space. @role.quadrant encodes which quarter of
2288 * the address space each maps.
2289 *
2290 * To shadow the guest's page tables (which each map a 4MiB region), KVM
2291 * uses 2 PAE page tables, each mapping a 2MiB region. For these,
2292 * @role.quadrant encodes which half of the region they map.
2293 *
2294 * Concretely, a 4-byte PDE consumes bits 31:22, while an 8-byte PDE
2295 * consumes bits 29:21. To consume bits 31:30, KVM uses 4 shadow
2296 * PDPTEs; those 4 PAE page directories are pre-allocated and their
2297 * quadrant is assigned in mmu_alloc_root(). A 4-byte PTE consumes
2298 * bits 21:12, while an 8-byte PTE consumes bits 20:12. To consume
2299 * bit 21 in the PTE (the child here), KVM propagates that bit to the
2300 * quadrant, i.e. sets quadrant to '0' or '1'. The parent 8-byte PDE
2301 * covers bit 21 (see above), thus the quadrant is calculated from the
2302 * _least_ significant bit of the PDE index.
2303 */
2304 if (role.has_4_byte_gpte) {
2305 WARN_ON_ONCE(role.level != PG_LEVEL_4K);
2306 role.quadrant = spte_index(sptep) & 1;
2307 }
2308
2309 return role;
2310 }
2311
2312 static struct kvm_mmu_page *kvm_mmu_get_child_sp(struct kvm_vcpu *vcpu,
2313 u64 *sptep, gfn_t gfn,
2314 bool direct, unsigned int access)
2315 {
2316 union kvm_mmu_page_role role;
2317
2318 if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep))
2319 return ERR_PTR(-EEXIST);
2320
2321 role = kvm_mmu_child_role(sptep, direct, access);
2322 return kvm_mmu_get_shadow_page(vcpu, gfn, role);
2323 }
2324
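/*
 * Initialize a shadow page table walk for @addr, starting at @root.  The
 * iterator descends one level per step toward the 4K leaf; 32-bit non-direct
 * MMUs start below the root, at the pre-computed PAE page directory that
 * covers @addr.
 */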
2325 static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator,
2326 struct kvm_vcpu *vcpu, hpa_t root,
2327 u64 addr)
2328 {
2329 iterator->addr = addr;
2330 iterator->shadow_addr = root;
2331 iterator->level = vcpu->arch.mmu->root_role.level;
2332
2333 if (iterator->level >= PT64_ROOT_4LEVEL &&
2334 vcpu->arch.mmu->cpu_role.base.level < PT64_ROOT_4LEVEL &&
2335 !vcpu->arch.mmu->root_role.direct)
2336 iterator->level = PT32E_ROOT_LEVEL;
2337
2338 if (iterator->level == PT32E_ROOT_LEVEL) {
2339 /*
2340 * prev_root is currently only used for 64-bit hosts. So only
2341 * the active root_hpa is valid here.
2342 */
2343 BUG_ON(root != vcpu->arch.mmu->root.hpa);
2344
2345 iterator->shadow_addr
2346 = vcpu->arch.mmu->pae_root[(addr >> 30) & 3];
2347 iterator->shadow_addr &= SPTE_BASE_ADDR_MASK;
2348 --iterator->level;
2349 if (!iterator->shadow_addr)
2350 iterator->level = 0;
2351 }
2352 }
2353
2354 static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
2355 struct kvm_vcpu *vcpu, u64 addr)
2356 {
2357 shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root.hpa,
2358 addr);
2359 }
2360
2361 static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
2362 {
2363 if (iterator->level < PG_LEVEL_4K)
2364 return false;
2365
2366 iterator->index = SPTE_INDEX(iterator->addr, iterator->level);
2367 iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
2368 return true;
2369 }
2370
2371 static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
2372 u64 spte)
2373 {
2374 if (!is_shadow_present_pte(spte) || is_last_spte(spte, iterator->level)) {
2375 iterator->level = 0;
2376 return;
2377 }
2378
2379 iterator->shadow_addr = spte & SPTE_BASE_ADDR_MASK;
2380 --iterator->level;
2381 }
2382
2383 static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
2384 {
2385 __shadow_walk_next(iterator, *iterator->sptep);
2386 }
2387
2388 static void __link_shadow_page(struct kvm *kvm,
2389 struct kvm_mmu_memory_cache *cache, u64 *sptep,
2390 struct kvm_mmu_page *sp, bool flush)
2391 {
2392 u64 spte;
2393
2394 BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
2395
2396 /*
2397 * If an SPTE is present already, it must be a leaf and therefore
2398 * a large one. Drop it, and flush the TLB if needed, before
2399 * installing sp.
2400 */
2401 if (is_shadow_present_pte(*sptep))
2402 drop_large_spte(kvm, sptep, flush);
2403
2404 spte = make_nonleaf_spte(sp->spt, sp_ad_disabled(sp));
2405
2406 mmu_spte_set(sptep, spte);
2407
2408 mmu_page_add_parent_pte(cache, sp, sptep);
2409
2410 /*
2411 * The non-direct sub-pagetable must be updated before linking. For
2412 * L1 sp, the pagetable is updated via kvm_sync_page() in
2413 * kvm_mmu_find_shadow_page() without write-protecting the gfn,
2414 * so sp->unsync can be true or false. For higher level non-direct
2415 * sp, the pagetable is updated/synced via mmu_sync_children() in
2416 * FNAME(fetch)(), so sp->unsync_children can only be false.
2417 * WARN_ON_ONCE() if anything happens unexpectedly.
2418 */
2419 if (WARN_ON_ONCE(sp->unsync_children) || sp->unsync)
2420 mark_unsync(sptep);
2421 }
2422
2423 static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
2424 struct kvm_mmu_page *sp)
2425 {
2426 __link_shadow_page(vcpu->kvm, &vcpu->arch.mmu_pte_list_desc_cache, sptep, sp, true);
2427 }
2428
2429 static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2430 unsigned direct_access)
2431 {
2432 if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
2433 struct kvm_mmu_page *child;
2434
2435 /*
2436 * For the direct sp, if the guest pte's dirty bit
2437 * changed from clean to dirty, it will corrupt the
2438 * sp's access: allow writable in the read-only sp,
2439 * so we should update the spte at this point to get
2440 * a new sp with the correct access.
2441 */
2442 child = spte_to_child_sp(*sptep);
2443 if (child->role.access == direct_access)
2444 return;
2445
2446 drop_parent_pte(vcpu->kvm, child, sptep);
2447 kvm_flush_remote_tlbs_sptep(vcpu->kvm, sptep);
2448 }
2449 }
2450
2451 /* Returns the number of zapped non-leaf child shadow pages. */
2452 static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
2453 u64 *spte, struct list_head *invalid_list)
2454 {
2455 u64 pte;
2456 struct kvm_mmu_page *child;
2457
2458 pte = *spte;
2459 if (is_shadow_present_pte(pte)) {
2460 if (is_last_spte(pte, sp->role.level)) {
2461 drop_spte(kvm, spte);
2462 } else {
2463 child = spte_to_child_sp(pte);
2464 drop_parent_pte(kvm, child, spte);
2465
2466 /*
2467 * Recursively zap nested TDP SPs, parentless SPs are
2468 * unlikely to be used again in the near future. This
2469 * avoids retaining a large number of stale nested SPs.
2470 */
2471 if (tdp_enabled && invalid_list &&
2472 child->role.guest_mode && !child->parent_ptes.val)
2473 return kvm_mmu_prepare_zap_page(kvm, child,
2474 invalid_list);
2475 }
2476 } else if (is_mmio_spte(kvm, pte)) {
2477 mmu_spte_clear_no_track(spte);
2478 }
2479 return 0;
2480 }
2481
2482 static int kvm_mmu_page_unlink_children(struct kvm *kvm,
2483 struct kvm_mmu_page *sp,
2484 struct list_head *invalid_list)
2485 {
2486 int zapped = 0;
2487 unsigned i;
2488
2489 for (i = 0; i < SPTE_ENT_PER_PAGE; ++i)
2490 zapped += mmu_page_zap_pte(kvm, sp, sp->spt + i, invalid_list);
2491
2492 return zapped;
2493 }
2494
2495 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
2496 {
2497 u64 *sptep;
2498 struct rmap_iterator iter;
2499
2500 while ((sptep = rmap_get_first(&sp->parent_ptes, &iter)))
2501 drop_parent_pte(kvm, sp, sptep);
2502 }
2503
2504 static int mmu_zap_unsync_children(struct kvm *kvm,
2505 struct kvm_mmu_page *parent,
2506 struct list_head *invalid_list)
2507 {
2508 int i, zapped = 0;
2509 struct mmu_page_path parents;
2510 struct kvm_mmu_pages pages;
2511
2512 if (parent->role.level == PG_LEVEL_4K)
2513 return 0;
2514
2515 while (mmu_unsync_walk(parent, &pages)) {
2516 struct kvm_mmu_page *sp;
2517
2518 for_each_sp(pages, sp, parents, i) {
2519 kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
2520 mmu_pages_clear_parents(&parents);
2521 zapped++;
2522 }
2523 }
2524
2525 return zapped;
2526 }
2527
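/*
 * Zapping is split into two phases: "prepare" unlinks the shadow page from
 * its parents and children and moves it onto @invalid_list, while "commit"
 * performs a single remote TLB flush and only then frees everything on the
 * list.  Batching the flush keeps the cost of zapping many pages down.
 */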
2528 static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
2529 struct kvm_mmu_page *sp,
2530 struct list_head *invalid_list,
2531 int *nr_zapped)
2532 {
2533 bool list_unstable, zapped_root = false;
2534
2535 lockdep_assert_held_write(&kvm->mmu_lock);
2536 trace_kvm_mmu_prepare_zap_page(sp);
2537 ++kvm->stat.mmu_shadow_zapped;
2538 *nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list);
2539 *nr_zapped += kvm_mmu_page_unlink_children(kvm, sp, invalid_list);
2540 kvm_mmu_unlink_parents(kvm, sp);
2541
2542 /* Zapping children means active_mmu_pages has become unstable. */
2543 list_unstable = *nr_zapped;
2544
2545 if (!sp->role.invalid && sp_has_gptes(sp))
2546 unaccount_shadowed(kvm, sp);
2547
2548 if (sp->unsync)
2549 kvm_unlink_unsync_page(kvm, sp);
2550 if (!sp->root_count) {
2551 /* Count self */
2552 (*nr_zapped)++;
2553
2554 /*
2555 * Already invalid pages (previously active roots) are not on
2556 * the active page list. See list_del() in the "else" case of
2557 * !sp->root_count.
2558 */
2559 if (sp->role.invalid)
2560 list_add(&sp->link, invalid_list);
2561 else
2562 list_move(&sp->link, invalid_list);
2563 kvm_unaccount_mmu_page(kvm, sp);
2564 } else {
2565 /*
2566 * Remove the active root from the active page list, the root
2567 * will be explicitly freed when the root_count hits zero.
2568 */
2569 list_del(&sp->link);
2570
2571 /*
2572 * Obsolete pages cannot be used on any vCPUs, see the comment
2573 * in kvm_mmu_zap_all_fast(). Note, is_obsolete_sp() also
2574 * treats invalid shadow pages as being obsolete.
2575 */
2576 zapped_root = !is_obsolete_sp(kvm, sp);
2577 }
2578
2579 if (sp->nx_huge_page_disallowed)
2580 unaccount_nx_huge_page(kvm, sp);
2581
2582 sp->role.invalid = 1;
2583
2584 /*
2585 * Make the request to free obsolete roots after marking the root
2586 * invalid, otherwise other vCPUs may not see it as invalid.
2587 */
2588 if (zapped_root)
2589 kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_FREE_OBSOLETE_ROOTS);
2590 return list_unstable;
2591 }
2592
2593 static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
2594 struct list_head *invalid_list)
2595 {
2596 int nr_zapped;
2597
2598 __kvm_mmu_prepare_zap_page(kvm, sp, invalid_list, &nr_zapped);
2599 return nr_zapped;
2600 }
2601
2602 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
2603 struct list_head *invalid_list)
2604 {
2605 struct kvm_mmu_page *sp, *nsp;
2606
2607 if (list_empty(invalid_list))
2608 return;
2609
2610 /*
2611 * We need to make sure everyone sees our modifications to
2612 * the page tables and sees changes to vcpu->mode here. The barrier
2613 * in the kvm_flush_remote_tlbs() achieves this. This pairs
2614 * with vcpu_enter_guest and walk_shadow_page_lockless_begin/end.
2615 *
2616 * In addition, kvm_flush_remote_tlbs waits for all vcpus to exit
2617 * guest mode and/or lockless shadow page table walks.
2618 */
2619 kvm_flush_remote_tlbs(kvm);
2620
2621 list_for_each_entry_safe(sp, nsp, invalid_list, link) {
2622 WARN_ON_ONCE(!sp->role.invalid || sp->root_count);
2623 kvm_mmu_free_shadow_page(sp);
2624 }
2625 }
2626
2627 static unsigned long kvm_mmu_zap_oldest_mmu_pages(struct kvm *kvm,
2628 unsigned long nr_to_zap)
2629 {
2630 unsigned long total_zapped = 0;
2631 struct kvm_mmu_page *sp, *tmp;
2632 LIST_HEAD(invalid_list);
2633 bool unstable;
2634 int nr_zapped;
2635
2636 if (list_empty(&kvm->arch.active_mmu_pages))
2637 return 0;
2638
2639 restart:
2640 list_for_each_entry_safe_reverse(sp, tmp, &kvm->arch.active_mmu_pages, link) {
2641 /*
2642 * Don't zap active root pages, the page itself can't be freed
2643 * and zapping it will just force vCPUs to realloc and reload.
2644 */
2645 if (sp->root_count)
2646 continue;
2647
2648 unstable = __kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list,
2649 &nr_zapped);
2650 total_zapped += nr_zapped;
2651 if (total_zapped >= nr_to_zap)
2652 break;
2653
2654 if (unstable)
2655 goto restart;
2656 }
2657
2658 kvm_mmu_commit_zap_page(kvm, &invalid_list);
2659
2660 kvm->stat.mmu_recycled += total_zapped;
2661 return total_zapped;
2662 }
2663
2664 static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
2665 {
2666 if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
2667 return kvm->arch.n_max_mmu_pages -
2668 kvm->arch.n_used_mmu_pages;
2669
2670 return 0;
2671 }
2672
2673 static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
2674 {
2675 unsigned long avail = kvm_mmu_available_pages(vcpu->kvm);
2676
2677 if (likely(avail >= KVM_MIN_FREE_MMU_PAGES))
2678 return 0;
2679
2680 kvm_mmu_zap_oldest_mmu_pages(vcpu->kvm, KVM_REFILL_PAGES - avail);
2681
2682 /*
2683 * Note, this check is intentionally soft, it only guarantees that one
2684 * page is available, while the caller may end up allocating as many as
2685 * four pages, e.g. for PAE roots or for 5-level paging. Temporarily
2686 * exceeding the (arbitrary by default) limit will not harm the host,
2687 * being too aggressive may unnecessarily kill the guest, and getting an
2688 * exact count is far more trouble than it's worth, especially in the
2689 * page fault paths.
2690 */
2691 if (!kvm_mmu_available_pages(vcpu->kvm))
2692 return -ENOSPC;
2693 return 0;
2694 }
2695
2696 /*
2697 * Change the number of MMU pages allocated to the VM.
2698 * Note: if goal_nr_mmu_pages is too small, you will get a deadlock.
2699 */
2700 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
2701 {
2702 write_lock(&kvm->mmu_lock);
2703
2704 if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
2705 kvm_mmu_zap_oldest_mmu_pages(kvm, kvm->arch.n_used_mmu_pages -
2706 goal_nr_mmu_pages);
2707
2708 goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
2709 }
2710
2711 kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
2712
2713 write_unlock(&kvm->mmu_lock);
2714 }
2715
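/*
 * Zap every shadow page that is shadowing guest PTEs at @gfn, e.g. so that
 * the guest can modify the page without triggering write-protection faults.
 * Returns 1 if at least one shadow page was zapped.
 */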
2716 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
2717 {
2718 struct kvm_mmu_page *sp;
2719 LIST_HEAD(invalid_list);
2720 int r;
2721
2722 r = 0;
2723 write_lock(&kvm->mmu_lock);
2724 for_each_gfn_valid_sp_with_gptes(kvm, sp, gfn) {
2725 r = 1;
2726 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
2727 }
2728 kvm_mmu_commit_zap_page(kvm, &invalid_list);
2729 write_unlock(&kvm->mmu_lock);
2730
2731 return r;
2732 }
2733
2734 static int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
2735 {
2736 gpa_t gpa;
2737 int r;
2738
2739 if (vcpu->arch.mmu->root_role.direct)
2740 return 0;
2741
2742 gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
2743
2744 r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
2745
2746 return r;
2747 }
2748
2749 static void kvm_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
2750 {
2751 trace_kvm_mmu_unsync_page(sp);
2752 ++kvm->stat.mmu_unsync;
2753 sp->unsync = 1;
2754
2755 kvm_mmu_mark_parents_unsync(sp);
2756 }
2757
2758 /*
2759 * Attempt to unsync any shadow pages that can be reached by the specified gfn,
2760 * KVM is creating a writable mapping for said gfn. Returns 0 if all pages
2761 * were marked unsync (or if there is no shadow page), -EPERM if the SPTE must
2762 * be write-protected.
2763 */
2764 int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
2765 gfn_t gfn, bool can_unsync, bool prefetch)
2766 {
2767 struct kvm_mmu_page *sp;
2768 bool locked = false;
2769
2770 /*
2771 * Force write-protection if the page is being tracked. Note, the page
2772 * track machinery is used to write-protect upper-level shadow pages,
2773 * i.e. this guards the role.level == 4K assertion below!
2774 */
2775 if (kvm_gfn_is_write_tracked(kvm, slot, gfn))
2776 return -EPERM;
2777
2778 /*
2779 * The page is not write-tracked, mark existing shadow pages unsync
2780 * unless KVM is synchronizing an unsync SP (can_unsync = false). In
2781 * that case, KVM must complete emulation of the guest TLB flush before
2782 * allowing shadow pages to become unsync (writable by the guest).
2783 */
2784 for_each_gfn_valid_sp_with_gptes(kvm, sp, gfn) {
2785 if (!can_unsync)
2786 return -EPERM;
2787
2788 if (sp->unsync)
2789 continue;
2790
2791 if (prefetch)
2792 return -EEXIST;
2793
2794 /*
2795 * TDP MMU page faults require an additional spinlock as they
2796 * run with mmu_lock held for read, not write, and the unsync
2797 * logic is not thread safe. Take the spinlock regardless of
2798 * the MMU type to avoid extra conditionals/parameters, there's
2799 * no meaningful penalty if mmu_lock is held for write.
2800 */
2801 if (!locked) {
2802 locked = true;
2803 spin_lock(&kvm->arch.mmu_unsync_pages_lock);
2804
2805 /*
2806 * Recheck after taking the spinlock, a different vCPU
2807 * may have since marked the page unsync. A false
2808 * negative on the unprotected check above is not
2809 * possible as clearing sp->unsync _must_ hold mmu_lock
2810 * for write, i.e. unsync cannot transition from 1->0
2811 * while this CPU holds mmu_lock for read (or write).
2812 */
2813 if (READ_ONCE(sp->unsync))
2814 continue;
2815 }
2816
2817 WARN_ON_ONCE(sp->role.level != PG_LEVEL_4K);
2818 kvm_unsync_page(kvm, sp);
2819 }
2820 if (locked)
2821 spin_unlock(&kvm->arch.mmu_unsync_pages_lock);
2822
2823 /*
2824 * We need to ensure that the marking of unsync pages is visible
2825 * before the SPTE is updated to allow writes because
2826 * kvm_mmu_sync_roots() checks the unsync flags without holding
2827 * the MMU lock and so can race with this. If the SPTE was updated
2828 * before the page had been marked as unsync-ed, something like the
2829 * following could happen:
2830 *
2831 * CPU 1 CPU 2
2832 * ---------------------------------------------------------------------
2833 * 1.2 Host updates SPTE
2834 * to be writable
2835 * 2.1 Guest writes a GPTE for GVA X.
2836 * (GPTE being in the guest page table shadowed
2837 * by the SP from CPU 1.)
2838 * This reads SPTE during the page table walk.
2839 * Since SPTE.W is read as 1, there is no
2840 * fault.
2841 *
2842 * 2.2 Guest issues TLB flush.
2843 * That causes a VM Exit.
2844 *
2845 * 2.3 Walking of unsync pages sees sp->unsync is
2846 * false and skips the page.
2847 *
2848 * 2.4 Guest accesses GVA X.
2849 * Since the mapping in the SP was not updated,
2850 * so the old mapping for GVA X incorrectly
2851 * gets used.
2852 * 1.1 Host marks SP
2853 * as unsync
2854 * (sp->unsync = true)
2855 *
2856 * The write barrier below ensures that 1.1 happens before 1.2 and thus
2857 * the situation in 2.4 does not arise. It pairs with the read barrier
2858 * in is_unsync_root(), placed between 2.1's load of SPTE.W and 2.3.
2859 */
2860 smp_wmb();
2861
2862 return 0;
2863 }
2864
2865 static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
2866 u64 *sptep, unsigned int pte_access, gfn_t gfn,
2867 kvm_pfn_t pfn, struct kvm_page_fault *fault)
2868 {
2869 struct kvm_mmu_page *sp = sptep_to_sp(sptep);
2870 int level = sp->role.level;
2871 int was_rmapped = 0;
2872 int ret = RET_PF_FIXED;
2873 bool flush = false;
2874 bool wrprot;
2875 u64 spte;
2876
2877 /* Prefetching always gets a writable pfn. */
2878 bool host_writable = !fault || fault->map_writable;
2879 bool prefetch = !fault || fault->prefetch;
2880 bool write_fault = fault && fault->write;
2881
2882 if (unlikely(is_noslot_pfn(pfn))) {
2883 vcpu->stat.pf_mmio_spte_created++;
2884 mark_mmio_spte(vcpu, sptep, gfn, pte_access);
2885 return RET_PF_EMULATE;
2886 }
2887
2888 if (is_shadow_present_pte(*sptep)) {
2889 /*
2890 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
2891 * the parent of the now unreachable PTE.
2892 */
2893 if (level > PG_LEVEL_4K && !is_large_pte(*sptep)) {
2894 struct kvm_mmu_page *child;
2895 u64 pte = *sptep;
2896
2897 child = spte_to_child_sp(pte);
2898 drop_parent_pte(vcpu->kvm, child, sptep);
2899 flush = true;
2900 } else if (pfn != spte_to_pfn(*sptep)) {
2901 drop_spte(vcpu->kvm, sptep);
2902 flush = true;
2903 } else
2904 was_rmapped = 1;
2905 }
2906
2907 wrprot = make_spte(vcpu, sp, slot, pte_access, gfn, pfn, *sptep, prefetch,
2908 true, host_writable, &spte);
2909
2910 if (*sptep == spte) {
2911 ret = RET_PF_SPURIOUS;
2912 } else {
2913 flush |= mmu_spte_update(sptep, spte);
2914 trace_kvm_mmu_set_spte(level, gfn, sptep);
2915 }
2916
2917 if (wrprot) {
2918 if (write_fault)
2919 ret = RET_PF_EMULATE;
2920 }
2921
2922 if (flush)
2923 kvm_flush_remote_tlbs_gfn(vcpu->kvm, gfn, level);
2924
2925 if (!was_rmapped) {
2926 WARN_ON_ONCE(ret == RET_PF_SPURIOUS);
2927 rmap_add(vcpu, slot, sptep, gfn, pte_access);
2928 } else {
2929 /* Already rmapped but the pte_access bits may have changed. */
2930 kvm_mmu_page_set_access(sp, spte_index(sptep), pte_access);
2931 }
2932
2933 return ret;
2934 }
2935
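/*
 * Speculatively map the host pages backing the gfns in [start, end), reusing
 * the access bits of the containing shadow page.  If there is no usable
 * memslot or the pages cannot be grabbed atomically, the prefetch is
 * abandoned and the guest simply faults in those pages on demand.
 */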
2936 static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
2937 struct kvm_mmu_page *sp,
2938 u64 *start, u64 *end)
2939 {
2940 struct page *pages[PTE_PREFETCH_NUM];
2941 struct kvm_memory_slot *slot;
2942 unsigned int access = sp->role.access;
2943 int i, ret;
2944 gfn_t gfn;
2945
2946 gfn = kvm_mmu_page_get_gfn(sp, spte_index(start));
2947 slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
2948 if (!slot)
2949 return -1;
2950
2951 ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start);
2952 if (ret <= 0)
2953 return -1;
2954
2955 for (i = 0; i < ret; i++, gfn++, start++) {
2956 mmu_set_spte(vcpu, slot, start, access, gfn,
2957 page_to_pfn(pages[i]), NULL);
2958 put_page(pages[i]);
2959 }
2960
2961 return 0;
2962 }
2963
2964 static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
2965 struct kvm_mmu_page *sp, u64 *sptep)
2966 {
2967 u64 *spte, *start = NULL;
2968 int i;
2969
2970 WARN_ON_ONCE(!sp->role.direct);
2971
2972 i = spte_index(sptep) & ~(PTE_PREFETCH_NUM - 1);
2973 spte = sp->spt + i;
2974
2975 for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
2976 if (is_shadow_present_pte(*spte) || spte == sptep) {
2977 if (!start)
2978 continue;
2979 if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
2980 return;
2981 start = NULL;
2982 } else if (!start)
2983 start = spte;
2984 }
2985 if (start)
2986 direct_pte_prefetch_many(vcpu, sp, start, spte);
2987 }
2988
2989 static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
2990 {
2991 struct kvm_mmu_page *sp;
2992
2993 sp = sptep_to_sp(sptep);
2994
2995 /*
2996 * Without accessed bits, there's no way to distinguish between
2997 * actually accessed translations and prefetched, so disable pte
2998 * prefetch if accessed bits aren't available.
2999 */
3000 if (sp_ad_disabled(sp))
3001 return;
3002
3003 if (sp->role.level > PG_LEVEL_4K)
3004 return;
3005
3006 /*
3007 * If addresses are being invalidated, skip prefetching to avoid
3008 * accidentally prefetching those addresses.
3009 */
3010 if (unlikely(vcpu->kvm->mmu_invalidate_in_progress))
3011 return;
3012
3013 __direct_pte_prefetch(vcpu, sp, sptep);
3014 }
3015
3016 /*
3017 * Lookup the mapping level for @gfn in the current mm.
3018 *
3019 * WARNING! Use of host_pfn_mapping_level() requires the caller and the end
3020 * consumer to be tied into KVM's handlers for MMU notifier events!
3021 *
3022 * There are several ways to safely use this helper:
3023 *
3024 * - Check mmu_invalidate_retry_gfn() after grabbing the mapping level, before
3025 * consuming it. In this case, mmu_lock doesn't need to be held during the
3026 * lookup, but it does need to be held while checking the MMU notifier.
3027 *
3028 * - Hold mmu_lock AND ensure there is no in-progress MMU notifier invalidation
3029 * event for the hva. This can be done by explicit checking the MMU notifier
3030 * or by ensuring that KVM already has a valid mapping that covers the hva.
3031 *
3032 * - Do not use the result to install new mappings, e.g. use the host mapping
3033 * level only to decide whether or not to zap an entry. In this case, it's
3034 * not required to hold mmu_lock (though it's highly likely the caller will
3035 * want to hold mmu_lock anyways, e.g. to modify SPTEs).
3036 *
3037 * Note! The lookup can still race with modifications to host page tables, but
3038 * the above "rules" ensure KVM will not _consume_ the result of the walk if a
3039 * race with the primary MMU occurs.
3040 */
3041 static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
3042 const struct kvm_memory_slot *slot)
3043 {
3044 int level = PG_LEVEL_4K;
3045 unsigned long hva;
3046 unsigned long flags;
3047 pgd_t pgd;
3048 p4d_t p4d;
3049 pud_t pud;
3050 pmd_t pmd;
3051
3052 /*
3053 * Note, using the already-retrieved memslot and __gfn_to_hva_memslot()
3054 * is not solely for performance, it's also necessary to avoid the
3055 * "writable" check in __gfn_to_hva_many(), which will always fail on
3056 * read-only memslots due to gfn_to_hva() assuming writes. Earlier
3057 * page fault steps have already verified the guest isn't writing a
3058 * read-only memslot.
3059 */
3060 hva = __gfn_to_hva_memslot(slot, gfn);
3061
3062 /*
3063 * Disable IRQs to prevent concurrent tear down of host page tables,
3064 * e.g. if the primary MMU promotes a P*D to a huge page and then frees
3065 * the original page table.
3066 */
3067 local_irq_save(flags);
3068
3069 /*
3070 * Read each entry once. As above, a non-leaf entry can be promoted to
3071 * a huge page _during_ this walk. Re-reading the entry could send the
3072 * walk into the weeds, e.g. p*d_leaf() returns false (sees the old
3073 * value) and then p*d_offset() walks into the target huge page instead
3074 * of the old page table (sees the new value).
3075 */
3076 pgd = READ_ONCE(*pgd_offset(kvm->mm, hva));
3077 if (pgd_none(pgd))
3078 goto out;
3079
3080 p4d = READ_ONCE(*p4d_offset(&pgd, hva));
3081 if (p4d_none(p4d) || !p4d_present(p4d))
3082 goto out;
3083
3084 pud = READ_ONCE(*pud_offset(&p4d, hva));
3085 if (pud_none(pud) || !pud_present(pud))
3086 goto out;
3087
3088 if (pud_leaf(pud)) {
3089 level = PG_LEVEL_1G;
3090 goto out;
3091 }
3092
3093 pmd = READ_ONCE(*pmd_offset(&pud, hva));
3094 if (pmd_none(pmd) || !pmd_present(pmd))
3095 goto out;
3096
3097 if (pmd_leaf(pmd))
3098 level = PG_LEVEL_2M;
3099
3100 out:
3101 local_irq_restore(flags);
3102 return level;
3103 }
3104
3105 static int __kvm_mmu_max_mapping_level(struct kvm *kvm,
3106 const struct kvm_memory_slot *slot,
3107 gfn_t gfn, int max_level, bool is_private)
3108 {
3109 struct kvm_lpage_info *linfo;
3110 int host_level;
3111
3112 max_level = min(max_level, max_huge_page_level);
3113 for ( ; max_level > PG_LEVEL_4K; max_level--) {
3114 linfo = lpage_info_slot(gfn, slot, max_level);
3115 if (!linfo->disallow_lpage)
3116 break;
3117 }
3118
3119 if (is_private)
3120 return max_level;
3121
3122 if (max_level == PG_LEVEL_4K)
3123 return PG_LEVEL_4K;
3124
3125 host_level = host_pfn_mapping_level(kvm, gfn, slot);
3126 return min(host_level, max_level);
3127 }
3128
3129 int kvm_mmu_max_mapping_level(struct kvm *kvm,
3130 const struct kvm_memory_slot *slot, gfn_t gfn,
3131 int max_level)
3132 {
3133 bool is_private = kvm_slot_can_be_private(slot) &&
3134 kvm_mem_is_private(kvm, gfn);
3135
3136 return __kvm_mmu_max_mapping_level(kvm, slot, gfn, max_level, is_private);
3137 }
3138
3139 void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
3140 {
3141 struct kvm_memory_slot *slot = fault->slot;
3142 kvm_pfn_t mask;
3143
3144 fault->huge_page_disallowed = fault->exec && fault->nx_huge_page_workaround_enabled;
3145
3146 if (unlikely(fault->max_level == PG_LEVEL_4K))
3147 return;
3148
3149 if (is_error_noslot_pfn(fault->pfn))
3150 return;
3151
3152 if (kvm_slot_dirty_track_enabled(slot))
3153 return;
3154
3155 /*
3156 * Enforce the iTLB multihit workaround after capturing the requested
3157 * level, which will be used to do precise, accurate accounting.
3158 */
3159 fault->req_level = __kvm_mmu_max_mapping_level(vcpu->kvm, slot,
3160 fault->gfn, fault->max_level,
3161 fault->is_private);
3162 if (fault->req_level == PG_LEVEL_4K || fault->huge_page_disallowed)
3163 return;
3164
3165 /*
3166 * mmu_invalidate_retry() was successful and mmu_lock is held, so
3167 * the pmd can't be split from under us.
3168 */
3169 fault->goal_level = fault->req_level;
3170 mask = KVM_PAGES_PER_HPAGE(fault->goal_level) - 1;
3171 VM_BUG_ON((fault->gfn & mask) != (fault->pfn & mask));
3172 fault->pfn &= ~mask;
3173 }
3174
3175 void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level)
3176 {
3177 if (cur_level > PG_LEVEL_4K &&
3178 cur_level == fault->goal_level &&
3179 is_shadow_present_pte(spte) &&
3180 !is_large_pte(spte) &&
3181 spte_to_child_sp(spte)->nx_huge_page_disallowed) {
3182 /*
3183 * A small SPTE exists for this pfn, but FNAME(fetch),
3184 * direct_map(), or kvm_tdp_mmu_map() would like to create a
3185 * large PTE instead: just force them to go down another level,
3186 * patching the next 9 bits of the address back into the pfn
3187 * for them.
3188 */
3189 u64 page_mask = KVM_PAGES_PER_HPAGE(cur_level) -
3190 KVM_PAGES_PER_HPAGE(cur_level - 1);
3191 fault->pfn |= fault->gfn & page_mask;
3192 fault->goal_level--;
3193 }
3194 }
3195
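/*
 * Walk the shadow page tables for the faulting address, allocating and
 * linking non-leaf shadow pages as needed until the target level is reached,
 * then install the final leaf SPTE for the faulting pfn.
 */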
3196 static int direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
3197 {
3198 struct kvm_shadow_walk_iterator it;
3199 struct kvm_mmu_page *sp;
3200 int ret;
3201 gfn_t base_gfn = fault->gfn;
3202
3203 kvm_mmu_hugepage_adjust(vcpu, fault);
3204
3205 trace_kvm_mmu_spte_requested(fault);
3206 for_each_shadow_entry(vcpu, fault->addr, it) {
3207 /*
3208 * We cannot overwrite existing page tables with an NX
3209 * large page, as the leaf could be executable.
3210 */
3211 if (fault->nx_huge_page_workaround_enabled)
3212 disallowed_hugepage_adjust(fault, *it.sptep, it.level);
3213
3214 base_gfn = gfn_round_for_level(fault->gfn, it.level);
3215 if (it.level == fault->goal_level)
3216 break;
3217
3218 sp = kvm_mmu_get_child_sp(vcpu, it.sptep, base_gfn, true, ACC_ALL);
3219 if (sp == ERR_PTR(-EEXIST))
3220 continue;
3221
3222 link_shadow_page(vcpu, it.sptep, sp);
3223 if (fault->huge_page_disallowed)
3224 account_nx_huge_page(vcpu->kvm, sp,
3225 fault->req_level >= it.level);
3226 }
3227
3228 if (WARN_ON_ONCE(it.level != fault->goal_level))
3229 return -EFAULT;
3230
3231 ret = mmu_set_spte(vcpu, fault->slot, it.sptep, ACC_ALL,
3232 base_gfn, fault->pfn, fault);
3233 if (ret == RET_PF_SPURIOUS)
3234 return ret;
3235
3236 direct_pte_prefetch(vcpu, it.sptep);
3237 return ret;
3238 }
3239
3240 static void kvm_send_hwpoison_signal(struct kvm_memory_slot *slot, gfn_t gfn)
3241 {
3242 unsigned long hva = gfn_to_hva_memslot(slot, gfn);
3243
3244 send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva, PAGE_SHIFT, current);
3245 }
3246
3247 static int kvm_handle_error_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
3248 {
3249 if (is_sigpending_pfn(fault->pfn)) {
3250 kvm_handle_signal_exit(vcpu);
3251 return -EINTR;
3252 }
3253
3254 /*
3255 * Do not cache the mmio info caused by writing the readonly gfn
3256 * into the spte otherwise read access on readonly gfn also can
3257 * caused mmio page fault and treat it as mmio access.
3258 */
3259 if (fault->pfn == KVM_PFN_ERR_RO_FAULT)
3260 return RET_PF_EMULATE;
3261
3262 if (fault->pfn == KVM_PFN_ERR_HWPOISON) {
3263 kvm_send_hwpoison_signal(fault->slot, fault->gfn);
3264 return RET_PF_RETRY;
3265 }
3266
3267 return -EFAULT;
3268 }
3269
3270 static int kvm_handle_noslot_fault(struct kvm_vcpu *vcpu,
3271 struct kvm_page_fault *fault,
3272 unsigned int access)
3273 {
3274 gva_t gva = fault->is_tdp ? 0 : fault->addr;
3275
3276 if (fault->is_private) {
3277 kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
3278 return -EFAULT;
3279 }
3280
3281 vcpu_cache_mmio_info(vcpu, gva, fault->gfn,
3282 access & shadow_mmio_access_mask);
3283
3284 fault->slot = NULL;
3285 fault->pfn = KVM_PFN_NOSLOT;
3286 fault->map_writable = false;
3287 fault->hva = KVM_HVA_ERR_BAD;
3288
3289 /*
3290 * If MMIO caching is disabled, emulate immediately without
3291 * touching the shadow page tables as attempting to install an
3292 * MMIO SPTE will just be an expensive nop.
3293 */
3294 if (unlikely(!enable_mmio_caching))
3295 return RET_PF_EMULATE;
3296
3297 /*
3298 * Do not create an MMIO SPTE for a gfn greater than host.MAXPHYADDR,
3299 * any guest that generates such gfns is running nested and is being
3300 * tricked by L0 userspace (you can observe gfn > L1.MAXPHYADDR if and
3301 * only if L1's MAXPHYADDR is inaccurate with respect to the
3302 * hardware's).
3303 */
3304 if (unlikely(fault->gfn > kvm_mmu_max_gfn()))
3305 return RET_PF_EMULATE;
3306
3307 return RET_PF_CONTINUE;
3308 }
3309
3310 static bool page_fault_can_be_fast(struct kvm *kvm, struct kvm_page_fault *fault)
3311 {
3312 /*
3313 * Page faults with reserved bits set, i.e. faults on MMIO SPTEs, only
3314 * reach the common page fault handler if the SPTE has an invalid MMIO
3315 * generation number. Refreshing the MMIO generation needs to go down
3316 * the slow path. Note, EPT Misconfigs do NOT set the PRESENT flag!
3317 */
3318 if (fault->rsvd)
3319 return false;
3320
3321 /*
3322 * For hardware-protected VMs, certain conditions like attempting to
3323 * perform a write to a page which is not in the state that the guest
3324 * expects it to be in can result in a nested/extended #PF. In this
3325 * case, the below code might misconstrue this situation as being the
3326 * result of a write-protected access, and treat it as a spurious case
3327 * rather than taking any action to satisfy the real source of the #PF
3328 * such as generating a KVM_EXIT_MEMORY_FAULT. This can lead to the
3329 * guest spinning on a #PF indefinitely, so don't attempt the fast path
3330 * in this case.
3331 *
3332 * Note that the kvm_mem_is_private() check might race with an
3333 * attribute update, but this will either result in the guest spinning
3334 * on RET_PF_SPURIOUS until the update completes, or an actual spurious
3335 * case might go down the slow path. Either case will resolve itself.
3336 */
3337 if (kvm->arch.has_private_mem &&
3338 fault->is_private != kvm_mem_is_private(kvm, fault->gfn))
3339 return false;
3340
3341 /*
3342 * #PF can be fast if:
3343 *
3344 * 1. The shadow page table entry is not present and A/D bits are
3345 * disabled _by KVM_, which could mean that the fault is potentially
3346 * caused by access tracking (if enabled). If A/D bits are enabled
3347 * by KVM, but disabled by L1 for L2, KVM is forced to disable A/D
3348 * bits for L2 and employ access tracking, but the fast page fault
3349 * mechanism only supports direct MMUs.
3350 * 2. The shadow page table entry is present, the access is a write,
3351 * and no reserved bits are set (MMIO SPTEs cannot be "fixed"), i.e.
3352 * the fault was caused by a write-protection violation. If the
3353 * SPTE is MMU-writable (determined later), the fault can be fixed
3354 * by setting the Writable bit, which can be done out of mmu_lock.
3355 */
3356 if (!fault->present)
3357 return !kvm_ad_enabled();
3358
3359 /*
3360 * Note, instruction fetches and writes are mutually exclusive, ignore
3361 * the "exec" flag.
3362 */
3363 return fault->write;
3364 }
3365
3366 /*
3367 * Returns true if the SPTE was fixed successfully. Otherwise,
3368 * someone else modified the SPTE from its original value.
3369 */
3370 static bool fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu,
3371 struct kvm_page_fault *fault,
3372 u64 *sptep, u64 old_spte, u64 new_spte)
3373 {
3374 /*
3375 * Theoretically we could also set dirty bit (and flush TLB) here in
3376 * order to eliminate unnecessary PML logging. See comments in
3377 * set_spte. But fast_page_fault is very unlikely to happen with PML
3378 * enabled, so we do not do this. This might result in the same GPA
3379 * being logged in the PML buffer again when the write really happens,
3380 * and in mark_page_dirty being called twice for it. But that is also
3381 * harmless. This also avoids the TLB flush needed after setting dirty bit
3382 * so non-PML cases won't be impacted.
3383 *
3384 * Compare with set_spte where instead shadow_dirty_mask is set.
3385 */
3386 if (!try_cmpxchg64(sptep, &old_spte, new_spte))
3387 return false;
3388
3389 if (is_writable_pte(new_spte) && !is_writable_pte(old_spte))
3390 mark_page_dirty_in_slot(vcpu->kvm, fault->slot, fault->gfn);
3391
3392 return true;
3393 }
3394
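/*
 * Returns true if the SPTE already allows the access (fetch, write, or read)
 * that triggered the fault, i.e. if the fault is spurious with respect to
 * this SPTE.
 */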
3395 static bool is_access_allowed(struct kvm_page_fault *fault, u64 spte)
3396 {
3397 if (fault->exec)
3398 return is_executable_pte(spte);
3399
3400 if (fault->write)
3401 return is_writable_pte(spte);
3402
3403 /* Fault was on Read access */
3404 return spte & PT_PRESENT_MASK;
3405 }
3406
3407 /*
3408 * Returns the last level spte pointer of the shadow page walk for the given
3409 * gpa, and sets *spte to the spte value. This spte may be non-present. If no
3410 * walk could be performed, returns NULL and *spte does not contain valid data.
3411 *
3412 * Contract:
3413 * - Must be called between walk_shadow_page_lockless_{begin,end}.
3414 * - The returned sptep must not be used after walk_shadow_page_lockless_end.
3415 */
3416 static u64 *fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gpa_t gpa, u64 *spte)
3417 {
3418 struct kvm_shadow_walk_iterator iterator;
3419 u64 old_spte;
3420 u64 *sptep = NULL;
3421
3422 for_each_shadow_entry_lockless(vcpu, gpa, iterator, old_spte) {
3423 sptep = iterator.sptep;
3424 *spte = old_spte;
3425 }
3426
3427 return sptep;
3428 }
3429
3430 /*
3431 * Returns one of RET_PF_INVALID, RET_PF_FIXED or RET_PF_SPURIOUS.
3432 */
3433 static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
3434 {
3435 struct kvm_mmu_page *sp;
3436 int ret = RET_PF_INVALID;
3437 u64 spte;
3438 u64 *sptep;
3439 uint retry_count = 0;
3440
3441 if (!page_fault_can_be_fast(vcpu->kvm, fault))
3442 return ret;
3443
3444 walk_shadow_page_lockless_begin(vcpu);
3445
3446 do {
3447 u64 new_spte;
3448
3449 if (tdp_mmu_enabled)
3450 sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, fault->gfn, &spte);
3451 else
3452 sptep = fast_pf_get_last_sptep(vcpu, fault->addr, &spte);
3453
3454 /*
3455 * It's entirely possible for the mapping to have been zapped
3456 * by a different task, but the root page should always be
3457 * available as the vCPU holds a reference to its root(s).
3458 */
3459 if (WARN_ON_ONCE(!sptep))
3460 spte = FROZEN_SPTE;
3461
3462 if (!is_shadow_present_pte(spte))
3463 break;
3464
3465 sp = sptep_to_sp(sptep);
3466 if (!is_last_spte(spte, sp->role.level))
3467 break;
3468
3469 /*
3470 * Check whether the memory access that caused the fault would
3471 * still cause it if it were to be performed right now. If not,
3472 * then this is a spurious fault caused by a lazily flushed TLB,
3473 * or some other CPU has already fixed the PTE after the
3474 * current CPU took the fault.
3475 *
3476 * Need not check the access of upper level table entries since
3477 * they are always ACC_ALL.
3478 */
3479 if (is_access_allowed(fault, spte)) {
3480 ret = RET_PF_SPURIOUS;
3481 break;
3482 }
3483
3484 new_spte = spte;
3485
3486 /*
3487 * KVM only supports fixing page faults outside of MMU lock for
3488 * direct MMUs, nested MMUs are always indirect, and KVM always
3489 * uses A/D bits for non-nested MMUs. Thus, if A/D bits are
3490 * enabled, the SPTE can't be an access-tracked SPTE.
3491 */
3492 if (unlikely(!kvm_ad_enabled()) && is_access_track_spte(spte))
3493 new_spte = restore_acc_track_spte(new_spte);
3494
3495 /*
3496 * To keep things simple, only SPTEs that are MMU-writable can
3497 * be made fully writable outside of mmu_lock, e.g. only SPTEs
3498 * that were write-protected for dirty-logging or access
3499 * tracking are handled here. Don't bother checking if the
3500 * SPTE is writable to prioritize running with A/D bits enabled.
3501 * The is_access_allowed() check above handles the common case
3502 * of the fault being spurious, and the SPTE is known to be
3503 * shadow-present, i.e. except for access tracking restoration
3504 * making the new SPTE writable, the check is wasteful.
3505 */
3506 if (fault->write && is_mmu_writable_spte(spte)) {
3507 new_spte |= PT_WRITABLE_MASK;
3508
3509 /*
3510 * Do not fix write-permission on the large spte when
3511 * dirty logging is enabled. Since we only dirty the
3512 * first page into the dirty-bitmap in
3513 * fast_pf_fix_direct_spte(), other pages are missed
3514 * if its slot has dirty logging enabled.
3515 *
3516 * Instead, we let the slow page fault path create a
3517 * normal spte to fix the access.
3518 */
3519 if (sp->role.level > PG_LEVEL_4K &&
3520 kvm_slot_dirty_track_enabled(fault->slot))
3521 break;
3522 }
3523
3524 /* Verify that the fault can be handled in the fast path */
3525 if (new_spte == spte ||
3526 !is_access_allowed(fault, new_spte))
3527 break;
3528
3529 /*
3530 * Currently, fast page fault only works for direct mapping
3531 * since the gfn is not stable for indirect shadow page. See
3532 * Documentation/virt/kvm/locking.rst to get more detail.
3533 */
3534 if (fast_pf_fix_direct_spte(vcpu, fault, sptep, spte, new_spte)) {
3535 ret = RET_PF_FIXED;
3536 break;
3537 }
3538
3539 if (++retry_count > 4) {
3540 pr_warn_once("Fast #PF retrying more than 4 times.\n");
3541 break;
3542 }
3543
3544 } while (true);
3545
3546 trace_fast_page_fault(vcpu, fault, sptep, spte, ret);
3547 walk_shadow_page_lockless_end(vcpu);
3548
3549 if (ret != RET_PF_INVALID)
3550 vcpu->stat.pf_fast++;
3551
3552 return ret;
3553 }
3554
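/*
 * Release the root pointed at by *root_hpa: TDP MMU roots drop their own
 * reference, shadow MMU roots drop root_count and are queued for zapping via
 * @invalid_list when the last reference goes away and the role is invalid.
 * *root_hpa is then marked invalid.
 */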
3555 static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
3556 struct list_head *invalid_list)
3557 {
3558 struct kvm_mmu_page *sp;
3559
3560 if (!VALID_PAGE(*root_hpa))
3561 return;
3562
3563 sp = root_to_sp(*root_hpa);
3564 if (WARN_ON_ONCE(!sp))
3565 return;
3566
3567 if (is_tdp_mmu_page(sp)) {
3568 lockdep_assert_held_read(&kvm->mmu_lock);
3569 kvm_tdp_mmu_put_root(kvm, sp);
3570 } else {
3571 lockdep_assert_held_write(&kvm->mmu_lock);
3572 if (!--sp->root_count && sp->role.invalid)
3573 kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
3574 }
3575
3576 *root_hpa = INVALID_PAGE;
3577 }
3578
3579 /* roots_to_free must be some combination of the KVM_MMU_ROOT_* flags */
3580 void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
3581 ulong roots_to_free)
3582 {
3583 bool is_tdp_mmu = tdp_mmu_enabled && mmu->root_role.direct;
3584 int i;
3585 LIST_HEAD(invalid_list);
3586 bool free_active_root;
3587
3588 WARN_ON_ONCE(roots_to_free & ~KVM_MMU_ROOTS_ALL);
3589
3590 BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG);
3591
3592 /* Before acquiring the MMU lock, see if we need to do any real work. */
3593 free_active_root = (roots_to_free & KVM_MMU_ROOT_CURRENT)
3594 && VALID_PAGE(mmu->root.hpa);
3595
3596 if (!free_active_root) {
3597 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
3598 if ((roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) &&
3599 VALID_PAGE(mmu->prev_roots[i].hpa))
3600 break;
3601
3602 if (i == KVM_MMU_NUM_PREV_ROOTS)
3603 return;
3604 }
3605
3606 if (is_tdp_mmu)
3607 read_lock(&kvm->mmu_lock);
3608 else
3609 write_lock(&kvm->mmu_lock);
3610
3611 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
3612 if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i))
3613 mmu_free_root_page(kvm, &mmu->prev_roots[i].hpa,
3614 &invalid_list);
3615
3616 if (free_active_root) {
3617 if (kvm_mmu_is_dummy_root(mmu->root.hpa)) {
3618 /* Nothing to cleanup for dummy roots. */
3619 } else if (root_to_sp(mmu->root.hpa)) {
3620 mmu_free_root_page(kvm, &mmu->root.hpa, &invalid_list);
3621 } else if (mmu->pae_root) {
3622 for (i = 0; i < 4; ++i) {
3623 if (!IS_VALID_PAE_ROOT(mmu->pae_root[i]))
3624 continue;
3625
3626 mmu_free_root_page(kvm, &mmu->pae_root[i],
3627 &invalid_list);
3628 mmu->pae_root[i] = INVALID_PAE_ROOT;
3629 }
3630 }
3631 mmu->root.hpa = INVALID_PAGE;
3632 mmu->root.pgd = 0;
3633 }
3634
3635 if (is_tdp_mmu) {
3636 read_unlock(&kvm->mmu_lock);
3637 WARN_ON_ONCE(!list_empty(&invalid_list));
3638 } else {
3639 kvm_mmu_commit_zap_page(kvm, &invalid_list);
3640 write_unlock(&kvm->mmu_lock);
3641 }
3642 }
3643 EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);
3644
3645 void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu)
3646 {
3647 unsigned long roots_to_free = 0;
3648 struct kvm_mmu_page *sp;
3649 hpa_t root_hpa;
3650 int i;
3651
3652 /*
3653 * This should not be called while L2 is active, L2 can't invalidate
3654 * _only_ its own roots, e.g. INVVPID unconditionally exits.
3655 */
3656 WARN_ON_ONCE(mmu->root_role.guest_mode);
3657
3658 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
3659 root_hpa = mmu->prev_roots[i].hpa;
3660 if (!VALID_PAGE(root_hpa))
3661 continue;
3662
3663 sp = root_to_sp(root_hpa);
3664 if (!sp || sp->role.guest_mode)
3665 roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
3666 }
3667
3668 kvm_mmu_free_roots(kvm, mmu, roots_to_free);
3669 }
3670 EXPORT_SYMBOL_GPL(kvm_mmu_free_guest_mode_roots);
3671
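/*
 * Get or create the shadow page for a root at @gfn with the given quadrant
 * and level, take a root reference on it, and return its physical address.
 */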
3672 static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, int quadrant,
3673 u8 level)
3674 {
3675 union kvm_mmu_page_role role = vcpu->arch.mmu->root_role;
3676 struct kvm_mmu_page *sp;
3677
3678 role.level = level;
3679 role.quadrant = quadrant;
3680
3681 WARN_ON_ONCE(quadrant && !role.has_4_byte_gpte);
3682 WARN_ON_ONCE(role.direct && role.has_4_byte_gpte);
3683
3684 sp = kvm_mmu_get_shadow_page(vcpu, gfn, role);
3685 ++sp->root_count;
3686
3687 return __pa(sp->spt);
3688 }
3689
3690 static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
3691 {
3692 struct kvm_mmu *mmu = vcpu->arch.mmu;
3693 u8 shadow_root_level = mmu->root_role.level;
3694 hpa_t root;
3695 unsigned i;
3696 int r;
3697
3698 if (tdp_mmu_enabled)
3699 return kvm_tdp_mmu_alloc_root(vcpu);
3700
3701 write_lock(&vcpu->kvm->mmu_lock);
3702 r = make_mmu_pages_available(vcpu);
3703 if (r < 0)
3704 goto out_unlock;
3705
3706 if (shadow_root_level >= PT64_ROOT_4LEVEL) {
3707 root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level);
3708 mmu->root.hpa = root;
3709 } else if (shadow_root_level == PT32E_ROOT_LEVEL) {
3710 if (WARN_ON_ONCE(!mmu->pae_root)) {
3711 r = -EIO;
3712 goto out_unlock;
3713 }
3714
3715 for (i = 0; i < 4; ++i) {
3716 WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
3717
3718 root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT), 0,
3719 PT32_ROOT_LEVEL);
3720 mmu->pae_root[i] = root | PT_PRESENT_MASK |
3721 shadow_me_value;
3722 }
3723 mmu->root.hpa = __pa(mmu->pae_root);
3724 } else {
3725 WARN_ONCE(1, "Bad TDP root level = %d\n", shadow_root_level);
3726 r = -EIO;
3727 goto out_unlock;
3728 }
3729
3730 /* root.pgd is ignored for direct MMUs. */
3731 mmu->root.pgd = 0;
3732 out_unlock:
3733 write_unlock(&vcpu->kvm->mmu_lock);
3734 return r;
3735 }
3736
3737 static int mmu_first_shadow_root_alloc(struct kvm *kvm)
3738 {
3739 struct kvm_memslots *slots;
3740 struct kvm_memory_slot *slot;
3741 int r = 0, i, bkt;
3742
3743 /*
3744 * Check if this is the first shadow root being allocated before
3745 * taking the lock.
3746 */
3747 if (kvm_shadow_root_allocated(kvm))
3748 return 0;
3749
3750 mutex_lock(&kvm->slots_arch_lock);
3751
3752 /* Recheck, under the lock, whether this is the first shadow root. */
3753 if (kvm_shadow_root_allocated(kvm))
3754 goto out_unlock;
3755
3756 /*
3757 * Check if anything actually needs to be allocated, e.g. all metadata
3758 * will be allocated upfront if TDP is disabled.
3759 */
3760 if (kvm_memslots_have_rmaps(kvm) &&
3761 kvm_page_track_write_tracking_enabled(kvm))
3762 goto out_success;
3763
3764 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
3765 slots = __kvm_memslots(kvm, i);
3766 kvm_for_each_memslot(slot, bkt, slots) {
3767 /*
3768 * Both of these functions are no-ops if the target is
3769 * already allocated, so unconditionally calling both
3770 * is safe. Intentionally do NOT free allocations on
3771 * failure to avoid having to track which allocations
3772 * were made now versus when the memslot was created.
3773 * The metadata is guaranteed to be freed when the slot
3774 * is freed, and will be kept/used if userspace retries
3775 * KVM_RUN instead of killing the VM.
3776 */
3777 r = memslot_rmap_alloc(slot, slot->npages);
3778 if (r)
3779 goto out_unlock;
3780 r = kvm_page_track_write_tracking_alloc(slot);
3781 if (r)
3782 goto out_unlock;
3783 }
3784 }
3785
3786 /*
3787 * Ensure that shadow_root_allocated becomes true strictly after
3788 * all the related pointers are set.
3789 */
3790 out_success:
3791 smp_store_release(&kvm->arch.shadow_root_allocated, true);
3792
3793 out_unlock:
3794 mutex_unlock(&kvm->slots_arch_lock);
3795 return r;
3796 }
3797
3798 static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
3799 {
3800 struct kvm_mmu *mmu = vcpu->arch.mmu;
3801 u64 pdptrs[4], pm_mask;
3802 gfn_t root_gfn, root_pgd;
3803 int quadrant, i, r;
3804 hpa_t root;
3805
3806 root_pgd = kvm_mmu_get_guest_pgd(vcpu, mmu);
3807 root_gfn = (root_pgd & __PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
3808
3809 if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) {
3810 mmu->root.hpa = kvm_mmu_get_dummy_root();
3811 return 0;
3812 }
3813
3814 /*
3815 * On SVM, reading PDPTRs might access guest memory, which might fault
3816 * and thus might sleep. Grab the PDPTRs before acquiring mmu_lock.
3817 */
3818 if (mmu->cpu_role.base.level == PT32E_ROOT_LEVEL) {
3819 for (i = 0; i < 4; ++i) {
3820 pdptrs[i] = mmu->get_pdptr(vcpu, i);
3821 if (!(pdptrs[i] & PT_PRESENT_MASK))
3822 continue;
3823
3824 if (!kvm_vcpu_is_visible_gfn(vcpu, pdptrs[i] >> PAGE_SHIFT))
3825 pdptrs[i] = 0;
3826 }
3827 }
3828
3829 r = mmu_first_shadow_root_alloc(vcpu->kvm);
3830 if (r)
3831 return r;
3832
3833 write_lock(&vcpu->kvm->mmu_lock);
3834 r = make_mmu_pages_available(vcpu);
3835 if (r < 0)
3836 goto out_unlock;
3837
3838 /*
3839 * Do we shadow a long mode page table? If so we need to
3840 * write-protect the guest's page table root.
3841 */
3842 if (mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL) {
3843 root = mmu_alloc_root(vcpu, root_gfn, 0,
3844 mmu->root_role.level);
3845 mmu->root.hpa = root;
3846 goto set_root_pgd;
3847 }
3848
3849 if (WARN_ON_ONCE(!mmu->pae_root)) {
3850 r = -EIO;
3851 goto out_unlock;
3852 }
3853
3854 /*
3855 * We shadow a 32 bit page table. This may be a legacy 2-level
3856 * or a PAE 3-level page table. In either case we need to be aware that
3857 * the shadow page table may be a PAE or a long mode page table.
3858 */
3859 pm_mask = PT_PRESENT_MASK | shadow_me_value;
3860 if (mmu->root_role.level >= PT64_ROOT_4LEVEL) {
3861 pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
3862
3863 if (WARN_ON_ONCE(!mmu->pml4_root)) {
3864 r = -EIO;
3865 goto out_unlock;
3866 }
3867 mmu->pml4_root[0] = __pa(mmu->pae_root) | pm_mask;
3868
3869 if (mmu->root_role.level == PT64_ROOT_5LEVEL) {
3870 if (WARN_ON_ONCE(!mmu->pml5_root)) {
3871 r = -EIO;
3872 goto out_unlock;
3873 }
3874 mmu->pml5_root[0] = __pa(mmu->pml4_root) | pm_mask;
3875 }
3876 }
3877
3878 for (i = 0; i < 4; ++i) {
3879 WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
3880
3881 if (mmu->cpu_role.base.level == PT32E_ROOT_LEVEL) {
3882 if (!(pdptrs[i] & PT_PRESENT_MASK)) {
3883 mmu->pae_root[i] = INVALID_PAE_ROOT;
3884 continue;
3885 }
3886 root_gfn = pdptrs[i] >> PAGE_SHIFT;
3887 }
3888
3889 /*
3890 * If shadowing 32-bit non-PAE page tables, each PAE page
3891 * directory maps one quarter of the guest's non-PAE page
3892 * directory. Otherwise each PAE page directory shadows one guest
3893 * PAE page directory, so the quadrant should be 0.
3894 */
3895 quadrant = (mmu->cpu_role.base.level == PT32_ROOT_LEVEL) ? i : 0;
3896
3897 root = mmu_alloc_root(vcpu, root_gfn, quadrant, PT32_ROOT_LEVEL);
3898 mmu->pae_root[i] = root | pm_mask;
3899 }
3900
3901 if (mmu->root_role.level == PT64_ROOT_5LEVEL)
3902 mmu->root.hpa = __pa(mmu->pml5_root);
3903 else if (mmu->root_role.level == PT64_ROOT_4LEVEL)
3904 mmu->root.hpa = __pa(mmu->pml4_root);
3905 else
3906 mmu->root.hpa = __pa(mmu->pae_root);
3907
3908 set_root_pgd:
3909 mmu->root.pgd = root_pgd;
3910 out_unlock:
3911 write_unlock(&vcpu->kvm->mmu_lock);
3912
3913 return r;
3914 }
3915
3916 static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
3917 {
3918 struct kvm_mmu *mmu = vcpu->arch.mmu;
3919 bool need_pml5 = mmu->root_role.level > PT64_ROOT_4LEVEL;
3920 u64 *pml5_root = NULL;
3921 u64 *pml4_root = NULL;
3922 u64 *pae_root;
3923
3924 /*
3925 * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP
3926 * tables are allocated and initialized at root creation as there is no
3927 * equivalent level in the guest's NPT to shadow. Allocate the tables
3928 * on demand, as running a 32-bit L1 VMM on 64-bit KVM is very rare.
3929 */
3930 if (mmu->root_role.direct ||
3931 mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL ||
3932 mmu->root_role.level < PT64_ROOT_4LEVEL)
3933 return 0;
3934
3935 /*
3936 * NPT, the only paging mode that uses this horror, uses a fixed number
3937 * of levels for the shadow page tables, e.g. all MMUs are 4-level or
3938 * all MMUs are 5-level. Thus, this can safely require that pml5_root
3939 * is allocated if the other roots are valid and pml5 is needed, as any
3940 * prior MMU would also have required pml5.
3941 */
3942 if (mmu->pae_root && mmu->pml4_root && (!need_pml5 || mmu->pml5_root))
3943 return 0;
3944
3945 /*
3946 * The special roots should always be allocated in concert. Yell and
3947 * bail if KVM ends up in a state where only one of the roots is valid.
3948 */
3949 if (WARN_ON_ONCE(!tdp_enabled || mmu->pae_root || mmu->pml4_root ||
3950 (need_pml5 && mmu->pml5_root)))
3951 return -EIO;
3952
3953 /*
3954 * Unlike 32-bit NPT, the PDP table doesn't need to be in low mem, and
3955 * doesn't need to be decrypted.
3956 */
3957 pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3958 if (!pae_root)
3959 return -ENOMEM;
3960
3961 #ifdef CONFIG_X86_64
3962 pml4_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3963 if (!pml4_root)
3964 goto err_pml4;
3965
3966 if (need_pml5) {
3967 pml5_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3968 if (!pml5_root)
3969 goto err_pml5;
3970 }
3971 #endif
3972
3973 mmu->pae_root = pae_root;
3974 mmu->pml4_root = pml4_root;
3975 mmu->pml5_root = pml5_root;
3976
3977 return 0;
3978
3979 #ifdef CONFIG_X86_64
3980 err_pml5:
3981 free_page((unsigned long)pml4_root);
3982 err_pml4:
3983 free_page((unsigned long)pae_root);
3984 return -ENOMEM;
3985 #endif
3986 }
3987
3988 static bool is_unsync_root(hpa_t root)
3989 {
3990 struct kvm_mmu_page *sp;
3991
3992 if (!VALID_PAGE(root) || kvm_mmu_is_dummy_root(root))
3993 return false;
3994
3995 /*
3996 * The read barrier orders the CPU's read of SPTE.W during the page table
3997 * walk before the reads of sp->unsync/sp->unsync_children here.
3998 *
3999 * Even if another CPU was marking the SP as unsync-ed simultaneously,
4000 * any guest page table changes are not guaranteed to be visible anyway
4001 * until this VCPU issues a TLB flush strictly after those changes are
4002 * made. We only need to ensure that the other CPU sets these flags
4003 * before any actual changes to the page tables are made. The comments
4004 * in mmu_try_to_unsync_pages() describe what could go wrong if this
4005 * requirement isn't satisfied.
4006 */
4007 smp_rmb();
4008 sp = root_to_sp(root);
4009
4010 /*
4011 * PAE roots (somewhat arbitrarily) aren't backed by shadow pages, the
4012 * PDPTEs for a given PAE root need to be synchronized individually.
4013 */
4014 if (WARN_ON_ONCE(!sp))
4015 return false;
4016
4017 if (sp->unsync || sp->unsync_children)
4018 return true;
4019
4020 return false;
4021 }
4022
4023 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
4024 {
4025 int i;
4026 struct kvm_mmu_page *sp;
4027
4028 if (vcpu->arch.mmu->root_role.direct)
4029 return;
4030
4031 if (!VALID_PAGE(vcpu->arch.mmu->root.hpa))
4032 return;
4033
4034 vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
4035
4036 if (vcpu->arch.mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL) {
4037 hpa_t root = vcpu->arch.mmu->root.hpa;
4038
4039 if (!is_unsync_root(root))
4040 return;
4041
4042 sp = root_to_sp(root);
4043
4044 write_lock(&vcpu->kvm->mmu_lock);
4045 mmu_sync_children(vcpu, sp, true);
4046 write_unlock(&vcpu->kvm->mmu_lock);
4047 return;
4048 }
4049
4050 write_lock(&vcpu->kvm->mmu_lock);
4051
4052 for (i = 0; i < 4; ++i) {
4053 hpa_t root = vcpu->arch.mmu->pae_root[i];
4054
4055 if (IS_VALID_PAE_ROOT(root)) {
4056 sp = spte_to_child_sp(root);
4057 mmu_sync_children(vcpu, sp, true);
4058 }
4059 }
4060
4061 write_unlock(&vcpu->kvm->mmu_lock);
4062 }
4063
4064 void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu)
4065 {
4066 unsigned long roots_to_free = 0;
4067 int i;
4068
4069 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
4070 if (is_unsync_root(vcpu->arch.mmu->prev_roots[i].hpa))
4071 roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
4072
4073 /* sync prev_roots by simply freeing them */
4074 kvm_mmu_free_roots(vcpu->kvm, vcpu->arch.mmu, roots_to_free);
4075 }
4076
4077 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
4078 gpa_t vaddr, u64 access,
4079 struct x86_exception *exception)
4080 {
4081 if (exception)
4082 exception->error_code = 0;
4083 return kvm_translate_gpa(vcpu, mmu, vaddr, access, exception);
4084 }
4085
4086 static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
4087 {
4088 /*
4089 * A nested guest cannot use the MMIO cache if it is using nested
4090 * page tables, because cr2 is a nGPA while the cache stores GPAs.
4091 */
4092 if (mmu_is_nested(vcpu))
4093 return false;
4094
4095 if (direct)
4096 return vcpu_match_mmio_gpa(vcpu, addr);
4097
4098 return vcpu_match_mmio_gva(vcpu, addr);
4099 }
4100
4101 /*
4102 * Return the level of the lowest level SPTE added to sptes.
4103 * That SPTE may be non-present.
4104 *
4105 * Must be called between walk_shadow_page_lockless_{begin,end}.
4106 */
4107 static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level)
4108 {
4109 struct kvm_shadow_walk_iterator iterator;
4110 int leaf = -1;
4111 u64 spte;
4112
4113 for (shadow_walk_init(&iterator, vcpu, addr),
4114 *root_level = iterator.level;
4115 shadow_walk_okay(&iterator);
4116 __shadow_walk_next(&iterator, spte)) {
4117 leaf = iterator.level;
4118 spte = mmu_spte_get_lockless(iterator.sptep);
4119
4120 sptes[leaf] = spte;
4121 }
4122
4123 return leaf;
4124 }
4125
4126 static int get_sptes_lockless(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
4127 int *root_level)
4128 {
4129 int leaf;
4130
4131 walk_shadow_page_lockless_begin(vcpu);
4132
4133 if (is_tdp_mmu_active(vcpu))
4134 leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, root_level);
4135 else
4136 leaf = get_walk(vcpu, addr, sptes, root_level);
4137
4138 walk_shadow_page_lockless_end(vcpu);
4139 return leaf;
4140 }
4141
4142 /* return true if reserved bit(s) are detected on a valid, non-MMIO SPTE. */
4143 static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
4144 {
4145 u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
4146 struct rsvd_bits_validate *rsvd_check;
4147 int root, leaf, level;
4148 bool reserved = false;
4149
4150 leaf = get_sptes_lockless(vcpu, addr, sptes, &root);
4151 if (unlikely(leaf < 0)) {
4152 *sptep = 0ull;
4153 return reserved;
4154 }
4155
4156 *sptep = sptes[leaf];
4157
4158 /*
4159 * Skip reserved bits checks on the terminal leaf if it's not a valid
4160 * SPTE. Note, this also (intentionally) skips MMIO SPTEs, which, by
4161 * design, always have reserved bits set. The purpose of the checks is
4162 * to detect reserved bits on non-MMIO SPTEs, i.e. buggy SPTEs.
4163 */
4164 if (!is_shadow_present_pte(sptes[leaf]))
4165 leaf++;
4166
4167 rsvd_check = &vcpu->arch.mmu->shadow_zero_check;
4168
4169 for (level = root; level >= leaf; level--)
4170 reserved |= is_rsvd_spte(rsvd_check, sptes[level], level);
4171
4172 if (reserved) {
4173 pr_err("%s: reserved bits set on MMU-present spte, addr 0x%llx, hierarchy:\n",
4174 __func__, addr);
4175 for (level = root; level >= leaf; level--)
4176 pr_err("------ spte = 0x%llx level = %d, rsvd bits = 0x%llx",
4177 sptes[level], level,
4178 get_rsvd_bits(rsvd_check, sptes[level], level));
4179 }
4180
4181 return reserved;
4182 }
4183
4184 static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
4185 {
4186 u64 spte;
4187 bool reserved;
4188
4189 if (mmio_info_in_cache(vcpu, addr, direct))
4190 return RET_PF_EMULATE;
4191
4192 reserved = get_mmio_spte(vcpu, addr, &spte);
4193 if (WARN_ON_ONCE(reserved))
4194 return -EINVAL;
4195
4196 if (is_mmio_spte(vcpu->kvm, spte)) {
4197 gfn_t gfn = get_mmio_spte_gfn(spte);
4198 unsigned int access = get_mmio_spte_access(spte);
4199
4200 if (!check_mmio_spte(vcpu, spte))
4201 return RET_PF_INVALID;
4202
4203 if (direct)
4204 addr = 0;
4205
4206 trace_handle_mmio_page_fault(addr, gfn, access);
4207 vcpu_cache_mmio_info(vcpu, addr, gfn, access);
4208 return RET_PF_EMULATE;
4209 }
4210
4211 /*
4212 * If the page table is zapped by other CPUs, let the CPU fault again on
4213 * the address.
4214 */
4215 return RET_PF_RETRY;
4216 }
4217
4218 static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
4219 struct kvm_page_fault *fault)
4220 {
4221 if (unlikely(fault->rsvd))
4222 return false;
4223
4224 if (!fault->present || !fault->write)
4225 return false;
4226
4227 /*
4228 * The guest is writing a page that is write-tracked, which cannot
4229 * be fixed by the page fault handler.
4230 */
4231 if (kvm_gfn_is_write_tracked(vcpu->kvm, fault->slot, fault->gfn))
4232 return true;
4233
4234 return false;
4235 }
4236
4237 static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
4238 {
4239 struct kvm_shadow_walk_iterator iterator;
4240 u64 spte;
4241
4242 walk_shadow_page_lockless_begin(vcpu);
4243 for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
4244 clear_sp_write_flooding_count(iterator.sptep);
4245 walk_shadow_page_lockless_end(vcpu);
4246 }
4247
4248 static u32 alloc_apf_token(struct kvm_vcpu *vcpu)
4249 {
4250 /* make sure the token value is not 0 */
4251 u32 id = vcpu->arch.apf.id;
4252
4253 if (id << 12 == 0)
4254 vcpu->arch.apf.id = 1;
4255
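/* Token layout: the per-vCPU APF id above bit 12, the vCPU id in bits 11:0. */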
4256 return (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
4257 }
4258
4259 static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu,
4260 struct kvm_page_fault *fault)
4261 {
4262 struct kvm_arch_async_pf arch;
4263
4264 arch.token = alloc_apf_token(vcpu);
4265 arch.gfn = fault->gfn;
4266 arch.error_code = fault->error_code;
4267 arch.direct_map = vcpu->arch.mmu->root_role.direct;
4268 arch.cr3 = kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu);
4269
4270 return kvm_setup_async_pf(vcpu, fault->addr,
4271 kvm_vcpu_gfn_to_hva(vcpu, fault->gfn), &arch);
4272 }
4273
4274 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
4275 {
4276 int r;
4277
4278 if (WARN_ON_ONCE(work->arch.error_code & PFERR_PRIVATE_ACCESS))
4279 return;
4280
4281 if ((vcpu->arch.mmu->root_role.direct != work->arch.direct_map) ||
4282 work->wakeup_all)
4283 return;
4284
4285 r = kvm_mmu_reload(vcpu);
4286 if (unlikely(r))
4287 return;
4288
4289 if (!vcpu->arch.mmu->root_role.direct &&
4290 work->arch.cr3 != kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu))
4291 return;
4292
4293 r = kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, work->arch.error_code,
4294 true, NULL, NULL);
4295
4296 /*
4297 * Account fixed page faults, otherwise they'll never be counted, but
4298 * ignore stats for all other return codes. Page-ready "faults" aren't
4299 * truly spurious and never trigger emulation.
4300 */
4301 if (r == RET_PF_FIXED)
4302 vcpu->stat.pf_fixed++;
4303 }
4304
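/*
 * Map an allocation order (in pages) to the largest page level that can back
 * a mapping of that size.
 */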
4305 static inline u8 kvm_max_level_for_order(int order)
4306 {
4307 BUILD_BUG_ON(KVM_MAX_HUGEPAGE_LEVEL > PG_LEVEL_1G);
4308
4309 KVM_MMU_WARN_ON(order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G) &&
4310 order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M) &&
4311 order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_4K));
4312
4313 if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G))
4314 return PG_LEVEL_1G;
4315
4316 if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M))
4317 return PG_LEVEL_2M;
4318
4319 return PG_LEVEL_4K;
4320 }
4321
4322 static u8 kvm_max_private_mapping_level(struct kvm *kvm, kvm_pfn_t pfn,
4323 u8 max_level, int gmem_order)
4324 {
4325 u8 req_max_level;
4326
4327 if (max_level == PG_LEVEL_4K)
4328 return PG_LEVEL_4K;
4329
4330 max_level = min(kvm_max_level_for_order(gmem_order), max_level);
4331 if (max_level == PG_LEVEL_4K)
4332 return PG_LEVEL_4K;
4333
4334 req_max_level = kvm_x86_call(private_max_mapping_level)(kvm, pfn);
4335 if (req_max_level)
4336 max_level = min(max_level, req_max_level);
4337
4338 return max_level;
4339 }
4340
4341 static int kvm_faultin_pfn_private(struct kvm_vcpu *vcpu,
4342 struct kvm_page_fault *fault)
4343 {
4344 int max_order, r;
4345
4346 if (!kvm_slot_can_be_private(fault->slot)) {
4347 kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
4348 return -EFAULT;
4349 }
4350
4351 r = kvm_gmem_get_pfn(vcpu->kvm, fault->slot, fault->gfn, &fault->pfn,
4352 &max_order);
4353 if (r) {
4354 kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
4355 return r;
4356 }
4357
4358 fault->map_writable = !(fault->slot->flags & KVM_MEM_READONLY);
4359 fault->max_level = kvm_max_private_mapping_level(vcpu->kvm, fault->pfn,
4360 fault->max_level, max_order);
4361
4362 return RET_PF_CONTINUE;
4363 }
4364
4365 static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
4366 {
4367 bool async;
4368
4369 if (fault->is_private)
4370 return kvm_faultin_pfn_private(vcpu, fault);
4371
4372 async = false;
4373 fault->pfn = __gfn_to_pfn_memslot(fault->slot, fault->gfn, false, false,
4374 &async, fault->write,
4375 &fault->map_writable, &fault->hva);
4376 if (!async)
4377 return RET_PF_CONTINUE; /* *pfn has correct page already */
4378
4379 if (!fault->prefetch && kvm_can_do_async_pf(vcpu)) {
4380 trace_kvm_try_async_get_page(fault->addr, fault->gfn);
4381 if (kvm_find_async_pf_gfn(vcpu, fault->gfn)) {
4382 trace_kvm_async_pf_repeated_fault(fault->addr, fault->gfn);
4383 kvm_make_request(KVM_REQ_APF_HALT, vcpu);
4384 return RET_PF_RETRY;
4385 } else if (kvm_arch_setup_async_pf(vcpu, fault)) {
4386 return RET_PF_RETRY;
4387 }
4388 }
4389
4390 /*
4391 * Allow gup to bail on pending non-fatal signals when it's also allowed
4392 * to wait for IO. Note, gup always bails if it is unable to quickly
4393 * get a page and a fatal signal, i.e. SIGKILL, is pending.
4394 */
4395 fault->pfn = __gfn_to_pfn_memslot(fault->slot, fault->gfn, false, true,
4396 NULL, fault->write,
4397 &fault->map_writable, &fault->hva);
4398 return RET_PF_CONTINUE;
4399 }
4400
4401 static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
4402 unsigned int access)
4403 {
4404 struct kvm_memory_slot *slot = fault->slot;
4405 int ret;
4406
4407 /*
4408 * Note that the mmu_invalidate_seq also serves to detect a concurrent
4409 * change in attributes. is_page_fault_stale() will detect an
4410 * invalidation related to fault->gfn and resume the guest without
4411 * installing a mapping in the page tables.
4412 */
4413 fault->mmu_seq = vcpu->kvm->mmu_invalidate_seq;
4414 smp_rmb();
4415
4416 /*
4417 * Now that we have a snapshot of mmu_invalidate_seq we can check for a
4418 * private vs. shared mismatch.
4419 */
4420 if (fault->is_private != kvm_mem_is_private(vcpu->kvm, fault->gfn)) {
4421 kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
4422 return -EFAULT;
4423 }
4424
4425 if (unlikely(!slot))
4426 return kvm_handle_noslot_fault(vcpu, fault, access);
4427
4428 /*
4429 * Retry the page fault if the gfn hit a memslot that is being deleted
4430 * or moved. This ensures any existing SPTEs for the old memslot will
4431 * be zapped before KVM inserts a new MMIO SPTE for the gfn.
4432 */
4433 if (slot->flags & KVM_MEMSLOT_INVALID)
4434 return RET_PF_RETRY;
4435
4436 if (slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT) {
4437 /*
4438 * Don't map L1's APIC access page into L2, KVM doesn't support
4439 * using APICv/AVIC to accelerate L2 accesses to L1's APIC,
4440 * i.e. the access needs to be emulated. Emulating access to
4441 * L1's APIC is also correct if L1 is accelerating L2's own
4442 * virtual APIC, but for some reason L1 also maps _L1's_ APIC
4443 * into L2. Note, vcpu_is_mmio_gpa() always treats access to
4444 * the APIC as MMIO. Allow an MMIO SPTE to be created, as KVM
4445 * uses different roots for L1 vs. L2, i.e. there is no danger
4446 * of breaking APICv/AVIC for L1.
4447 */
4448 if (is_guest_mode(vcpu))
4449 return kvm_handle_noslot_fault(vcpu, fault, access);
4450
4451 /*
4452 * If the APIC access page exists but is disabled, go directly
4453 * to emulation without caching the MMIO access or creating a
4454 * MMIO SPTE. That way the cache doesn't need to be purged
4455 * when the AVIC is re-enabled.
4456 */
4457 if (!kvm_apicv_activated(vcpu->kvm))
4458 return RET_PF_EMULATE;
4459 }
4460
4461 /*
4462 * Check for a relevant mmu_notifier invalidation event before getting
4463 * the pfn from the primary MMU, and before acquiring mmu_lock.
4464 *
4465 * For mmu_lock, if there is an in-progress invalidation and the kernel
4466 * allows preemption, the invalidation task may drop mmu_lock and yield
4467 * in response to mmu_lock being contended, which is *very* counter-
4468 * productive as this vCPU can't actually make forward progress until
4469 * the invalidation completes.
4470 *
4471 * Retrying now can also avoid unnecessary lock contention in the primary
4472 * MMU, as the primary MMU doesn't necessarily hold a single lock for
4473 * the duration of the invalidation, i.e. faulting in a conflicting pfn
4474 * can cause the invalidation to take longer by holding locks that are
4475 * needed to complete the invalidation.
4476 *
4477 * Do the pre-check even for non-preemptible kernels, i.e. even if KVM
4478 * will never yield mmu_lock in response to contention, as this vCPU is
4479 * *guaranteed* to need to retry, i.e. waiting until mmu_lock is held
4480 * to detect retry guarantees the worst case latency for the vCPU.
4481 */
4482 if (mmu_invalidate_retry_gfn_unsafe(vcpu->kvm, fault->mmu_seq, fault->gfn))
4483 return RET_PF_RETRY;
4484
4485 ret = __kvm_faultin_pfn(vcpu, fault);
4486 if (ret != RET_PF_CONTINUE)
4487 return ret;
4488
4489 if (unlikely(is_error_pfn(fault->pfn)))
4490 return kvm_handle_error_pfn(vcpu, fault);
4491
4492 if (WARN_ON_ONCE(!fault->slot || is_noslot_pfn(fault->pfn)))
4493 return kvm_handle_noslot_fault(vcpu, fault, access);
4494
4495 /*
4496 * Check again for a relevant mmu_notifier invalidation event purely to
4497 * avoid contending mmu_lock. Most invalidations will be detected by
4498 * the previous check, but checking is extremely cheap relative to the
4499 * overall cost of failing to detect the invalidation until after
4500 * mmu_lock is acquired.
4501 */
4502 if (mmu_invalidate_retry_gfn_unsafe(vcpu->kvm, fault->mmu_seq, fault->gfn)) {
4503 kvm_release_pfn_clean(fault->pfn);
4504 return RET_PF_RETRY;
4505 }
4506
4507 return RET_PF_CONTINUE;
4508 }
4509
4510 /*
4511 * Returns true if the page fault is stale and needs to be retried, i.e. if the
4512 * root was invalidated by a memslot update or a relevant mmu_notifier fired.
4513 */
4514 static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
4515 struct kvm_page_fault *fault)
4516 {
4517 struct kvm_mmu_page *sp = root_to_sp(vcpu->arch.mmu->root.hpa);
4518
4519 /* Special roots, e.g. pae_root, are not backed by shadow pages. */
4520 if (sp && is_obsolete_sp(vcpu->kvm, sp))
4521 return true;
4522
4523 /*
4524 * Roots without an associated shadow page are considered invalid if
4525 * there is a pending request to free obsolete roots. The request is
4526 * only a hint that the current root _may_ be obsolete and needs to be
4527 * reloaded, e.g. if the guest frees a PGD that KVM is tracking as a
4528 * previous root, then __kvm_mmu_prepare_zap_page() signals all vCPUs
4529 * to reload even if no vCPU is actively using the root.
4530 */
4531 if (!sp && kvm_test_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu))
4532 return true;
4533
4534 /*
4535 * Check for a relevant mmu_notifier invalidation event one last time
4536 * now that mmu_lock is held, as the "unsafe" checks performed without
4537 * holding mmu_lock can get false negatives.
4538 */
4539 return fault->slot &&
4540 mmu_invalidate_retry_gfn(vcpu->kvm, fault->mmu_seq, fault->gfn);
4541 }
4542
4543 static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
4544 {
4545 int r;
4546
4547 /* Dummy roots are used only for shadowing bad guest roots. */
4548 if (WARN_ON_ONCE(kvm_mmu_is_dummy_root(vcpu->arch.mmu->root.hpa)))
4549 return RET_PF_RETRY;
4550
4551 if (page_fault_handle_page_track(vcpu, fault))
4552 return RET_PF_EMULATE;
4553
4554 r = fast_page_fault(vcpu, fault);
4555 if (r != RET_PF_INVALID)
4556 return r;
4557
4558 r = mmu_topup_memory_caches(vcpu, false);
4559 if (r)
4560 return r;
4561
4562 r = kvm_faultin_pfn(vcpu, fault, ACC_ALL);
4563 if (r != RET_PF_CONTINUE)
4564 return r;
4565
4566 r = RET_PF_RETRY;
4567 write_lock(&vcpu->kvm->mmu_lock);
4568
4569 if (is_page_fault_stale(vcpu, fault))
4570 goto out_unlock;
4571
4572 r = make_mmu_pages_available(vcpu);
4573 if (r)
4574 goto out_unlock;
4575
4576 r = direct_map(vcpu, fault);
4577
4578 out_unlock:
4579 write_unlock(&vcpu->kvm->mmu_lock);
4580 kvm_release_pfn_clean(fault->pfn);
4581 return r;
4582 }
4583
4584 static int nonpaging_page_fault(struct kvm_vcpu *vcpu,
4585 struct kvm_page_fault *fault)
4586 {
4587 /* This path builds a PAE pagetable, we can map 2mb pages at maximum. */
4588 fault->max_level = PG_LEVEL_2M;
4589 return direct_page_fault(vcpu, fault);
4590 }
4591
4592 int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
4593 u64 fault_address, char *insn, int insn_len)
4594 {
4595 int r = 1;
4596 u32 flags = vcpu->arch.apf.host_apf_flags;
4597
4598 #ifndef CONFIG_X86_64
4599 /* A 64-bit CR2 should be impossible on 32-bit KVM. */
4600 if (WARN_ON_ONCE(fault_address >> 32))
4601 return -EFAULT;
4602 #endif
4603 /*
4604 * Legacy #PF exceptions only have a 32-bit error code. Simply drop the
4605 * upper bits as KVM doesn't use them for #PF (because they are never
4606 * set), and to ensure there are no collisions with KVM-defined bits.
4607 */
4608 if (WARN_ON_ONCE(error_code >> 32))
4609 error_code = lower_32_bits(error_code);
4610
4611 /*
4612 * Restrict KVM-defined flags to bits 63:32 so that it's impossible for
4613 * them to conflict with #PF error codes, which are limited to 32 bits.
4614 */
4615 BUILD_BUG_ON(lower_32_bits(PFERR_SYNTHETIC_MASK));
4616
4617 vcpu->arch.l1tf_flush_l1d = true;
4618 if (!flags) {
4619 trace_kvm_page_fault(vcpu, fault_address, error_code);
4620
4621 if (kvm_event_needs_reinjection(vcpu))
4622 kvm_mmu_unprotect_page_virt(vcpu, fault_address);
4623 r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn,
4624 insn_len);
4625 } else if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
4626 vcpu->arch.apf.host_apf_flags = 0;
4627 local_irq_disable();
4628 kvm_async_pf_task_wait_schedule(fault_address);
4629 local_irq_enable();
4630 } else {
4631 WARN_ONCE(1, "Unexpected host async PF flags: %x\n", flags);
4632 }
4633
4634 return r;
4635 }
4636 EXPORT_SYMBOL_GPL(kvm_handle_page_fault);
4637
4638 #ifdef CONFIG_X86_64
4639 static int kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu,
4640 struct kvm_page_fault *fault)
4641 {
4642 int r;
4643
4644 if (page_fault_handle_page_track(vcpu, fault))
4645 return RET_PF_EMULATE;
4646
4647 r = fast_page_fault(vcpu, fault);
4648 if (r != RET_PF_INVALID)
4649 return r;
4650
4651 r = mmu_topup_memory_caches(vcpu, false);
4652 if (r)
4653 return r;
4654
4655 r = kvm_faultin_pfn(vcpu, fault, ACC_ALL);
4656 if (r != RET_PF_CONTINUE)
4657 return r;
4658
4659 r = RET_PF_RETRY;
4660 read_lock(&vcpu->kvm->mmu_lock);
4661
4662 if (is_page_fault_stale(vcpu, fault))
4663 goto out_unlock;
4664
4665 r = kvm_tdp_mmu_map(vcpu, fault);
4666
4667 out_unlock:
4668 read_unlock(&vcpu->kvm->mmu_lock);
4669 kvm_release_pfn_clean(fault->pfn);
4670 return r;
4671 }
4672 #endif
4673
4674 bool kvm_mmu_may_ignore_guest_pat(void)
4675 {
4676 /*
4677 * When EPT is enabled (shadow_memtype_mask is non-zero), and the VM
4678 * has non-coherent DMA (DMA doesn't snoop CPU caches), KVM's ABI is to
4679 * honor the memtype from the guest's PAT so that guest accesses to
4680 * memory that is DMA'd aren't cached against the guest's wishes. As a
4681 * result, KVM _may_ ignore guest PAT, whereas without non-coherent DMA,
4682 * KVM _always_ ignores guest PAT (when EPT is enabled).
4683 */
4684 return shadow_memtype_mask;
4685 }
4686
4687 int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
4688 {
4689 #ifdef CONFIG_X86_64
4690 if (tdp_mmu_enabled)
4691 return kvm_tdp_mmu_page_fault(vcpu, fault);
4692 #endif
4693
4694 return direct_page_fault(vcpu, fault);
4695 }
4696
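/*
 * Synchronously fault in the mapping for @gpa, retrying on RET_PF_RETRY, and
 * report the resulting mapping level via @level. Used by
 * kvm_arch_vcpu_pre_fault_memory() below.
 */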
4697 static int kvm_tdp_map_page(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code,
4698 u8 *level)
4699 {
4700 int r;
4701
4702 /*
4703 * Restrict to TDP page fault, since that's the only case where the MMU
4704 * is indexed by GPA.
4705 */
4706 if (vcpu->arch.mmu->page_fault != kvm_tdp_page_fault)
4707 return -EOPNOTSUPP;
4708
4709 do {
4710 if (signal_pending(current))
4711 return -EINTR;
4712 cond_resched();
4713 r = kvm_mmu_do_page_fault(vcpu, gpa, error_code, true, NULL, level);
4714 } while (r == RET_PF_RETRY);
4715
4716 if (r < 0)
4717 return r;
4718
4719 switch (r) {
4720 case RET_PF_FIXED:
4721 case RET_PF_SPURIOUS:
4722 return 0;
4723
4724 case RET_PF_EMULATE:
4725 return -ENOENT;
4726
4727 case RET_PF_RETRY:
4728 case RET_PF_CONTINUE:
4729 case RET_PF_INVALID:
4730 default:
4731 WARN_ONCE(1, "could not fix page fault during prefault");
4732 return -EIO;
4733 }
4734 }
4735
4736 long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
4737 struct kvm_pre_fault_memory *range)
4738 {
4739 u64 error_code = PFERR_GUEST_FINAL_MASK;
4740 u8 level = PG_LEVEL_4K;
4741 u64 end;
4742 int r;
4743
4744 if (!vcpu->kvm->arch.pre_fault_allowed)
4745 return -EOPNOTSUPP;
4746
4747 /*
4748 * reload is efficient when called repeatedly, so we can do it on
4749 * every iteration.
4750 */
4751 r = kvm_mmu_reload(vcpu);
4752 if (r)
4753 return r;
4754
4755 if (kvm_arch_has_private_mem(vcpu->kvm) &&
4756 kvm_mem_is_private(vcpu->kvm, gpa_to_gfn(range->gpa)))
4757 error_code |= PFERR_PRIVATE_ACCESS;
4758
4759 /*
4760 * Shadow paging uses GVA for kvm page fault, so restrict to
4761 * two-dimensional paging.
4762 */
4763 r = kvm_tdp_map_page(vcpu, range->gpa, error_code, &level);
4764 if (r < 0)
4765 return r;
4766
4767 /*
4768 * If the mapping that covers range->gpa can use a huge page, it
4769 * may start below it or end after range->gpa + range->size.
4770 */
4771 end = (range->gpa & KVM_HPAGE_MASK(level)) + KVM_HPAGE_SIZE(level);
4772 return min(range->size, end - range->gpa);
4773 }
4774
4775 static void nonpaging_init_context(struct kvm_mmu *context)
4776 {
4777 context->page_fault = nonpaging_page_fault;
4778 context->gva_to_gpa = nonpaging_gva_to_gpa;
4779 context->sync_spte = NULL;
4780 }
4781
4782 static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
4783 union kvm_mmu_page_role role)
4784 {
4785 struct kvm_mmu_page *sp;
4786
4787 if (!VALID_PAGE(root->hpa))
4788 return false;
4789
4790 if (!role.direct && pgd != root->pgd)
4791 return false;
4792
4793 sp = root_to_sp(root->hpa);
4794 if (WARN_ON_ONCE(!sp))
4795 return false;
4796
4797 return role.word == sp->role.word;
4798 }
4799
4800 /*
4801 * Find out if a previously cached root matching the new pgd/role is available,
4802 * and insert the current root as the MRU in the cache.
4803 * If a matching root is found, it is assigned to kvm_mmu->root and
4804 * true is returned.
4805 * If no match is found, kvm_mmu->root is left invalid, the LRU root is
4806 * evicted to make room for the current root, and false is returned.
4807 */
4808 static bool cached_root_find_and_keep_current(struct kvm *kvm, struct kvm_mmu *mmu,
4809 gpa_t new_pgd,
4810 union kvm_mmu_page_role new_role)
4811 {
4812 uint i;
4813
4814 if (is_root_usable(&mmu->root, new_pgd, new_role))
4815 return true;
4816
4817 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
4818 /*
4819 * The swaps end up rotating the cache like this:
4820 * C 0 1 2 3 (on entry to the function)
4821 * 0 C 1 2 3
4822 * 1 C 0 2 3
4823 * 2 C 0 1 3
4824 * 3 C 0 1 2 (on exit from the loop)
4825 */
4826 swap(mmu->root, mmu->prev_roots[i]);
4827 if (is_root_usable(&mmu->root, new_pgd, new_role))
4828 return true;
4829 }
4830
4831 kvm_mmu_free_roots(kvm, mmu, KVM_MMU_ROOT_CURRENT);
4832 return false;
4833 }
4834
4835 /*
4836 * Find out if a previously cached root matching the new pgd/role is available.
4837 * On entry, mmu->root is invalid.
4838 * If a matching root is found, it is assigned to kvm_mmu->root, the LRU entry
4839 * of the cache becomes invalid, and true is returned.
4840 * If no match is found, kvm_mmu->root is left invalid and false is returned.
4841 */
4842 static bool cached_root_find_without_current(struct kvm *kvm, struct kvm_mmu *mmu,
4843 gpa_t new_pgd,
4844 union kvm_mmu_page_role new_role)
4845 {
4846 uint i;
4847
4848 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
4849 if (is_root_usable(&mmu->prev_roots[i], new_pgd, new_role))
4850 goto hit;
4851
4852 return false;
4853
4854 hit:
4855 swap(mmu->root, mmu->prev_roots[i]);
4856 /* Bubble up the remaining roots. */
4857 for (; i < KVM_MMU_NUM_PREV_ROOTS - 1; i++)
4858 mmu->prev_roots[i] = mmu->prev_roots[i + 1];
4859 mmu->prev_roots[i].hpa = INVALID_PAGE;
4860 return true;
4861 }
4862
4863 static bool fast_pgd_switch(struct kvm *kvm, struct kvm_mmu *mmu,
4864 gpa_t new_pgd, union kvm_mmu_page_role new_role)
4865 {
4866 /*
4867 * Limit reuse to 64-bit hosts+VMs without "special" roots in order to
4868 * avoid having to deal with PDPTEs and other complexities.
4869 */
4870 if (VALID_PAGE(mmu->root.hpa) && !root_to_sp(mmu->root.hpa))
4871 kvm_mmu_free_roots(kvm, mmu, KVM_MMU_ROOT_CURRENT);
4872
4873 if (VALID_PAGE(mmu->root.hpa))
4874 return cached_root_find_and_keep_current(kvm, mmu, new_pgd, new_role);
4875 else
4876 return cached_root_find_without_current(kvm, mmu, new_pgd, new_role);
4877 }
4878
4879 void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
4880 {
4881 struct kvm_mmu *mmu = vcpu->arch.mmu;
4882 union kvm_mmu_page_role new_role = mmu->root_role;
4883
4884 /*
4885 * Return immediately if no usable root was found, kvm_mmu_reload()
4886 * will establish a valid root prior to the next VM-Enter.
4887 */
4888 if (!fast_pgd_switch(vcpu->kvm, mmu, new_pgd, new_role))
4889 return;
4890
4891 /*
4892 * It's possible that the cached previous root page is obsolete because
4893 * of a change in the MMU generation number. However, changing the
4894 * generation number is accompanied by KVM_REQ_MMU_FREE_OBSOLETE_ROOTS,
4895 * which will free the root set here and allocate a new one.
4896 */
4897 kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
4898
4899 if (force_flush_and_sync_on_reuse) {
4900 kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
4901 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
4902 }
4903
4904 /*
4905 * The last MMIO access's GVA and GPA are cached in the VCPU. When
4906 * switching to a new CR3, that GVA->GPA mapping may no longer be
4907 * valid. So clear any cached MMIO info even when we don't need to sync
4908 * the shadow page tables.
4909 */
4910 vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
4911
4912 /*
4913 * If this is a direct root page, it doesn't have a write flooding
4914 * count. Otherwise, clear the write flooding count.
4915 */
4916 if (!new_role.direct) {
4917 struct kvm_mmu_page *sp = root_to_sp(vcpu->arch.mmu->root.hpa);
4918
4919 if (!WARN_ON_ONCE(!sp))
4920 __clear_sp_write_flooding_count(sp);
4921 }
4922 }
4923 EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
4924
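/*
 * Refresh an MMIO SPTE during sync: drop it if the gfn no longer matches,
 * otherwise update the cached access bits. Returns true if the SPTE was an
 * MMIO SPTE and has been handled.
 */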
4925 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
4926 unsigned int access)
4927 {
4928 if (unlikely(is_mmio_spte(vcpu->kvm, *sptep))) {
4929 if (gfn != get_mmio_spte_gfn(*sptep)) {
4930 mmu_spte_clear_no_track(sptep);
4931 return true;
4932 }
4933
4934 mark_mmio_spte(vcpu, sptep, gfn, access);
4935 return true;
4936 }
4937
4938 return false;
4939 }
4940
4941 #define PTTYPE_EPT 18 /* arbitrary */
4942 #define PTTYPE PTTYPE_EPT
4943 #include "paging_tmpl.h"
4944 #undef PTTYPE
4945
4946 #define PTTYPE 64
4947 #include "paging_tmpl.h"
4948 #undef PTTYPE
4949
4950 #define PTTYPE 32
4951 #include "paging_tmpl.h"
4952 #undef PTTYPE
4953
4954 static void __reset_rsvds_bits_mask(struct rsvd_bits_validate *rsvd_check,
4955 u64 pa_bits_rsvd, int level, bool nx,
4956 bool gbpages, bool pse, bool amd)
4957 {
4958 u64 gbpages_bit_rsvd = 0;
4959 u64 nonleaf_bit8_rsvd = 0;
4960 u64 high_bits_rsvd;
4961
4962 rsvd_check->bad_mt_xwr = 0;
4963
4964 if (!gbpages)
4965 gbpages_bit_rsvd = rsvd_bits(7, 7);
4966
4967 if (level == PT32E_ROOT_LEVEL)
4968 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 62);
4969 else
4970 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
4971
4972 /* Note, NX doesn't exist in PDPTEs, this is handled below. */
4973 if (!nx)
4974 high_bits_rsvd |= rsvd_bits(63, 63);
4975
4976 /*
4977 * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for
4978 * leaf entries) on AMD CPUs only.
4979 */
4980 if (amd)
4981 nonleaf_bit8_rsvd = rsvd_bits(8, 8);
4982
4983 switch (level) {
4984 case PT32_ROOT_LEVEL:
4985 /* no rsvd bits for 2 level 4K page table entries */
4986 rsvd_check->rsvd_bits_mask[0][1] = 0;
4987 rsvd_check->rsvd_bits_mask[0][0] = 0;
4988 rsvd_check->rsvd_bits_mask[1][0] =
4989 rsvd_check->rsvd_bits_mask[0][0];
4990
4991 if (!pse) {
4992 rsvd_check->rsvd_bits_mask[1][1] = 0;
4993 break;
4994 }
4995
4996 if (is_cpuid_PSE36())
4997 /* 36bits PSE 4MB page */
4998 rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
4999 else
5000 /* 32 bits PSE 4MB page */
5001 rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
5002 break;
5003 case PT32E_ROOT_LEVEL:
5004 rsvd_check->rsvd_bits_mask[0][2] = rsvd_bits(63, 63) |
5005 high_bits_rsvd |
5006 rsvd_bits(5, 8) |
5007 rsvd_bits(1, 2); /* PDPTE */
5008 rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd; /* PDE */
5009 rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd; /* PTE */
5010 rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
5011 rsvd_bits(13, 20); /* large page */
5012 rsvd_check->rsvd_bits_mask[1][0] =
5013 rsvd_check->rsvd_bits_mask[0][0];
5014 break;
5015 case PT64_ROOT_5LEVEL:
5016 rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd |
5017 nonleaf_bit8_rsvd |
5018 rsvd_bits(7, 7);
5019 rsvd_check->rsvd_bits_mask[1][4] =
5020 rsvd_check->rsvd_bits_mask[0][4];
5021 fallthrough;
5022 case PT64_ROOT_4LEVEL:
5023 rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd |
5024 nonleaf_bit8_rsvd |
5025 rsvd_bits(7, 7);
5026 rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd |
5027 gbpages_bit_rsvd;
5028 rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd;
5029 rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
5030 rsvd_check->rsvd_bits_mask[1][3] =
5031 rsvd_check->rsvd_bits_mask[0][3];
5032 rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd |
5033 gbpages_bit_rsvd |
5034 rsvd_bits(13, 29);
5035 rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
5036 rsvd_bits(13, 20); /* large page */
5037 rsvd_check->rsvd_bits_mask[1][0] =
5038 rsvd_check->rsvd_bits_mask[0][0];
5039 break;
5040 }
5041 }
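/*
 * Illustrative note on the indexing above: the first index of
 * rsvd_bits_mask[][] distinguishes non-huge entries (0) from huge-page leaf
 * entries (1), and the second index is "level - 1"; [1][0] simply mirrors
 * [0][0] because 4K PTEs are always leaves.  For example, assuming a guest
 * MAXPHYADDR of 36, high_bits_rsvd covers bits 36..51 for 4-level paging,
 * so a 2MB PDE ([1][1]) additionally reserves bits 13..20.
 */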
5042
5043 static void reset_guest_rsvds_bits_mask(struct kvm_vcpu *vcpu,
5044 struct kvm_mmu *context)
5045 {
5046 __reset_rsvds_bits_mask(&context->guest_rsvd_check,
5047 vcpu->arch.reserved_gpa_bits,
5048 context->cpu_role.base.level, is_efer_nx(context),
5049 guest_can_use(vcpu, X86_FEATURE_GBPAGES),
5050 is_cr4_pse(context),
5051 guest_cpuid_is_amd_compatible(vcpu));
5052 }
5053
5054 static void __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
5055 u64 pa_bits_rsvd, bool execonly,
5056 int huge_page_level)
5057 {
5058 u64 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
5059 u64 large_1g_rsvd = 0, large_2m_rsvd = 0;
5060 u64 bad_mt_xwr;
5061
5062 if (huge_page_level < PG_LEVEL_1G)
5063 large_1g_rsvd = rsvd_bits(7, 7);
5064 if (huge_page_level < PG_LEVEL_2M)
5065 large_2m_rsvd = rsvd_bits(7, 7);
5066
5067 rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd | rsvd_bits(3, 7);
5068 rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd | rsvd_bits(3, 7);
5069 rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd | rsvd_bits(3, 6) | large_1g_rsvd;
5070 rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd | rsvd_bits(3, 6) | large_2m_rsvd;
5071 rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
5072
5073 /* large page */
5074 rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4];
5075 rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3];
5076 rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd | rsvd_bits(12, 29) | large_1g_rsvd;
5077 rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | rsvd_bits(12, 20) | large_2m_rsvd;
5078 rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0];
5079
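/*
 * bad_mt_xwr is a 64-bit bitmap indexed by the low six bits of an EPT entry
 * (memtype in bits 5:3, XWR in bits 2:0); a set bit marks that combination
 * as reserved.  Each "0xFFull << (N * 8)" below bans memtype N for every XWR
 * value, while REPEAT_BYTE(1ull << X) bans XWR value X for every memtype.
 */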
5080 bad_mt_xwr = 0xFFull << (2 * 8); /* bits 3..5 must not be 2 */
5081 bad_mt_xwr |= 0xFFull << (3 * 8); /* bits 3..5 must not be 3 */
5082 bad_mt_xwr |= 0xFFull << (7 * 8); /* bits 3..5 must not be 7 */
5083 bad_mt_xwr |= REPEAT_BYTE(1ull << 2); /* bits 0..2 must not be 010 */
5084 bad_mt_xwr |= REPEAT_BYTE(1ull << 6); /* bits 0..2 must not be 110 */
5085 if (!execonly) {
5086 /* bits 0..2 must not be 100 unless VMX capabilities allow it */
5087 bad_mt_xwr |= REPEAT_BYTE(1ull << 4);
5088 }
5089 rsvd_check->bad_mt_xwr = bad_mt_xwr;
5090 }
5091
5092 static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
5093 struct kvm_mmu *context, bool execonly, int huge_page_level)
5094 {
5095 __reset_rsvds_bits_mask_ept(&context->guest_rsvd_check,
5096 vcpu->arch.reserved_gpa_bits, execonly,
5097 huge_page_level);
5098 }
5099
5100 static inline u64 reserved_hpa_bits(void)
5101 {
5102 return rsvd_bits(kvm_host.maxphyaddr, 63);
5103 }
5104
5105 /*
5106 * The page table on the host is the shadow page table for the page
5107 * table in the guest or AMD nested guest; its MMU features completely
5108 * follow the features in the guest.
5109 */
5110 static void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
5111 struct kvm_mmu *context)
5112 {
5113 /* @amd adds a check on a bit of SPTEs, which KVM shouldn't use anyway. */
5114 bool is_amd = true;
5115 /* KVM doesn't use 2-level page tables for the shadow MMU. */
5116 bool is_pse = false;
5117 struct rsvd_bits_validate *shadow_zero_check;
5118 int i;
5119
5120 WARN_ON_ONCE(context->root_role.level < PT32E_ROOT_LEVEL);
5121
5122 shadow_zero_check = &context->shadow_zero_check;
5123 __reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
5124 context->root_role.level,
5125 context->root_role.efer_nx,
5126 guest_can_use(vcpu, X86_FEATURE_GBPAGES),
5127 is_pse, is_amd);
5128
5129 if (!shadow_me_mask)
5130 return;
5131
5132 for (i = context->root_role.level; --i >= 0;) {
5133 /*
5134 * So far shadow_me_value is a constant during KVM's lifetime.
5135 * Bits in shadow_me_value are allowed to be set.
5136 * Bits in shadow_me_mask but not in shadow_me_value are
5137 * not allowed to be set.
5138 */
5139 shadow_zero_check->rsvd_bits_mask[0][i] |= shadow_me_mask;
5140 shadow_zero_check->rsvd_bits_mask[1][i] |= shadow_me_mask;
5141 shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_value;
5142 shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_value;
5143 }
5144
5145 }
5146
5147 static inline bool boot_cpu_is_amd(void)
5148 {
5149 WARN_ON_ONCE(!tdp_enabled);
5150 return shadow_x_mask == 0;
5151 }
5152
5153 /*
5154 * The direct page table on the host uses as many MMU features as
5155 * possible; however, KVM currently does not do execution-protection.
5156 */
5157 static void reset_tdp_shadow_zero_bits_mask(struct kvm_mmu *context)
5158 {
5159 struct rsvd_bits_validate *shadow_zero_check;
5160 int i;
5161
5162 shadow_zero_check = &context->shadow_zero_check;
5163
5164 if (boot_cpu_is_amd())
5165 __reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
5166 context->root_role.level, true,
5167 boot_cpu_has(X86_FEATURE_GBPAGES),
5168 false, true);
5169 else
5170 __reset_rsvds_bits_mask_ept(shadow_zero_check,
5171 reserved_hpa_bits(), false,
5172 max_huge_page_level);
5173
5174 if (!shadow_me_mask)
5175 return;
5176
5177 for (i = context->root_role.level; --i >= 0;) {
5178 shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
5179 shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
5180 }
5181 }
5182
5183 /*
5184 * Same as the comments in reset_shadow_zero_bits_mask(), except this
5185 * is the shadow page table for an Intel nested guest.
5186 */
5187 static void
5188 reset_ept_shadow_zero_bits_mask(struct kvm_mmu *context, bool execonly)
5189 {
5190 __reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
5191 reserved_hpa_bits(), execonly,
5192 max_huge_page_level);
5193 }
5194
5195 #define BYTE_MASK(access) \
5196 ((1 & (access) ? 2 : 0) | \
5197 (2 & (access) ? 4 : 0) | \
5198 (3 & (access) ? 8 : 0) | \
5199 (4 & (access) ? 16 : 0) | \
5200 (5 & (access) ? 32 : 0) | \
5201 (6 & (access) ? 64 : 0) | \
5202 (7 & (access) ? 128 : 0))
5203
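/*
 * Worked example (assuming the usual ACC_* encoding of X=1, W=2, U=4, as
 * defined in spte.h): BYTE_MASK(ACC_EXEC_MASK) sets bit v for every UWX
 * combination v in 1..7 that includes X, i.e. 0xaa; likewise
 * BYTE_MASK(ACC_WRITE_MASK) is 0xcc and BYTE_MASK(ACC_USER_MASK) is 0xf0.
 */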
5204
5205 static void update_permission_bitmask(struct kvm_mmu *mmu, bool ept)
5206 {
5207 unsigned byte;
5208
5209 const u8 x = BYTE_MASK(ACC_EXEC_MASK);
5210 const u8 w = BYTE_MASK(ACC_WRITE_MASK);
5211 const u8 u = BYTE_MASK(ACC_USER_MASK);
5212
5213 bool cr4_smep = is_cr4_smep(mmu);
5214 bool cr4_smap = is_cr4_smap(mmu);
5215 bool cr0_wp = is_cr0_wp(mmu);
5216 bool efer_nx = is_efer_nx(mmu);
5217
5218 for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
5219 unsigned pfec = byte << 1;
5220
5221 /*
5222 * Each "*f" variable has a 1 bit for each UWX value
5223 * that causes a fault with the given PFEC.
5224 */
5225
5226 /* Faults from writes to non-writable pages */
5227 u8 wf = (pfec & PFERR_WRITE_MASK) ? (u8)~w : 0;
5228 /* Faults from user mode accesses to supervisor pages */
5229 u8 uf = (pfec & PFERR_USER_MASK) ? (u8)~u : 0;
5230 /* Faults from fetches of non-executable pages*/
5231 u8 ff = (pfec & PFERR_FETCH_MASK) ? (u8)~x : 0;
5232 /* Faults from kernel mode fetches of user pages */
5233 u8 smepf = 0;
5234 /* Faults from kernel mode accesses of user pages */
5235 u8 smapf = 0;
5236
5237 if (!ept) {
5238 /* Faults from kernel mode accesses to user pages */
5239 u8 kf = (pfec & PFERR_USER_MASK) ? 0 : u;
5240
5241 /* Not really needed: !nx will cause pte.nx to fault */
5242 if (!efer_nx)
5243 ff = 0;
5244
5245 /* Allow supervisor writes if !cr0.wp */
5246 if (!cr0_wp)
5247 wf = (pfec & PFERR_USER_MASK) ? wf : 0;
5248
5249 /* Disallow supervisor fetches of user code if cr4.smep */
5250 if (cr4_smep)
5251 smepf = (pfec & PFERR_FETCH_MASK) ? kf : 0;
5252
5253 /*
5254 * SMAP:kernel-mode data accesses from user-mode
5255 * mappings should fault. A fault is considered
5256 * as a SMAP violation if all of the following
5257 * conditions are true:
5258 * - X86_CR4_SMAP is set in CR4
5259 * - A user page is accessed
5260 * - The access is not a fetch
5261 * - The access is supervisor mode
5262 * - If implicit supervisor access or X86_EFLAGS_AC is clear
5263 *
5264 * Here, we cover the first four conditions.
5265 * The fifth is computed dynamically in permission_fault();
5266 * PFERR_RSVD_MASK bit will be set in PFEC if the access is
5267 * *not* subject to SMAP restrictions.
5268 */
5269 if (cr4_smap)
5270 smapf = (pfec & (PFERR_RSVD_MASK|PFERR_FETCH_MASK)) ? 0 : kf;
5271 }
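/*
 * Illustrative example (same ACC_* assumption as above): for a plain
 * supervisor write fault, pfec == PFERR_WRITE_MASK and byte == 1, so with
 * SMEP/SMAP clear and CR0.WP=1 the entry below reduces to (u8)~w, i.e.
 * every UWX combination lacking W faults on such a write.
 */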
5272
5273 mmu->permissions[byte] = ff | uf | wf | smepf | smapf;
5274 }
5275 }
5276
5277 /*
5278 * PKU is an additional mechanism by which the paging controls access to
5279 * user-mode addresses based on the value in the PKRU register. Protection
5280 * key violations are reported through a bit in the page fault error code.
5281 * Unlike other bits of the error code, the PK bit is not known at the
5282 * call site of e.g. gva_to_gpa; it must be computed directly in
5283 * permission_fault based on two bits of PKRU, on some machine state (CR4,
5284 * CR0, EFER, CPL), and on other bits of the error code and the page tables.
5285 *
5286 * In particular the following conditions come from the error code, the
5287 * page tables and the machine state:
5288 * - PK is always zero unless CR4.PKE=1 and EFER.LMA=1
5289 * - PK is always zero if RSVD=1 (reserved bit set) or F=1 (instruction fetch)
5290 * - PK is always zero if U=0 in the page tables
5291 * - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access.
5292 *
5293 * The PKRU bitmask caches the result of these four conditions. The error
5294 * code (minus the P bit) and the page table's U bit form an index into the
5295 * PKRU bitmask. Two bits of the PKRU bitmask are then extracted and ANDed
5296 * with the two bits of the PKRU register corresponding to the protection key.
5297 * For the first three conditions above the bits will be 00, thus masking
5298 * away both AD and WD. For all reads or if the last condition holds, WD
5299 * only will be masked away.
5300 */
5301 static void update_pkru_bitmask(struct kvm_mmu *mmu)
5302 {
5303 unsigned bit;
5304 bool wp;
5305
5306 mmu->pkru_mask = 0;
5307
5308 if (!is_cr4_pke(mmu))
5309 return;
5310
5311 wp = is_cr0_wp(mmu);
5312
5313 for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) {
5314 unsigned pfec, pkey_bits;
5315 bool check_pkey, check_write, ff, uf, wf, pte_user;
5316
5317 pfec = bit << 1;
5318 ff = pfec & PFERR_FETCH_MASK;
5319 uf = pfec & PFERR_USER_MASK;
5320 wf = pfec & PFERR_WRITE_MASK;
5321
5322 /* PFEC.RSVD is replaced by ACC_USER_MASK. */
5323 pte_user = pfec & PFERR_RSVD_MASK;
5324
5325 /*
5326 * Only need to check the access which is not an
5327 * instruction fetch and is to a user page.
5328 */
5329 check_pkey = (!ff && pte_user);
5330 /*
5331 * write access is controlled by PKRU if it is a
5332 * user access or CR0.WP = 1.
5333 */
5334 check_write = check_pkey && wf && (uf || wp);
5335
5336 /* PKRU.AD stops both read and write access. */
5337 pkey_bits = !!check_pkey;
5338 /* PKRU.WD stops write access. */
5339 pkey_bits |= (!!check_write) << 1;
5340
5341 mmu->pkru_mask |= (pkey_bits & 3) << pfec;
5342 }
5343 }
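/*
 * Summary of the encoding above: each pfec index contributes two bits to
 * pkru_mask.  Bit "pfec" is set when PKRU.AD must be honoured (any data
 * access to a user page), and bit "pfec + 1" when PKRU.WD must also be
 * honoured (a write that is either a user access or made with CR0.WP=1).
 * permission_fault() later extracts those two bits and ANDs them with the
 * AD/WD bits of the protection key named by the PTE.
 */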
5344
5345 static void reset_guest_paging_metadata(struct kvm_vcpu *vcpu,
5346 struct kvm_mmu *mmu)
5347 {
5348 if (!is_cr0_pg(mmu))
5349 return;
5350
5351 reset_guest_rsvds_bits_mask(vcpu, mmu);
5352 update_permission_bitmask(mmu, false);
5353 update_pkru_bitmask(mmu);
5354 }
5355
5356 static void paging64_init_context(struct kvm_mmu *context)
5357 {
5358 context->page_fault = paging64_page_fault;
5359 context->gva_to_gpa = paging64_gva_to_gpa;
5360 context->sync_spte = paging64_sync_spte;
5361 }
5362
5363 static void paging32_init_context(struct kvm_mmu *context)
5364 {
5365 context->page_fault = paging32_page_fault;
5366 context->gva_to_gpa = paging32_gva_to_gpa;
5367 context->sync_spte = paging32_sync_spte;
5368 }
5369
5370 static union kvm_cpu_role kvm_calc_cpu_role(struct kvm_vcpu *vcpu,
5371 const struct kvm_mmu_role_regs *regs)
5372 {
5373 union kvm_cpu_role role = {0};
5374
5375 role.base.access = ACC_ALL;
5376 role.base.smm = is_smm(vcpu);
5377 role.base.guest_mode = is_guest_mode(vcpu);
5378 role.ext.valid = 1;
5379
5380 if (!____is_cr0_pg(regs)) {
5381 role.base.direct = 1;
5382 return role;
5383 }
5384
5385 role.base.efer_nx = ____is_efer_nx(regs);
5386 role.base.cr0_wp = ____is_cr0_wp(regs);
5387 role.base.smep_andnot_wp = ____is_cr4_smep(regs) && !____is_cr0_wp(regs);
5388 role.base.smap_andnot_wp = ____is_cr4_smap(regs) && !____is_cr0_wp(regs);
5389 role.base.has_4_byte_gpte = !____is_cr4_pae(regs);
5390
5391 if (____is_efer_lma(regs))
5392 role.base.level = ____is_cr4_la57(regs) ? PT64_ROOT_5LEVEL
5393 : PT64_ROOT_4LEVEL;
5394 else if (____is_cr4_pae(regs))
5395 role.base.level = PT32E_ROOT_LEVEL;
5396 else
5397 role.base.level = PT32_ROOT_LEVEL;
5398
5399 role.ext.cr4_smep = ____is_cr4_smep(regs);
5400 role.ext.cr4_smap = ____is_cr4_smap(regs);
5401 role.ext.cr4_pse = ____is_cr4_pse(regs);
5402
5403 /* PKEY and LA57 are active iff long mode is active. */
5404 role.ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs);
5405 role.ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs);
5406 role.ext.efer_lma = ____is_efer_lma(regs);
5407 return role;
5408 }
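/*
 * Illustrative examples of the role calculation above: a 64-bit guest
 * (CR0.PG=1, CR4.PAE=1, EFER.LMA=1, CR4.LA57=0) yields base.level ==
 * PT64_ROOT_4LEVEL with 8-byte GPTEs, whereas a legacy 32-bit non-PAE guest
 * yields PT32_ROOT_LEVEL with has_4_byte_gpte == true.
 */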
5409
5410 void __kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
5411 struct kvm_mmu *mmu)
5412 {
5413 const bool cr0_wp = kvm_is_cr0_bit_set(vcpu, X86_CR0_WP);
5414
5415 BUILD_BUG_ON((KVM_MMU_CR0_ROLE_BITS & KVM_POSSIBLE_CR0_GUEST_BITS) != X86_CR0_WP);
5416 BUILD_BUG_ON((KVM_MMU_CR4_ROLE_BITS & KVM_POSSIBLE_CR4_GUEST_BITS));
5417
5418 if (is_cr0_wp(mmu) == cr0_wp)
5419 return;
5420
5421 mmu->cpu_role.base.cr0_wp = cr0_wp;
5422 reset_guest_paging_metadata(vcpu, mmu);
5423 }
5424
5425 static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
5426 {
5427 /* tdp_root_level is architecture forced level, use it if nonzero */
5428 if (tdp_root_level)
5429 return tdp_root_level;
5430
5431 /* Use 5-level TDP if and only if it's useful/necessary. */
5432 if (max_tdp_level == 5 && cpuid_maxphyaddr(vcpu) <= 48)
5433 return 4;
5434
5435 return max_tdp_level;
5436 }
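/*
 * Rationale for the check above: a guest physical address space of 48 bits
 * or less is fully reachable through a 4-level TDP structure, so 5-level
 * TDP would only add an extra level to every page walk for no benefit.
 */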
5437
5438 u8 kvm_mmu_get_max_tdp_level(void)
5439 {
5440 return tdp_root_level ? tdp_root_level : max_tdp_level;
5441 }
5442
5443 static union kvm_mmu_page_role
5444 kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
5445 union kvm_cpu_role cpu_role)
5446 {
5447 union kvm_mmu_page_role role = {0};
5448
5449 role.access = ACC_ALL;
5450 role.cr0_wp = true;
5451 role.efer_nx = true;
5452 role.smm = cpu_role.base.smm;
5453 role.guest_mode = cpu_role.base.guest_mode;
5454 role.ad_disabled = !kvm_ad_enabled();
5455 role.level = kvm_mmu_get_tdp_level(vcpu);
5456 role.direct = true;
5457 role.has_4_byte_gpte = false;
5458
5459 return role;
5460 }
5461
5462 static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
5463 union kvm_cpu_role cpu_role)
5464 {
5465 struct kvm_mmu *context = &vcpu->arch.root_mmu;
5466 union kvm_mmu_page_role root_role = kvm_calc_tdp_mmu_root_page_role(vcpu, cpu_role);
5467
5468 if (cpu_role.as_u64 == context->cpu_role.as_u64 &&
5469 root_role.word == context->root_role.word)
5470 return;
5471
5472 context->cpu_role.as_u64 = cpu_role.as_u64;
5473 context->root_role.word = root_role.word;
5474 context->page_fault = kvm_tdp_page_fault;
5475 context->sync_spte = NULL;
5476 context->get_guest_pgd = get_guest_cr3;
5477 context->get_pdptr = kvm_pdptr_read;
5478 context->inject_page_fault = kvm_inject_page_fault;
5479
5480 if (!is_cr0_pg(context))
5481 context->gva_to_gpa = nonpaging_gva_to_gpa;
5482 else if (is_cr4_pae(context))
5483 context->gva_to_gpa = paging64_gva_to_gpa;
5484 else
5485 context->gva_to_gpa = paging32_gva_to_gpa;
5486
5487 reset_guest_paging_metadata(vcpu, context);
5488 reset_tdp_shadow_zero_bits_mask(context);
5489 }
5490
5491 static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
5492 union kvm_cpu_role cpu_role,
5493 union kvm_mmu_page_role root_role)
5494 {
5495 if (cpu_role.as_u64 == context->cpu_role.as_u64 &&
5496 root_role.word == context->root_role.word)
5497 return;
5498
5499 context->cpu_role.as_u64 = cpu_role.as_u64;
5500 context->root_role.word = root_role.word;
5501
5502 if (!is_cr0_pg(context))
5503 nonpaging_init_context(context);
5504 else if (is_cr4_pae(context))
5505 paging64_init_context(context);
5506 else
5507 paging32_init_context(context);
5508
5509 reset_guest_paging_metadata(vcpu, context);
5510 reset_shadow_zero_bits_mask(vcpu, context);
5511 }
5512
5513 static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
5514 union kvm_cpu_role cpu_role)
5515 {
5516 struct kvm_mmu *context = &vcpu->arch.root_mmu;
5517 union kvm_mmu_page_role root_role;
5518
5519 root_role = cpu_role.base;
5520
5521 /* KVM uses PAE paging whenever the guest isn't using 64-bit paging. */
5522 root_role.level = max_t(u32, root_role.level, PT32E_ROOT_LEVEL);
5523
5524 /*
5525 * KVM forces EFER.NX=1 when TDP is disabled, reflect it in the MMU role.
5526 * KVM uses NX when TDP is disabled to handle a variety of scenarios,
5527 * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and
5528 * to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0.
5529 * The iTLB multi-hit workaround can be toggled at any time, so assume
5530 * NX can be used by any non-nested shadow MMU to avoid having to reset
5531 * MMU contexts.
5532 */
5533 root_role.efer_nx = true;
5534
5535 shadow_mmu_init_context(vcpu, context, cpu_role, root_role);
5536 }
5537
5538 void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
5539 unsigned long cr4, u64 efer, gpa_t nested_cr3)
5540 {
5541 struct kvm_mmu *context = &vcpu->arch.guest_mmu;
5542 struct kvm_mmu_role_regs regs = {
5543 .cr0 = cr0,
5544 .cr4 = cr4 & ~X86_CR4_PKE,
5545 .efer = efer,
5546 };
5547 union kvm_cpu_role cpu_role = kvm_calc_cpu_role(vcpu, &regs);
5548 union kvm_mmu_page_role root_role;
5549
5550 /* NPT requires CR0.PG=1. */
5551 WARN_ON_ONCE(cpu_role.base.direct);
5552
5553 root_role = cpu_role.base;
5554 root_role.level = kvm_mmu_get_tdp_level(vcpu);
5555 if (root_role.level == PT64_ROOT_5LEVEL &&
5556 cpu_role.base.level == PT64_ROOT_4LEVEL)
5557 root_role.passthrough = 1;
5558
5559 shadow_mmu_init_context(vcpu, context, cpu_role, root_role);
5560 kvm_mmu_new_pgd(vcpu, nested_cr3);
5561 }
5562 EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
5563
5564 static union kvm_cpu_role
5565 kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
5566 bool execonly, u8 level)
5567 {
5568 union kvm_cpu_role role = {0};
5569
5570 /*
5571 * KVM does not support SMM transfer monitors, and consequently does not
5572 * support the "entry to SMM" control either. role.base.smm is always 0.
5573 */
5574 WARN_ON_ONCE(is_smm(vcpu));
5575 role.base.level = level;
5576 role.base.has_4_byte_gpte = false;
5577 role.base.direct = false;
5578 role.base.ad_disabled = !accessed_dirty;
5579 role.base.guest_mode = true;
5580 role.base.access = ACC_ALL;
5581
5582 role.ext.word = 0;
5583 role.ext.execonly = execonly;
5584 role.ext.valid = 1;
5585
5586 return role;
5587 }
5588
5589 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
5590 int huge_page_level, bool accessed_dirty,
5591 gpa_t new_eptp)
5592 {
5593 struct kvm_mmu *context = &vcpu->arch.guest_mmu;
5594 u8 level = vmx_eptp_page_walk_level(new_eptp);
5595 union kvm_cpu_role new_mode =
5596 kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
5597 execonly, level);
5598
5599 if (new_mode.as_u64 != context->cpu_role.as_u64) {
5600 /* EPT, and thus nested EPT, does not consume CR0, CR4, nor EFER. */
5601 context->cpu_role.as_u64 = new_mode.as_u64;
5602 context->root_role.word = new_mode.base.word;
5603
5604 context->page_fault = ept_page_fault;
5605 context->gva_to_gpa = ept_gva_to_gpa;
5606 context->sync_spte = ept_sync_spte;
5607
5608 update_permission_bitmask(context, true);
5609 context->pkru_mask = 0;
5610 reset_rsvds_bits_mask_ept(vcpu, context, execonly, huge_page_level);
5611 reset_ept_shadow_zero_bits_mask(context, execonly);
5612 }
5613
5614 kvm_mmu_new_pgd(vcpu, new_eptp);
5615 }
5616 EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
5617
5618 static void init_kvm_softmmu(struct kvm_vcpu *vcpu,
5619 union kvm_cpu_role cpu_role)
5620 {
5621 struct kvm_mmu *context = &vcpu->arch.root_mmu;
5622
5623 kvm_init_shadow_mmu(vcpu, cpu_role);
5624
5625 context->get_guest_pgd = get_guest_cr3;
5626 context->get_pdptr = kvm_pdptr_read;
5627 context->inject_page_fault = kvm_inject_page_fault;
5628 }
5629
5630 static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu,
5631 union kvm_cpu_role new_mode)
5632 {
5633 struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
5634
5635 if (new_mode.as_u64 == g_context->cpu_role.as_u64)
5636 return;
5637
5638 g_context->cpu_role.as_u64 = new_mode.as_u64;
5639 g_context->get_guest_pgd = get_guest_cr3;
5640 g_context->get_pdptr = kvm_pdptr_read;
5641 g_context->inject_page_fault = kvm_inject_page_fault;
5642
5643 /*
5644 * L2 page tables are never shadowed, so there is no need to sync
5645 * SPTEs.
5646 */
5647 g_context->sync_spte = NULL;
5648
5649 /*
5650 * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using
5651 * L1's nested page tables (e.g. EPT12). The nested translation
5652 * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using
5653 * L2's page tables as the first level of translation and L1's
5654 * nested page tables as the second level of translation. Basically
5655 * the gva_to_gpa functions between mmu and nested_mmu are swapped.
5656 */
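/*
 * Note that the PAE case below also uses the 64-bit walker: PAE GPTEs are
 * 8 bytes wide, so only legacy 2-level paging needs the 4-byte GPTE
 * (paging32) walker.
 */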
5657 if (!is_paging(vcpu))
5658 g_context->gva_to_gpa = nonpaging_gva_to_gpa;
5659 else if (is_long_mode(vcpu))
5660 g_context->gva_to_gpa = paging64_gva_to_gpa;
5661 else if (is_pae(vcpu))
5662 g_context->gva_to_gpa = paging64_gva_to_gpa;
5663 else
5664 g_context->gva_to_gpa = paging32_gva_to_gpa;
5665
5666 reset_guest_paging_metadata(vcpu, g_context);
5667 }
5668
5669 void kvm_init_mmu(struct kvm_vcpu *vcpu)
5670 {
5671 struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
5672 union kvm_cpu_role cpu_role = kvm_calc_cpu_role(vcpu, &regs);
5673
5674 if (mmu_is_nested(vcpu))
5675 init_kvm_nested_mmu(vcpu, cpu_role);
5676 else if (tdp_enabled)
5677 init_kvm_tdp_mmu(vcpu, cpu_role);
5678 else
5679 init_kvm_softmmu(vcpu, cpu_role);
5680 }
5681 EXPORT_SYMBOL_GPL(kvm_init_mmu);
5682
5683 void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
5684 {
5685 /*
5686 * Invalidate all MMU roles to force them to reinitialize as CPUID
5687 * information is factored into reserved bit calculations.
5688 *
5689 * Correctly handling multiple vCPU models with respect to paging and
5690 * physical address properties in a single VM would require tracking
5691 * all relevant CPUID information in kvm_mmu_page_role. That is very
5692 * undesirable as it would increase the memory requirements for
5693 * gfn_write_track (see struct kvm_mmu_page_role comments). For now
5694 * that problem is swept under the rug; KVM's CPUID API is horrific and
5695 * it's all but impossible to solve it without introducing a new API.
5696 */
5697 vcpu->arch.root_mmu.root_role.invalid = 1;
5698 vcpu->arch.guest_mmu.root_role.invalid = 1;
5699 vcpu->arch.nested_mmu.root_role.invalid = 1;
5700 vcpu->arch.root_mmu.cpu_role.ext.valid = 0;
5701 vcpu->arch.guest_mmu.cpu_role.ext.valid = 0;
5702 vcpu->arch.nested_mmu.cpu_role.ext.valid = 0;
5703 kvm_mmu_reset_context(vcpu);
5704
5705 /*
5706 * Changing guest CPUID after KVM_RUN is forbidden, see the comment in
5707 * kvm_arch_vcpu_ioctl().
5708 */
5709 KVM_BUG_ON(kvm_vcpu_has_run(vcpu), vcpu->kvm);
5710 }
5711
5712 void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
5713 {
5714 kvm_mmu_unload(vcpu);
5715 kvm_init_mmu(vcpu);
5716 }
5717 EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
5718
5719 int kvm_mmu_load(struct kvm_vcpu *vcpu)
5720 {
5721 int r;
5722
5723 r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->root_role.direct);
5724 if (r)
5725 goto out;
5726 r = mmu_alloc_special_roots(vcpu);
5727 if (r)
5728 goto out;
5729 if (vcpu->arch.mmu->root_role.direct)
5730 r = mmu_alloc_direct_roots(vcpu);
5731 else
5732 r = mmu_alloc_shadow_roots(vcpu);
5733 if (r)
5734 goto out;
5735
5736 kvm_mmu_sync_roots(vcpu);
5737
5738 kvm_mmu_load_pgd(vcpu);
5739
5740 /*
5741 * Flush any TLB entries for the new root, the provenance of the root
5742 * is unknown. Even if KVM ensures there are no stale TLB entries
5743 * for a freed root, in theory another hypervisor could have left
5744 * stale entries. Flushing on alloc also allows KVM to skip the TLB
5745 * flush when freeing a root (see kvm_tdp_mmu_put_root()).
5746 */
5747 kvm_x86_call(flush_tlb_current)(vcpu);
5748 out:
5749 return r;
5750 }
5751
5752 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
5753 {
5754 struct kvm *kvm = vcpu->kvm;
5755
5756 kvm_mmu_free_roots(kvm, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);
5757 WARN_ON_ONCE(VALID_PAGE(vcpu->arch.root_mmu.root.hpa));
5758 kvm_mmu_free_roots(kvm, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
5759 WARN_ON_ONCE(VALID_PAGE(vcpu->arch.guest_mmu.root.hpa));
5760 vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
5761 }
5762
5763 static bool is_obsolete_root(struct kvm *kvm, hpa_t root_hpa)
5764 {
5765 struct kvm_mmu_page *sp;
5766
5767 if (!VALID_PAGE(root_hpa))
5768 return false;
5769
5770 /*
5771 * When freeing obsolete roots, treat roots as obsolete if they don't
5772 * have an associated shadow page, as it's impossible to determine if
5773 * such roots are fresh or stale. This does mean KVM will get false
5774 * positives and free roots that don't strictly need to be freed, but
5775 * such false positives are relatively rare:
5776 *
5777 * (a) only PAE paging and nested NPT have roots without shadow pages
5778 * (or any shadow paging flavor with a dummy root, see note below)
5779 * (b) remote reloads due to a memslot update obsolete _all_ roots
5780 * (c) KVM doesn't track previous roots for PAE paging, and the guest
5781 * is unlikely to zap an in-use PGD.
5782 *
5783 * Note! Dummy roots are unique in that they are obsoleted by memslot
5784 * _creation_! See also FNAME(fetch).
5785 */
5786 sp = root_to_sp(root_hpa);
5787 return !sp || is_obsolete_sp(kvm, sp);
5788 }
5789
5790 static void __kvm_mmu_free_obsolete_roots(struct kvm *kvm, struct kvm_mmu *mmu)
5791 {
5792 unsigned long roots_to_free = 0;
5793 int i;
5794
5795 if (is_obsolete_root(kvm, mmu->root.hpa))
5796 roots_to_free |= KVM_MMU_ROOT_CURRENT;
5797
5798 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
5799 if (is_obsolete_root(kvm, mmu->prev_roots[i].hpa))
5800 roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
5801 }
5802
5803 if (roots_to_free)
5804 kvm_mmu_free_roots(kvm, mmu, roots_to_free);
5805 }
5806
5807 void kvm_mmu_free_obsolete_roots(struct kvm_vcpu *vcpu)
5808 {
5809 __kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.root_mmu);
5810 __kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.guest_mmu);
5811 }
5812
5813 static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
5814 int *bytes)
5815 {
5816 u64 gentry = 0;
5817 int r;
5818
5819 /*
5820 * Assume that the pte write is on a page table of the same type
5821 * as the current vcpu paging mode, since we update the sptes only
5822 * when they have the same mode.
5823 */
5824 if (is_pae(vcpu) && *bytes == 4) {
5825 /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
5826 *gpa &= ~(gpa_t)7;
5827 *bytes = 8;
5828 }
5829
5830 if (*bytes == 4 || *bytes == 8) {
5831 r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes);
5832 if (r)
5833 gentry = 0;
5834 }
5835
5836 return gentry;
5837 }
5838
5839 /*
5840 * If we're seeing too many writes to a page, it may no longer be a page table,
5841 * or we may be forking, in which case it is better to unmap the page.
5842 */
5843 static bool detect_write_flooding(struct kvm_mmu_page *sp)
5844 {
5845 /*
5846 * Skip write-flooding detection for an sp whose level is 1, because
5847 * it can become unsync, in which case the guest page is not write-protected.
5848 */
5849 if (sp->role.level == PG_LEVEL_4K)
5850 return false;
5851
5852 atomic_inc(&sp->write_flooding_count);
5853 return atomic_read(&sp->write_flooding_count) >= 3;
5854 }
5855
5856 /*
5857 * Misaligned accesses are too much trouble to fix up; also, they usually
5858 * indicate a page is not used as a page table.
5859 */
5860 static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
5861 int bytes)
5862 {
5863 unsigned offset, pte_size, misaligned;
5864
5865 offset = offset_in_page(gpa);
5866 pte_size = sp->role.has_4_byte_gpte ? 4 : 8;
5867
5868 /*
5869 * Sometimes, the OS only writes the last byte to update status
5870 * bits; for example, Linux uses the andb instruction in clear_bit().
5871 */
5872 if (!(offset & (pte_size - 1)) && bytes == 1)
5873 return false;
5874
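/*
 * The XOR below is nonzero iff the first and last written bytes land in
 * different GPTEs, i.e. iff the write straddles a GPTE boundary.
 */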
5875 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
5876 misaligned |= bytes < 4;
5877
5878 return misaligned;
5879 }
5880
5881 static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
5882 {
5883 unsigned page_offset, quadrant;
5884 u64 *spte;
5885 int level;
5886
5887 page_offset = offset_in_page(gpa);
5888 level = sp->role.level;
5889 *nspte = 1;
5890 if (sp->role.has_4_byte_gpte) {
5891 page_offset <<= 1; /* 32->64 */
5892 /*
5893 * A 32-bit pde maps 4MB while the shadow pdes map
5894 * only 2MB. So we need to double the offset again
5895 * and zap two pdes instead of one.
5896 */
5897 if (level == PT32_ROOT_LEVEL) {
5898 page_offset &= ~7; /* kill rounding error */
5899 page_offset <<= 1;
5900 *nspte = 2;
5901 }
5902 quadrant = page_offset >> PAGE_SHIFT;
5903 page_offset &= ~PAGE_MASK;
5904 if (quadrant != sp->role.quadrant)
5905 return NULL;
5906 }
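/*
 * Worked example (illustrative) for the 4-byte-GPTE path above: a write at
 * page offset 0x804 doubles to 0x1008, which for a non-PD shadow page is
 * quadrant 1, offset 0x8, i.e. SPTE index 1.
 */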
5907
5908 spte = &sp->spt[page_offset / sizeof(*spte)];
5909 return spte;
5910 }
5911
5912 void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
5913 int bytes)
5914 {
5915 gfn_t gfn = gpa >> PAGE_SHIFT;
5916 struct kvm_mmu_page *sp;
5917 LIST_HEAD(invalid_list);
5918 u64 entry, gentry, *spte;
5919 int npte;
5920 bool flush = false;
5921
5922 /*
5923 * When emulating guest writes, ensure the written value is visible to
5924 * any task that is handling page faults before checking whether or not
5925 * KVM is shadowing a guest PTE. This ensures either KVM will create
5926 * the correct SPTE in the page fault handler, or this task will see
5927 * a non-zero indirect_shadow_pages. Pairs with the smp_mb() in
5928 * account_shadowed().
5929 */
5930 smp_mb();
5931 if (!vcpu->kvm->arch.indirect_shadow_pages)
5932 return;
5933
5934 write_lock(&vcpu->kvm->mmu_lock);
5935
5936 gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);
5937
5938 ++vcpu->kvm->stat.mmu_pte_write;
5939
5940 for_each_gfn_valid_sp_with_gptes(vcpu->kvm, sp, gfn) {
5941 if (detect_write_misaligned(sp, gpa, bytes) ||
5942 detect_write_flooding(sp)) {
5943 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
5944 ++vcpu->kvm->stat.mmu_flooded;
5945 continue;
5946 }
5947
5948 spte = get_written_sptes(sp, gpa, &npte);
5949 if (!spte)
5950 continue;
5951
5952 while (npte--) {
5953 entry = *spte;
5954 mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL);
5955 if (gentry && sp->role.level != PG_LEVEL_4K)
5956 ++vcpu->kvm->stat.mmu_pde_zapped;
5957 if (is_shadow_present_pte(entry))
5958 flush = true;
5959 ++spte;
5960 }
5961 }
5962 kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush);
5963 write_unlock(&vcpu->kvm->mmu_lock);
5964 }
5965
5966 int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
5967 void *insn, int insn_len)
5968 {
5969 int r, emulation_type = EMULTYPE_PF;
5970 bool direct = vcpu->arch.mmu->root_role.direct;
5971
5972 if (WARN_ON_ONCE(!VALID_PAGE(vcpu->arch.mmu->root.hpa)))
5973 return RET_PF_RETRY;
5974
5975 /*
5976 * Except for reserved faults (emulated MMIO is shared-only), set the
5977 * PFERR_PRIVATE_ACCESS flag for software-protected VMs based on the gfn's
5978 * current attributes, which are the source of truth for such VMs. Note,
5979 * this is wrong for nested MMUs as the GPA is an L2 GPA, but KVM doesn't
5980 * currently support nested virtualization (among many other things)
5981 * for software-protected VMs.
5982 */
5983 if (IS_ENABLED(CONFIG_KVM_SW_PROTECTED_VM) &&
5984 !(error_code & PFERR_RSVD_MASK) &&
5985 vcpu->kvm->arch.vm_type == KVM_X86_SW_PROTECTED_VM &&
5986 kvm_mem_is_private(vcpu->kvm, gpa_to_gfn(cr2_or_gpa)))
5987 error_code |= PFERR_PRIVATE_ACCESS;
5988
5989 r = RET_PF_INVALID;
5990 if (unlikely(error_code & PFERR_RSVD_MASK)) {
5991 if (WARN_ON_ONCE(error_code & PFERR_PRIVATE_ACCESS))
5992 return -EFAULT;
5993
5994 r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct);
5995 if (r == RET_PF_EMULATE)
5996 goto emulate;
5997 }
5998
5999 if (r == RET_PF_INVALID) {
6000 vcpu->stat.pf_taken++;
6001
6002 r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa, error_code, false,
6003 &emulation_type, NULL);
6004 if (KVM_BUG_ON(r == RET_PF_INVALID, vcpu->kvm))
6005 return -EIO;
6006 }
6007
6008 if (r < 0)
6009 return r;
6010
6011 if (r == RET_PF_FIXED)
6012 vcpu->stat.pf_fixed++;
6013 else if (r == RET_PF_EMULATE)
6014 vcpu->stat.pf_emulate++;
6015 else if (r == RET_PF_SPURIOUS)
6016 vcpu->stat.pf_spurious++;
6017
6018 if (r != RET_PF_EMULATE)
6019 return 1;
6020
6021 /*
6022 * Before emulating the instruction, check if the error code
6023 * was due to a RO violation while translating the guest page.
6024 * This can occur when using nested virtualization with nested
6025 * paging in both guests. If true, we simply unprotect the page
6026 * and resume the guest.
6027 */
6028 if (vcpu->arch.mmu->root_role.direct &&
6029 (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
6030 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2_or_gpa));
6031 return 1;
6032 }
6033
6034 /*
6035 * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still
6036 * optimistically try to just unprotect the page and let the processor
6037 * re-execute the instruction that caused the page fault. Do not allow
6038 * retrying MMIO emulation, as it's not only pointless but could also
6039 * cause us to enter an infinite loop because the processor will keep
6040 * faulting on the non-existent MMIO address. Retrying an instruction
6041 * from a nested guest is also pointless and dangerous as we are only
6042 * explicitly shadowing L1's page tables, i.e. unprotecting something
6043 * for L1 isn't going to magically fix whatever issue caused L2 to fail.
6044 */
6045 if (!mmio_info_in_cache(vcpu, cr2_or_gpa, direct) && !is_guest_mode(vcpu))
6046 emulation_type |= EMULTYPE_ALLOW_RETRY_PF;
6047 emulate:
6048 return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn,
6049 insn_len);
6050 }
6051 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
6052
6053 void kvm_mmu_print_sptes(struct kvm_vcpu *vcpu, gpa_t gpa, const char *msg)
6054 {
6055 u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
6056 int root_level, leaf, level;
6057
6058 leaf = get_sptes_lockless(vcpu, gpa, sptes, &root_level);
6059 if (unlikely(leaf < 0))
6060 return;
6061
6062 pr_err("%s %llx", msg, gpa);
6063 for (level = root_level; level >= leaf; level--)
6064 pr_cont(", spte[%d] = 0x%llx", level, sptes[level]);
6065 pr_cont("\n");
6066 }
6067 EXPORT_SYMBOL_GPL(kvm_mmu_print_sptes);
6068
6069 static void __kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
6070 u64 addr, hpa_t root_hpa)
6071 {
6072 struct kvm_shadow_walk_iterator iterator;
6073
6074 vcpu_clear_mmio_info(vcpu, addr);
6075
6076 /*
6077 * Walking and synchronizing SPTEs both assume they are operating in
6078 * the context of the current MMU, and would need to be reworked if
6079 * this is ever used to sync the guest_mmu, e.g. to emulate INVEPT.
6080 */
6081 if (WARN_ON_ONCE(mmu != vcpu->arch.mmu))
6082 return;
6083
6084 if (!VALID_PAGE(root_hpa))
6085 return;
6086
6087 write_lock(&vcpu->kvm->mmu_lock);
6088 for_each_shadow_entry_using_root(vcpu, root_hpa, addr, iterator) {
6089 struct kvm_mmu_page *sp = sptep_to_sp(iterator.sptep);
6090
6091 if (sp->unsync) {
6092 int ret = kvm_sync_spte(vcpu, sp, iterator.index);
6093
6094 if (ret < 0)
6095 mmu_page_zap_pte(vcpu->kvm, sp, iterator.sptep, NULL);
6096 if (ret)
6097 kvm_flush_remote_tlbs_sptep(vcpu->kvm, iterator.sptep);
6098 }
6099
6100 if (!sp->unsync_children)
6101 break;
6102 }
6103 write_unlock(&vcpu->kvm->mmu_lock);
6104 }
6105
6106 void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
6107 u64 addr, unsigned long roots)
6108 {
6109 int i;
6110
6111 WARN_ON_ONCE(roots & ~KVM_MMU_ROOTS_ALL);
6112
6113 /* It's actually a GPA for vcpu->arch.guest_mmu. */
6114 if (mmu != &vcpu->arch.guest_mmu) {
6115 /* INVLPG on a non-canonical address is a NOP according to the SDM. */
6116 if (is_noncanonical_address(addr, vcpu))
6117 return;
6118
6119 kvm_x86_call(flush_tlb_gva)(vcpu, addr);
6120 }
6121
6122 if (!mmu->sync_spte)
6123 return;
6124
6125 if (roots & KVM_MMU_ROOT_CURRENT)
6126 __kvm_mmu_invalidate_addr(vcpu, mmu, addr, mmu->root.hpa);
6127
6128 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
6129 if (roots & KVM_MMU_ROOT_PREVIOUS(i))
6130 __kvm_mmu_invalidate_addr(vcpu, mmu, addr, mmu->prev_roots[i].hpa);
6131 }
6132 }
6133 EXPORT_SYMBOL_GPL(kvm_mmu_invalidate_addr);
6134
6135 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
6136 {
6137 /*
6138 * INVLPG is required to invalidate any global mappings for the VA,
6139 * irrespective of PCID. Blindly sync all roots as it would take
6140 * roughly the same amount of work/time to determine whether any of the
6141 * previous roots have a global mapping.
6142 *
6143 * Mappings not reachable via the current or previous cached roots will
6144 * be synced when switching to that new cr3, so nothing needs to be
6145 * done here for them.
6146 */
6147 kvm_mmu_invalidate_addr(vcpu, vcpu->arch.walk_mmu, gva, KVM_MMU_ROOTS_ALL);
6148 ++vcpu->stat.invlpg;
6149 }
6150 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
6151
6152
6153 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
6154 {
6155 struct kvm_mmu *mmu = vcpu->arch.mmu;
6156 unsigned long roots = 0;
6157 uint i;
6158
6159 if (pcid == kvm_get_active_pcid(vcpu))
6160 roots |= KVM_MMU_ROOT_CURRENT;
6161
6162 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
6163 if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
6164 pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd))
6165 roots |= KVM_MMU_ROOT_PREVIOUS(i);
6166 }
6167
6168 if (roots)
6169 kvm_mmu_invalidate_addr(vcpu, mmu, gva, roots);
6170 ++vcpu->stat.invlpg;
6171
6172 /*
6173 * Mappings not reachable via the current cr3 or the prev_roots will be
6174 * synced when switching to that cr3, so nothing needs to be done here
6175 * for them.
6176 */
6177 }
6178
6179 void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
6180 int tdp_max_root_level, int tdp_huge_page_level)
6181 {
6182 tdp_enabled = enable_tdp;
6183 tdp_root_level = tdp_forced_root_level;
6184 max_tdp_level = tdp_max_root_level;
6185
6186 #ifdef CONFIG_X86_64
6187 tdp_mmu_enabled = tdp_mmu_allowed && tdp_enabled;
6188 #endif
6189 /*
6190 * max_huge_page_level reflects KVM's MMU capabilities irrespective
6191 * of kernel support, e.g. KVM may be capable of using 1GB pages when
6192 * the kernel is not. But, KVM never creates a page size greater than
6193 * what is used by the kernel for any given HVA, i.e. the kernel's
6194 * capabilities are ultimately consulted by kvm_mmu_hugepage_adjust().
6195 */
6196 if (tdp_enabled)
6197 max_huge_page_level = tdp_huge_page_level;
6198 else if (boot_cpu_has(X86_FEATURE_GBPAGES))
6199 max_huge_page_level = PG_LEVEL_1G;
6200 else
6201 max_huge_page_level = PG_LEVEL_2M;
6202 }
6203 EXPORT_SYMBOL_GPL(kvm_configure_mmu);
6204
6205 /* The return value indicates if tlb flush on all vcpus is needed. */
6206 typedef bool (*slot_rmaps_handler) (struct kvm *kvm,
6207 struct kvm_rmap_head *rmap_head,
6208 const struct kvm_memory_slot *slot);
6209
6210 static __always_inline bool __walk_slot_rmaps(struct kvm *kvm,
6211 const struct kvm_memory_slot *slot,
6212 slot_rmaps_handler fn,
6213 int start_level, int end_level,
6214 gfn_t start_gfn, gfn_t end_gfn,
6215 bool flush_on_yield, bool flush)
6216 {
6217 struct slot_rmap_walk_iterator iterator;
6218
6219 lockdep_assert_held_write(&kvm->mmu_lock);
6220
6221 for_each_slot_rmap_range(slot, start_level, end_level, start_gfn,
6222 end_gfn, &iterator) {
6223 if (iterator.rmap)
6224 flush |= fn(kvm, iterator.rmap, slot);
6225
6226 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
6227 if (flush && flush_on_yield) {
6228 kvm_flush_remote_tlbs_range(kvm, start_gfn,
6229 iterator.gfn - start_gfn + 1);
6230 flush = false;
6231 }
6232 cond_resched_rwlock_write(&kvm->mmu_lock);
6233 }
6234 }
6235
6236 return flush;
6237 }
6238
6239 static __always_inline bool walk_slot_rmaps(struct kvm *kvm,
6240 const struct kvm_memory_slot *slot,
6241 slot_rmaps_handler fn,
6242 int start_level, int end_level,
6243 bool flush_on_yield)
6244 {
6245 return __walk_slot_rmaps(kvm, slot, fn, start_level, end_level,
6246 slot->base_gfn, slot->base_gfn + slot->npages - 1,
6247 flush_on_yield, false);
6248 }
6249
6250 static __always_inline bool walk_slot_rmaps_4k(struct kvm *kvm,
6251 const struct kvm_memory_slot *slot,
6252 slot_rmaps_handler fn,
6253 bool flush_on_yield)
6254 {
6255 return walk_slot_rmaps(kvm, slot, fn, PG_LEVEL_4K, PG_LEVEL_4K, flush_on_yield);
6256 }
6257
6258 static void free_mmu_pages(struct kvm_mmu *mmu)
6259 {
6260 if (!tdp_enabled && mmu->pae_root)
6261 set_memory_encrypted((unsigned long)mmu->pae_root, 1);
6262 free_page((unsigned long)mmu->pae_root);
6263 free_page((unsigned long)mmu->pml4_root);
6264 free_page((unsigned long)mmu->pml5_root);
6265 }
6266
6267 static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
6268 {
6269 struct page *page;
6270 int i;
6271
6272 mmu->root.hpa = INVALID_PAGE;
6273 mmu->root.pgd = 0;
6274 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
6275 mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
6276
6277 /* vcpu->arch.guest_mmu isn't used when !tdp_enabled. */
6278 if (!tdp_enabled && mmu == &vcpu->arch.guest_mmu)
6279 return 0;
6280
6281 /*
6282 * When using PAE paging, the four PDPTEs are treated as 'root' pages,
6283 * while the PDP table is a per-vCPU construct that's allocated at MMU
6284 * creation. When emulating 32-bit mode, cr3 is only 32 bits even on
6285 * x86_64. Therefore we need to allocate the PDP table in the first
6286 * 4GB of memory, which happens to fit the DMA32 zone. TDP paging
6287 * generally doesn't use PAE paging and can skip allocating the PDP
6288 * table. The main exception, handled here, is SVM's 32-bit NPT. The
6289 * other exception is for shadowing L1's 32-bit or PAE NPT on 64-bit
6290 * KVM; that horror is handled on-demand by mmu_alloc_special_roots().
6291 */
6292 if (tdp_enabled && kvm_mmu_get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
6293 return 0;
6294
6295 page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);
6296 if (!page)
6297 return -ENOMEM;
6298
6299 mmu->pae_root = page_address(page);
6300
6301 /*
6302 * CR3 is only 32 bits when PAE paging is used, thus it's impossible to
6303 * get the CPU to treat the PDPTEs as encrypted. Decrypt the page so
6304 * that KVM's writes and the CPU's reads get along. Note, this is
6305 * only necessary when using shadow paging, as 64-bit NPT can get at
6306 * the C-bit even when shadowing 32-bit NPT, and SME isn't supported
6307 * by 32-bit kernels (when KVM itself uses 32-bit NPT).
6308 */
6309 if (!tdp_enabled)
6310 set_memory_decrypted((unsigned long)mmu->pae_root, 1);
6311 else
6312 WARN_ON_ONCE(shadow_me_value);
6313
6314 for (i = 0; i < 4; ++i)
6315 mmu->pae_root[i] = INVALID_PAE_ROOT;
6316
6317 return 0;
6318 }
6319
6320 int kvm_mmu_create(struct kvm_vcpu *vcpu)
6321 {
6322 int ret;
6323
6324 vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache;
6325 vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO;
6326
6327 vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
6328 vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;
6329
6330 vcpu->arch.mmu_shadow_page_cache.init_value =
6331 SHADOW_NONPRESENT_VALUE;
6332 if (!vcpu->arch.mmu_shadow_page_cache.init_value)
6333 vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
6334
6335 vcpu->arch.mmu = &vcpu->arch.root_mmu;
6336 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
6337
6338 ret = __kvm_mmu_create(vcpu, &vcpu->arch.guest_mmu);
6339 if (ret)
6340 return ret;
6341
6342 ret = __kvm_mmu_create(vcpu, &vcpu->arch.root_mmu);
6343 if (ret)
6344 goto fail_allocate_root;
6345
6346 return ret;
6347 fail_allocate_root:
6348 free_mmu_pages(&vcpu->arch.guest_mmu);
6349 return ret;
6350 }
6351
6352 #define BATCH_ZAP_PAGES 10
6353 static void kvm_zap_obsolete_pages(struct kvm *kvm)
6354 {
6355 struct kvm_mmu_page *sp, *node;
6356 int nr_zapped, batch = 0;
6357 bool unstable;
6358
6359 restart:
6360 list_for_each_entry_safe_reverse(sp, node,
6361 &kvm->arch.active_mmu_pages, link) {
6362 /*
6363 * No obsolete valid page exists before a newly created page
6364 * since active_mmu_pages is a FIFO list.
6365 */
6366 if (!is_obsolete_sp(kvm, sp))
6367 break;
6368
6369 /*
6370 * Invalid pages should never land back on the list of active
6371 * pages. Skip the bogus page, otherwise we'll get stuck in an
6372 * infinite loop if the page gets put back on the list (again).
6373 */
6374 if (WARN_ON_ONCE(sp->role.invalid))
6375 continue;
6376
6377 /*
6378 * No need to flush the TLB since we're only zapping shadow
6379 * pages with an obsolete generation number and all vCPUS have
6380 * loaded a new root, i.e. the shadow pages being zapped cannot
6381 * be in active use by the guest.
6382 */
6383 if (batch >= BATCH_ZAP_PAGES &&
6384 cond_resched_rwlock_write(&kvm->mmu_lock)) {
6385 batch = 0;
6386 goto restart;
6387 }
6388
6389 unstable = __kvm_mmu_prepare_zap_page(kvm, sp,
6390 &kvm->arch.zapped_obsolete_pages, &nr_zapped);
6391 batch += nr_zapped;
6392
6393 if (unstable)
6394 goto restart;
6395 }
6396
6397 /*
6398 * Kick all vCPUs (via remote TLB flush) before freeing the page tables
6399 * to ensure KVM is not in the middle of a lockless shadow page table
6400 * walk, which may reference the pages. The remote TLB flush itself is
6401 * not required and is simply a convenient way to kick vCPUs as needed.
6402 * KVM performs a local TLB flush when allocating a new root (see
6403 * kvm_mmu_load()), and the reload in the caller ensures no vCPUs are
6404 * running with an obsolete MMU.
6405 */
6406 kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
6407 }
6408
6409 /*
6410 * Fast invalidate all shadow pages and use lock-break technique
6411 * to zap obsolete pages.
6412 *
6413 * It's required when a memslot is being deleted or the VM is being
6414 * destroyed; in these cases, we should ensure that the KVM MMU does
6415 * not use any resource of the slot being deleted (or, for VM
6416 * destruction, of any slot) after calling the function.
6417 */
6418 static void kvm_mmu_zap_all_fast(struct kvm *kvm)
6419 {
6420 lockdep_assert_held(&kvm->slots_lock);
6421
6422 write_lock(&kvm->mmu_lock);
6423 trace_kvm_mmu_zap_all_fast(kvm);
6424
6425 /*
6426 * Toggle mmu_valid_gen between '0' and '1'. Because slots_lock is
6427 * held for the entire duration of zapping obsolete pages, it's
6428 * impossible for there to be multiple invalid generations associated
6429 * with *valid* shadow pages at any given time, i.e. there is exactly
6430 * one valid generation and (at most) one invalid generation.
6431 */
6432 kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1;
6433
6434 /*
6435 * In order to ensure all vCPUs drop their soon-to-be invalid roots,
6436 * invalidating TDP MMU roots must be done while holding mmu_lock for
6437 * write and in the same critical section as making the reload request,
6438 * e.g. before kvm_zap_obsolete_pages() could drop mmu_lock and yield.
6439 */
6440 if (tdp_mmu_enabled)
6441 kvm_tdp_mmu_invalidate_all_roots(kvm);
6442
6443 /*
6444 * Notify all vcpus to reload their shadow page tables and flush the TLB.
6445 * Then all vcpus will switch to the new shadow page table with the new
6446 * mmu_valid_gen.
6447 *
6448 * Note: we need to do this under the protection of mmu_lock,
6449 * otherwise, a vcpu could purge a shadow page but miss the TLB flush.
6450 */
6451 kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_FREE_OBSOLETE_ROOTS);
6452
6453 kvm_zap_obsolete_pages(kvm);
6454
6455 write_unlock(&kvm->mmu_lock);
6456
6457 /*
6458 * Zap the invalidated TDP MMU roots, all SPTEs must be dropped before
6459 * returning to the caller, e.g. if the zap is in response to a memslot
6460 * deletion, mmu_notifier callbacks will be unable to reach the SPTEs
6461 * associated with the deleted memslot once the update completes, and
6462 * deferring the zap until the final reference to the root is put would
6463 * lead to use-after-free.
6464 */
6465 if (tdp_mmu_enabled)
6466 kvm_tdp_mmu_zap_invalidated_roots(kvm);
6467 }
6468
6469 static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
6470 {
6471 return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
6472 }
6473
6474 void kvm_mmu_init_vm(struct kvm *kvm)
6475 {
6476 kvm->arch.shadow_mmio_value = shadow_mmio_value;
6477 INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
6478 INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
6479 INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages);
6480 spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
6481
6482 if (tdp_mmu_enabled)
6483 kvm_mmu_init_tdp_mmu(kvm);
6484
6485 kvm->arch.split_page_header_cache.kmem_cache = mmu_page_header_cache;
6486 kvm->arch.split_page_header_cache.gfp_zero = __GFP_ZERO;
6487
6488 kvm->arch.split_shadow_page_cache.gfp_zero = __GFP_ZERO;
6489
6490 kvm->arch.split_desc_cache.kmem_cache = pte_list_desc_cache;
6491 kvm->arch.split_desc_cache.gfp_zero = __GFP_ZERO;
6492 }
6493
6494 static void mmu_free_vm_memory_caches(struct kvm *kvm)
6495 {
6496 kvm_mmu_free_memory_cache(&kvm->arch.split_desc_cache);
6497 kvm_mmu_free_memory_cache(&kvm->arch.split_page_header_cache);
6498 kvm_mmu_free_memory_cache(&kvm->arch.split_shadow_page_cache);
6499 }
6500
6501 void kvm_mmu_uninit_vm(struct kvm *kvm)
6502 {
6503 if (tdp_mmu_enabled)
6504 kvm_mmu_uninit_tdp_mmu(kvm);
6505
6506 mmu_free_vm_memory_caches(kvm);
6507 }
6508
6509 static bool kvm_rmap_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
6510 {
6511 const struct kvm_memory_slot *memslot;
6512 struct kvm_memslots *slots;
6513 struct kvm_memslot_iter iter;
6514 bool flush = false;
6515 gfn_t start, end;
6516 int i;
6517
6518 if (!kvm_memslots_have_rmaps(kvm))
6519 return flush;
6520
6521 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
6522 slots = __kvm_memslots(kvm, i);
6523
6524 kvm_for_each_memslot_in_gfn_range(&iter, slots, gfn_start, gfn_end) {
6525 memslot = iter.slot;
6526 start = max(gfn_start, memslot->base_gfn);
6527 end = min(gfn_end, memslot->base_gfn + memslot->npages);
6528 if (WARN_ON_ONCE(start >= end))
6529 continue;
6530
6531 flush = __walk_slot_rmaps(kvm, memslot, __kvm_zap_rmap,
6532 PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
6533 start, end - 1, true, flush);
6534 }
6535 }
6536
6537 return flush;
6538 }
6539
6540 /*
6541 * Invalidate (zap) SPTEs that cover GFNs from gfn_start and up to gfn_end
6542 * (not including it)
6543 */
6544 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
6545 {
6546 bool flush;
6547
6548 if (WARN_ON_ONCE(gfn_end <= gfn_start))
6549 return;
6550
6551 write_lock(&kvm->mmu_lock);
6552
6553 kvm_mmu_invalidate_begin(kvm);
6554
6555 kvm_mmu_invalidate_range_add(kvm, gfn_start, gfn_end);
6556
6557 flush = kvm_rmap_zap_gfn_range(kvm, gfn_start, gfn_end);
6558
6559 if (tdp_mmu_enabled)
6560 flush = kvm_tdp_mmu_zap_leafs(kvm, gfn_start, gfn_end, flush);
6561
6562 if (flush)
6563 kvm_flush_remote_tlbs_range(kvm, gfn_start, gfn_end - gfn_start);
6564
6565 kvm_mmu_invalidate_end(kvm);
6566
6567 write_unlock(&kvm->mmu_lock);
6568 }
6569
6570 static bool slot_rmap_write_protect(struct kvm *kvm,
6571 struct kvm_rmap_head *rmap_head,
6572 const struct kvm_memory_slot *slot)
6573 {
6574 return rmap_write_protect(rmap_head, false);
6575 }
6576
6577 void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
6578 const struct kvm_memory_slot *memslot,
6579 int start_level)
6580 {
6581 if (kvm_memslots_have_rmaps(kvm)) {
6582 write_lock(&kvm->mmu_lock);
6583 walk_slot_rmaps(kvm, memslot, slot_rmap_write_protect,
6584 start_level, KVM_MAX_HUGEPAGE_LEVEL, false);
6585 write_unlock(&kvm->mmu_lock);
6586 }
6587
6588 if (tdp_mmu_enabled) {
6589 read_lock(&kvm->mmu_lock);
6590 kvm_tdp_mmu_wrprot_slot(kvm, memslot, start_level);
6591 read_unlock(&kvm->mmu_lock);
6592 }
6593 }
6594
6595 static inline bool need_topup(struct kvm_mmu_memory_cache *cache, int min)
6596 {
6597 return kvm_mmu_memory_cache_nr_free_objects(cache) < min;
6598 }
6599
6600 static bool need_topup_split_caches_or_resched(struct kvm *kvm)
6601 {
6602 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock))
6603 return true;
6604
6605 /*
6606 * In the worst case, SPLIT_DESC_CACHE_MIN_NR_OBJECTS descriptors are needed
6607 * to split a single huge page. Calculating how many are actually needed
6608 * is possible but not worth the complexity.
6609 */
6610 return need_topup(&kvm->arch.split_desc_cache, SPLIT_DESC_CACHE_MIN_NR_OBJECTS) ||
6611 need_topup(&kvm->arch.split_page_header_cache, 1) ||
6612 need_topup(&kvm->arch.split_shadow_page_cache, 1);
6613 }
6614
6615 static int topup_split_caches(struct kvm *kvm)
6616 {
6617 /*
6618 * Allocating rmap list entries when splitting huge pages for nested
6619 * MMUs is uncommon as KVM needs to use a list if and only if there is
6620 * more than one rmap entry for a gfn, i.e. requires an L1 gfn to be
6621 * aliased by multiple L2 gfns and/or from multiple nested roots with
6622 * different roles. Aliasing gfns when using TDP is atypical for VMMs;
6623 * a few gfns are often aliased during boot, e.g. when remapping BIOS,
6624 * but aliasing rarely occurs post-boot or for many gfns. If there is
6625 * only one rmap entry, rmap->val points directly at that one entry and
6626 * doesn't need to allocate a list. Buffer the cache by the default
6627 * capacity so that KVM doesn't have to drop mmu_lock to topup if KVM
6628 * encounters an aliased gfn or two.
6629 */
6630 const int capacity = SPLIT_DESC_CACHE_MIN_NR_OBJECTS +
6631 KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE;
6632 int r;
6633
6634 lockdep_assert_held(&kvm->slots_lock);
6635
6636 r = __kvm_mmu_topup_memory_cache(&kvm->arch.split_desc_cache, capacity,
6637 SPLIT_DESC_CACHE_MIN_NR_OBJECTS);
6638 if (r)
6639 return r;
6640
6641 r = kvm_mmu_topup_memory_cache(&kvm->arch.split_page_header_cache, 1);
6642 if (r)
6643 return r;
6644
6645 return kvm_mmu_topup_memory_cache(&kvm->arch.split_shadow_page_cache, 1);
6646 }
6647
6648 static struct kvm_mmu_page *shadow_mmu_get_sp_for_split(struct kvm *kvm, u64 *huge_sptep)
6649 {
6650 struct kvm_mmu_page *huge_sp = sptep_to_sp(huge_sptep);
6651 struct shadow_page_caches caches = {};
6652 union kvm_mmu_page_role role;
6653 unsigned int access;
6654 gfn_t gfn;
6655
6656 gfn = kvm_mmu_page_get_gfn(huge_sp, spte_index(huge_sptep));
6657 access = kvm_mmu_page_get_access(huge_sp, spte_index(huge_sptep));
6658
6659 /*
6660 * Note, huge page splitting always uses direct shadow pages, regardless
6661 * of whether the huge page itself is mapped by a direct or indirect
6662 * shadow page, since the huge page region itself is being directly
6663 * mapped with smaller pages.
6664 */
6665 role = kvm_mmu_child_role(huge_sptep, /*direct=*/true, access);
6666
6667 /* Direct SPs do not require a shadowed_info_cache. */
6668 caches.page_header_cache = &kvm->arch.split_page_header_cache;
6669 caches.shadow_page_cache = &kvm->arch.split_shadow_page_cache;
6670
6671 /* Safe to pass NULL for vCPU since requesting a direct SP. */
6672 return __kvm_mmu_get_shadow_page(kvm, NULL, &caches, gfn, role);
6673 }
6674
6675 static void shadow_mmu_split_huge_page(struct kvm *kvm,
6676 const struct kvm_memory_slot *slot,
6677 u64 *huge_sptep)
6678
6679 {
6680 struct kvm_mmu_memory_cache *cache = &kvm->arch.split_desc_cache;
6681 u64 huge_spte = READ_ONCE(*huge_sptep);
6682 struct kvm_mmu_page *sp;
6683 bool flush = false;
6684 u64 *sptep, spte;
6685 gfn_t gfn;
6686 int index;
6687
6688 sp = shadow_mmu_get_sp_for_split(kvm, huge_sptep);
6689
6690 for (index = 0; index < SPTE_ENT_PER_PAGE; index++) {
6691 sptep = &sp->spt[index];
6692 gfn = kvm_mmu_page_get_gfn(sp, index);
6693
6694 /*
6695 * The SP may already have populated SPTEs, e.g. if this huge
6696 * page is aliased by multiple sptes with the same access
6697 * permissions. These entries are guaranteed to map the same
6698 * gfn-to-pfn translation since the SP is direct, so no need to
6699 * modify them.
6700 *
6701 * However, if a given SPTE points to a lower level page table,
6702 * that lower level page table may only be partially populated.
6703 * Installing such SPTEs would effectively unmap a portion of the
6704 * huge page. Unmapping guest memory always requires a TLB flush
6705 * since a subsequent operation on the unmapped regions would
6706 * fail to detect the need to flush.
6707 */
6708 if (is_shadow_present_pte(*sptep)) {
6709 flush |= !is_last_spte(*sptep, sp->role.level);
6710 continue;
6711 }
6712
6713 spte = make_huge_page_split_spte(kvm, huge_spte, sp->role, index);
6714 mmu_spte_set(sptep, spte);
6715 __rmap_add(kvm, cache, slot, sptep, gfn, sp->role.access);
6716 }
6717
6718 __link_shadow_page(kvm, cache, huge_sptep, sp, flush);
6719 }
6720
6721 static int shadow_mmu_try_split_huge_page(struct kvm *kvm,
6722 const struct kvm_memory_slot *slot,
6723 u64 *huge_sptep)
6724 {
6725 struct kvm_mmu_page *huge_sp = sptep_to_sp(huge_sptep);
6726 int level, r = 0;
6727 gfn_t gfn;
6728 u64 spte;
6729
6730 /* Grab information for the tracepoint before dropping the MMU lock. */
6731 gfn = kvm_mmu_page_get_gfn(huge_sp, spte_index(huge_sptep));
6732 level = huge_sp->role.level;
6733 spte = *huge_sptep;
6734
6735 if (kvm_mmu_available_pages(kvm) <= KVM_MIN_FREE_MMU_PAGES) {
6736 r = -ENOSPC;
6737 goto out;
6738 }
6739
6740 if (need_topup_split_caches_or_resched(kvm)) {
6741 write_unlock(&kvm->mmu_lock);
6742 cond_resched();
6743 /*
6744 * If the topup succeeds, return -EAGAIN to indicate that the
6745 * rmap iterator should be restarted because the MMU lock was
6746 * dropped.
6747 */
6748 r = topup_split_caches(kvm) ?: -EAGAIN;
6749 write_lock(&kvm->mmu_lock);
6750 goto out;
6751 }
6752
6753 shadow_mmu_split_huge_page(kvm, slot, huge_sptep);
6754
6755 out:
6756 trace_kvm_mmu_split_huge_page(gfn, spte, level, r);
6757 return r;
6758 }
6759
6760 static bool shadow_mmu_try_split_huge_pages(struct kvm *kvm,
6761 struct kvm_rmap_head *rmap_head,
6762 const struct kvm_memory_slot *slot)
6763 {
6764 struct rmap_iterator iter;
6765 struct kvm_mmu_page *sp;
6766 u64 *huge_sptep;
6767 int r;
6768
6769 restart:
6770 for_each_rmap_spte(rmap_head, &iter, huge_sptep) {
6771 sp = sptep_to_sp(huge_sptep);
6772
6773 /* TDP MMU is enabled, so rmap only contains nested MMU SPs. */
6774 if (WARN_ON_ONCE(!sp->role.guest_mode))
6775 continue;
6776
6777 /* The rmaps should never contain non-leaf SPTEs. */
6778 if (WARN_ON_ONCE(!is_large_pte(*huge_sptep)))
6779 continue;
6780
6781 /* SPs with level >PG_LEVEL_4K should never be unsync. */
6782 if (WARN_ON_ONCE(sp->unsync))
6783 continue;
6784
6785 /* Don't bother splitting huge pages on invalid SPs. */
6786 if (sp->role.invalid)
6787 continue;
6788
6789 r = shadow_mmu_try_split_huge_page(kvm, slot, huge_sptep);
6790
6791 /*
6792 * The split succeeded or needs to be retried because the MMU
6793 * lock was dropped. Either way, restart the iterator to get it
6794 * back into a consistent state.
6795 */
6796 if (!r || r == -EAGAIN)
6797 goto restart;
6798
6799 /* The split failed and shouldn't be retried (e.g. -ENOMEM). */
6800 break;
6801 }
6802
6803 return false;
6804 }
6805
6806 static void kvm_shadow_mmu_try_split_huge_pages(struct kvm *kvm,
6807 const struct kvm_memory_slot *slot,
6808 gfn_t start, gfn_t end,
6809 int target_level)
6810 {
6811 int level;
6812
6813 /*
6814 * Split huge pages starting with KVM_MAX_HUGEPAGE_LEVEL and working
6815 * down to the target level. This ensures pages are recursively split
6816 * all the way to the target level. There's no need to split pages
6817 * already at the target level.
6818 */
6819 for (level = KVM_MAX_HUGEPAGE_LEVEL; level > target_level; level--)
6820 __walk_slot_rmaps(kvm, slot, shadow_mmu_try_split_huge_pages,
6821 level, level, start, end - 1, true, false);
6822 }
6823
6824 /* Must be called with the mmu_lock held in write-mode. */
6825 void kvm_mmu_try_split_huge_pages(struct kvm *kvm,
6826 const struct kvm_memory_slot *memslot,
6827 u64 start, u64 end,
6828 int target_level)
6829 {
6830 if (!tdp_mmu_enabled)
6831 return;
6832
6833 if (kvm_memslots_have_rmaps(kvm))
6834 kvm_shadow_mmu_try_split_huge_pages(kvm, memslot, start, end, target_level);
6835
6836 kvm_tdp_mmu_try_split_huge_pages(kvm, memslot, start, end, target_level, false);
6837
6838 /*
6839 * A TLB flush is unnecessary at this point for the same reasons as in
6840 * kvm_mmu_slot_try_split_huge_pages().
6841 */
6842 }
6843
6844 void kvm_mmu_slot_try_split_huge_pages(struct kvm *kvm,
6845 const struct kvm_memory_slot *memslot,
6846 int target_level)
6847 {
6848 u64 start = memslot->base_gfn;
6849 u64 end = start + memslot->npages;
6850
6851 if (!tdp_mmu_enabled)
6852 return;
6853
6854 if (kvm_memslots_have_rmaps(kvm)) {
6855 write_lock(&kvm->mmu_lock);
6856 kvm_shadow_mmu_try_split_huge_pages(kvm, memslot, start, end, target_level);
6857 write_unlock(&kvm->mmu_lock);
6858 }
6859
6860 read_lock(&kvm->mmu_lock);
6861 kvm_tdp_mmu_try_split_huge_pages(kvm, memslot, start, end, target_level, true);
6862 read_unlock(&kvm->mmu_lock);
6863
6864 /*
6865 * No TLB flush is necessary here. KVM will flush TLBs after
6866 * write-protecting and/or clearing dirty on the newly split SPTEs to
6867 * ensure that guest writes are reflected in the dirty log before the
6868 * ioctl to enable dirty logging on this memslot completes. Since the
6869 * split SPTEs retain the write and dirty bits of the huge SPTE, it is
6870 * safe for KVM to decide if a TLB flush is necessary based on the split
6871 * SPTEs.
6872 */
6873 }
6874
6875 static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
6876 struct kvm_rmap_head *rmap_head,
6877 const struct kvm_memory_slot *slot)
6878 {
6879 u64 *sptep;
6880 struct rmap_iterator iter;
6881 int need_tlb_flush = 0;
6882 struct kvm_mmu_page *sp;
6883
6884 restart:
6885 for_each_rmap_spte(rmap_head, &iter, sptep) {
6886 sp = sptep_to_sp(sptep);
6887
6888 /*
6889 * We cannot do huge page mapping for indirect shadow pages,
6890 * which are found on the last rmap (level = 1) when not using
6891 * tdp; such shadow pages are synced with the page table in
6892 * the guest, and the guest page table is using 4K page size
6893 * mapping if the indirect sp has level = 1.
6894 */
6895 if (sp->role.direct &&
6896 sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn,
6897 PG_LEVEL_NUM)) {
6898 kvm_zap_one_rmap_spte(kvm, rmap_head, sptep);
6899
6900 if (kvm_available_flush_remote_tlbs_range())
6901 kvm_flush_remote_tlbs_sptep(kvm, sptep);
6902 else
6903 need_tlb_flush = 1;
6904
6905 goto restart;
6906 }
6907 }
6908
6909 return need_tlb_flush;
6910 }
6911 EXPORT_SYMBOL_GPL(kvm_zap_gfn_range);
6912
6913 static void kvm_rmap_zap_collapsible_sptes(struct kvm *kvm,
6914 const struct kvm_memory_slot *slot)
6915 {
6916 /*
6917 * Note, use KVM_MAX_HUGEPAGE_LEVEL - 1 since there's no need to zap
6918 * pages that are already mapped at the maximum hugepage level.
6919 */
6920 if (walk_slot_rmaps(kvm, slot, kvm_mmu_zap_collapsible_spte,
6921 PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL - 1, true))
6922 kvm_flush_remote_tlbs_memslot(kvm, slot);
6923 }
6924
6925 void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
6926 const struct kvm_memory_slot *slot)
6927 {
6928 if (kvm_memslots_have_rmaps(kvm)) {
6929 write_lock(&kvm->mmu_lock);
6930 kvm_rmap_zap_collapsible_sptes(kvm, slot);
6931 write_unlock(&kvm->mmu_lock);
6932 }
6933
6934 if (tdp_mmu_enabled) {
6935 read_lock(&kvm->mmu_lock);
6936 kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot);
6937 read_unlock(&kvm->mmu_lock);
6938 }
6939 }
6940
6941 void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
6942 const struct kvm_memory_slot *memslot)
6943 {
6944 if (kvm_memslots_have_rmaps(kvm)) {
6945 write_lock(&kvm->mmu_lock);
6946 /*
6947 * Clear dirty bits only on 4k SPTEs since the legacy MMU only
6948 * supports dirty logging at a 4k granularity.
6949 */
6950 walk_slot_rmaps_4k(kvm, memslot, __rmap_clear_dirty, false);
6951 write_unlock(&kvm->mmu_lock);
6952 }
6953
6954 if (tdp_mmu_enabled) {
6955 read_lock(&kvm->mmu_lock);
6956 kvm_tdp_mmu_clear_dirty_slot(kvm, memslot);
6957 read_unlock(&kvm->mmu_lock);
6958 }
6959
6960 /*
6961 * The caller will flush the TLBs after this function returns.
6962 *
6963 * It's also safe to flush TLBs out of mmu lock here as currently this
6964 * function is only used for dirty logging, in which case flushing TLB
6965 * out of mmu lock also guarantees no dirty pages will be lost in
6966 * dirty_bitmap.
6967 */
6968 }
6969
6970 static void kvm_mmu_zap_all(struct kvm *kvm)
6971 {
6972 struct kvm_mmu_page *sp, *node;
6973 LIST_HEAD(invalid_list);
6974 int ign;
6975
6976 write_lock(&kvm->mmu_lock);
6977 restart:
6978 list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
6979 if (WARN_ON_ONCE(sp->role.invalid))
6980 continue;
6981 if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
6982 goto restart;
6983 if (cond_resched_rwlock_write(&kvm->mmu_lock))
6984 goto restart;
6985 }
6986
6987 kvm_mmu_commit_zap_page(kvm, &invalid_list);
6988
6989 if (tdp_mmu_enabled)
6990 kvm_tdp_mmu_zap_all(kvm);
6991
6992 write_unlock(&kvm->mmu_lock);
6993 }
6994
6995 void kvm_arch_flush_shadow_all(struct kvm *kvm)
6996 {
6997 kvm_mmu_zap_all(kvm);
6998 }
6999
7000 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
7001 struct kvm_memory_slot *slot)
7002 {
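/*
 * Zap all SPTEs (via the fast, obsolete-generation mechanism) rather than
 * just the deleted/moved memslot's SPTEs.  Precise per-memslot zapping has
 * historically proven to be subtly buggy, so the blunt approach is kept.
 */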
7003 kvm_mmu_zap_all_fast(kvm);
7004 }
7005
7006 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
7007 {
7008 WARN_ON_ONCE(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
7009
7010 gen &= MMIO_SPTE_GEN_MASK;
7011
7012 /*
7013 * Generation numbers are incremented in multiples of the number of
7014 * address spaces in order to provide unique generations across all
7015 * address spaces. Strip what is effectively the address space
7016 * modifier prior to checking for a wrap of the MMIO generation so
7017 * that a wrap in any address space is detected.
7018 */
7019 gen &= ~((u64)kvm_arch_nr_memslot_as_ids(kvm) - 1);
7020
7021 /*
7022 * The very rare case: if the MMIO generation number has wrapped,
7023 * zap all shadow pages.
7024 */
7025 if (unlikely(gen == 0)) {
7026 kvm_debug_ratelimited("zapping shadow pages for mmio generation wraparound\n");
7027 kvm_mmu_zap_all_fast(kvm);
7028 }
7029 }
7030
7031 static unsigned long mmu_shrink_scan(struct shrinker *shrink,
7032 struct shrink_control *sc)
7033 {
7034 struct kvm *kvm;
7035 int nr_to_scan = sc->nr_to_scan;
7036 unsigned long freed = 0;
7037
7038 mutex_lock(&kvm_lock);
7039
7040 list_for_each_entry(kvm, &vm_list, vm_list) {
7041 int idx;
7042
7043 /*
7044 * Never scan more than sc->nr_to_scan VM instances.
7045 * In practice this condition is never hit, since we do not try
7046 * to shrink more than one VM and it is very unlikely to see
7047 * !n_used_mmu_pages so many times.
7048 */
7049 if (!nr_to_scan--)
7050 break;
7051 /*
7052 * n_used_mmu_pages is accessed without holding kvm->mmu_lock
7053 * here. We may skip a VM instance erroneously, but we do not
7054 * want to shrink a VM that only started to populate its MMU
7055 * anyway.
7056 */
7057 if (!kvm->arch.n_used_mmu_pages &&
7058 !kvm_has_zapped_obsolete_pages(kvm))
7059 continue;
7060
7061 idx = srcu_read_lock(&kvm->srcu);
7062 write_lock(&kvm->mmu_lock);
7063
7064 if (kvm_has_zapped_obsolete_pages(kvm)) {
7065 kvm_mmu_commit_zap_page(kvm,
7066 &kvm->arch.zapped_obsolete_pages);
7067 goto unlock;
7068 }
7069
7070 freed = kvm_mmu_zap_oldest_mmu_pages(kvm, sc->nr_to_scan);
7071
7072 unlock:
7073 write_unlock(&kvm->mmu_lock);
7074 srcu_read_unlock(&kvm->srcu, idx);
7075
7076 /*
7077 * unfair on small ones
7078 * per-vm shrinkers cry out
7079 * sadness comes quickly
7080 */
7081 list_move_tail(&kvm->vm_list, &vm_list);
7082 break;
7083 }
7084
7085 mutex_unlock(&kvm_lock);
7086 return freed;
7087 }
7088
7089 static unsigned long mmu_shrink_count(struct shrinker *shrink,
7090 struct shrink_control *sc)
7091 {
7092 return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
7093 }
7094
7095 static struct shrinker *mmu_shrinker;
7096
7097 static void mmu_destroy_caches(void)
7098 {
7099 kmem_cache_destroy(pte_list_desc_cache);
7100 kmem_cache_destroy(mmu_page_header_cache);
7101 }
7102
7103 static int get_nx_huge_pages(char *buffer, const struct kernel_param *kp)
7104 {
7105 if (nx_hugepage_mitigation_hard_disabled)
7106 return sysfs_emit(buffer, "never\n");
7107
7108 return param_get_bool(buffer, kp);
7109 }
7110
7111 static bool get_nx_auto_mode(void)
7112 {
7113 /* Return true when CPU has the bug, and mitigations are ON */
7114 return boot_cpu_has_bug(X86_BUG_ITLB_MULTIHIT) && !cpu_mitigations_off();
7115 }
7116
7117 static void __set_nx_huge_pages(bool val)
7118 {
7119 nx_huge_pages = itlb_multihit_kvm_mitigation = val;
7120 }
7121
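/*
 * Accepts "off", "force", "auto", "never", or a plain boolean.  For example,
 * the mitigation can typically be toggled at runtime via sysfs (path assumes
 * the standard module parameter layout):
 *
 *   echo never > /sys/module/kvm/parameters/nx_huge_pages
 *
 * "never" is only accepted before any VMs are created and hard disables the
 * mitigation for the remaining lifetime of the module.
 */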
7122 static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
7123 {
7124 bool old_val = nx_huge_pages;
7125 bool new_val;
7126
7127 if (nx_hugepage_mitigation_hard_disabled)
7128 return -EPERM;
7129
7130 /* In "auto" mode deploy workaround only if CPU has the bug. */
7131 if (sysfs_streq(val, "off")) {
7132 new_val = 0;
7133 } else if (sysfs_streq(val, "force")) {
7134 new_val = 1;
7135 } else if (sysfs_streq(val, "auto")) {
7136 new_val = get_nx_auto_mode();
7137 } else if (sysfs_streq(val, "never")) {
7138 new_val = 0;
7139
7140 mutex_lock(&kvm_lock);
7141 if (!list_empty(&vm_list)) {
7142 mutex_unlock(&kvm_lock);
7143 return -EBUSY;
7144 }
7145 nx_hugepage_mitigation_hard_disabled = true;
7146 mutex_unlock(&kvm_lock);
7147 } else if (kstrtobool(val, &new_val) < 0) {
7148 return -EINVAL;
7149 }
7150
7151 __set_nx_huge_pages(new_val);
7152
7153 if (new_val != old_val) {
7154 struct kvm *kvm;
7155
7156 mutex_lock(&kvm_lock);
7157
7158 list_for_each_entry(kvm, &vm_list, vm_list) {
7159 mutex_lock(&kvm->slots_lock);
7160 kvm_mmu_zap_all_fast(kvm);
7161 mutex_unlock(&kvm->slots_lock);
7162
7163 wake_up_process(kvm->arch.nx_huge_page_recovery_thread);
7164 }
7165 mutex_unlock(&kvm_lock);
7166 }
7167
7168 return 0;
7169 }
7170
7171 /*
7172 * nx_huge_pages needs to be resolved to true/false when kvm.ko is loaded, as
7173 * its default value of -1 is technically undefined behavior for a boolean.
7174 * Forward the module init call to SPTE code so that it too can handle module
7175 * params that need to be resolved/snapshot.
7176 */
7177 void __init kvm_mmu_x86_module_init(void)
7178 {
7179 if (nx_huge_pages == -1)
7180 __set_nx_huge_pages(get_nx_auto_mode());
7181
7182 /*
7183 * Snapshot userspace's desire to enable the TDP MMU. Whether or not the
7184 * TDP MMU is actually enabled is determined in kvm_configure_mmu()
7185 * when the vendor module is loaded.
7186 */
7187 tdp_mmu_allowed = tdp_mmu_enabled;
7188
7189 kvm_mmu_spte_module_init();
7190 }
7191
7192 /*
7193 * The bulk of the MMU initialization is deferred until the vendor module is
7194 * loaded as many of the masks/values may be modified by VMX or SVM, i.e. need
7195 * to be reset when a potentially different vendor module is loaded.
7196 */
7197 int kvm_mmu_vendor_module_init(void)
7198 {
7199 int ret = -ENOMEM;
7200
7201 /*
7202 * MMU roles use union aliasing which is, generally speaking, an
7203 * undefined behavior. However, we supposedly know how compilers behave
7204 * and the current status quo is unlikely to change. Guardians below are
7205 * supposed to let us know if the assumption becomes false.
7206 */
7207 BUILD_BUG_ON(sizeof(union kvm_mmu_page_role) != sizeof(u32));
7208 BUILD_BUG_ON(sizeof(union kvm_mmu_extended_role) != sizeof(u32));
7209 BUILD_BUG_ON(sizeof(union kvm_cpu_role) != sizeof(u64));
7210
7211 kvm_mmu_reset_all_pte_masks();
7212
7213 pte_list_desc_cache = KMEM_CACHE(pte_list_desc, SLAB_ACCOUNT);
7214 if (!pte_list_desc_cache)
7215 goto out;
7216
7217 mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
7218 sizeof(struct kvm_mmu_page),
7219 0, SLAB_ACCOUNT, NULL);
7220 if (!mmu_page_header_cache)
7221 goto out;
7222
7223 if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
7224 goto out;
7225
7226 mmu_shrinker = shrinker_alloc(0, "x86-mmu");
7227 if (!mmu_shrinker)
7228 goto out_shrinker;
7229
7230 mmu_shrinker->count_objects = mmu_shrink_count;
7231 mmu_shrinker->scan_objects = mmu_shrink_scan;
7232 mmu_shrinker->seeks = DEFAULT_SEEKS * 10;
7233
7234 shrinker_register(mmu_shrinker);
7235
7236 return 0;
7237
7238 out_shrinker:
7239 percpu_counter_destroy(&kvm_total_used_mmu_pages);
7240 out:
7241 mmu_destroy_caches();
7242 return ret;
7243 }
7244
7245 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
7246 {
7247 kvm_mmu_unload(vcpu);
7248 free_mmu_pages(&vcpu->arch.root_mmu);
7249 free_mmu_pages(&vcpu->arch.guest_mmu);
7250 mmu_free_memory_caches(vcpu);
7251 }
7252
7253 void kvm_mmu_vendor_module_exit(void)
7254 {
7255 mmu_destroy_caches();
7256 percpu_counter_destroy(&kvm_total_used_mmu_pages);
7257 shrinker_free(mmu_shrinker);
7258 }
7259
7260 /*
7261 * Calculate the effective recovery period, accounting for '0' meaning "let KVM
7262 * select a halving time of 1 hour". Returns true if recovery is enabled.
7263 */
7264 static bool calc_nx_huge_pages_recovery_period(uint *period)
7265 {
7266 /*
7267 * Use READ_ONCE to get the params, this may be called outside of the
7268 * param setters, e.g. by the kthread to compute its next timeout.
7269 */
7270 bool enabled = READ_ONCE(nx_huge_pages);
7271 uint ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
7272
7273 if (!enabled || !ratio)
7274 return false;
7275
7276 *period = READ_ONCE(nx_huge_pages_recovery_period_ms);
7277 if (!*period) {
7278 /* Make sure the period is not less than one second. */
7279 ratio = min(ratio, 3600u);
7280 *period = 60 * 60 * 1000 / ratio;
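/* E.g. a ratio of 60 yields a one-minute period: 60 * 60 * 1000 / 60 = 60000 ms. */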
7281 }
7282 return true;
7283 }
7284
7285 static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp)
7286 {
7287 bool was_recovery_enabled, is_recovery_enabled;
7288 uint old_period, new_period;
7289 int err;
7290
7291 if (nx_hugepage_mitigation_hard_disabled)
7292 return -EPERM;
7293
7294 was_recovery_enabled = calc_nx_huge_pages_recovery_period(&old_period);
7295
7296 err = param_set_uint(val, kp);
7297 if (err)
7298 return err;
7299
7300 is_recovery_enabled = calc_nx_huge_pages_recovery_period(&new_period);
7301
7302 if (is_recovery_enabled &&
7303 (!was_recovery_enabled || old_period > new_period)) {
7304 struct kvm *kvm;
7305
7306 mutex_lock(&kvm_lock);
7307
7308 list_for_each_entry(kvm, &vm_list, vm_list)
7309 wake_up_process(kvm->arch.nx_huge_page_recovery_thread);
7310
7311 mutex_unlock(&kvm_lock);
7312 }
7313
7314 return err;
7315 }
7316
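/*
 * Zap a fraction of the shadow pages that were installed in place of a
 * disallowed NX huge page so that the huge mappings can be recreated.  Each
 * pass zaps up to 1/nx_huge_pages_recovery_ratio of the currently disallowed
 * pages, e.g. a ratio of 60 with a one minute period recovers roughly all of
 * them over the course of an hour.
 */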
7317 static void kvm_recover_nx_huge_pages(struct kvm *kvm)
7318 {
7319 unsigned long nx_lpage_splits = kvm->stat.nx_lpage_splits;
7320 struct kvm_memory_slot *slot;
7321 int rcu_idx;
7322 struct kvm_mmu_page *sp;
7323 unsigned int ratio;
7324 LIST_HEAD(invalid_list);
7325 bool flush = false;
7326 ulong to_zap;
7327
7328 rcu_idx = srcu_read_lock(&kvm->srcu);
7329 write_lock(&kvm->mmu_lock);
7330
7331 /*
7332 * Zapping TDP MMU shadow pages, including the remote TLB flush, must
7333 * be done under RCU protection, because the pages are freed via RCU
7334 * callback.
7335 */
7336 rcu_read_lock();
7337
7338 ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
7339 to_zap = ratio ? DIV_ROUND_UP(nx_lpage_splits, ratio) : 0;
7340 for ( ; to_zap; --to_zap) {
7341 if (list_empty(&kvm->arch.possible_nx_huge_pages))
7342 break;
7343
7344 /*
7345 * We use a separate list instead of just using active_mmu_pages
7346 * because the number of shadow pages that can be replaced with an
7347 * NX huge page is expected to be relatively small compared to
7348 * the total number of shadow pages. And because the TDP MMU
7349 * doesn't use active_mmu_pages.
7350 */
7351 sp = list_first_entry(&kvm->arch.possible_nx_huge_pages,
7352 struct kvm_mmu_page,
7353 possible_nx_huge_page_link);
7354 WARN_ON_ONCE(!sp->nx_huge_page_disallowed);
7355 WARN_ON_ONCE(!sp->role.direct);
7356
7357 /*
7358 * Unaccount and do not attempt to recover any NX Huge Pages
7359 * that are being dirty tracked, as they would just be faulted
7360 * back in as 4KiB pages. The NX Huge Pages in this slot will be
7361 * recovered, along with all the other huge pages in the slot,
7362 * when dirty logging is disabled.
7363 *
7364 * Since gfn_to_memslot() is relatively expensive, it helps to
7365 * skip it if the test cannot possibly return true. On the
7366 * other hand, if any memslot has logging enabled, chances are
7367 * good that all of them do, in which case unaccount_nx_huge_page()
7368 * is much cheaper than zapping the page.
7369 *
7370 * If a memslot update is in progress, reading an incorrect value
7371 * of kvm->nr_memslots_dirty_logging is not a problem: if it is
7372 * becoming zero, gfn_to_memslot() will be done unnecessarily; if
7373 * it is becoming nonzero, the page will be zapped unnecessarily.
7374 * Either way, this only affects efficiency in racy situations,
7375 * and not correctness.
7376 */
7377 slot = NULL;
7378 if (atomic_read(&kvm->nr_memslots_dirty_logging)) {
7379 struct kvm_memslots *slots;
7380
7381 slots = kvm_memslots_for_spte_role(kvm, sp->role);
7382 slot = __gfn_to_memslot(slots, sp->gfn);
7383 WARN_ON_ONCE(!slot);
7384 }
7385
7386 if (slot && kvm_slot_dirty_track_enabled(slot))
7387 unaccount_nx_huge_page(kvm, sp);
7388 else if (is_tdp_mmu_page(sp))
7389 flush |= kvm_tdp_mmu_zap_sp(kvm, sp);
7390 else
7391 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
7392 WARN_ON_ONCE(sp->nx_huge_page_disallowed);
7393
7394 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
7395 kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
7396 rcu_read_unlock();
7397
7398 cond_resched_rwlock_write(&kvm->mmu_lock);
7399 flush = false;
7400
7401 rcu_read_lock();
7402 }
7403 }
7404 kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
7405
7406 rcu_read_unlock();
7407
7408 write_unlock(&kvm->mmu_lock);
7409 srcu_read_unlock(&kvm->srcu, rcu_idx);
7410 }
7411
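/*
 * Compute how long the recovery worker should sleep, relative to start_time.
 * Returns MAX_SCHEDULE_TIMEOUT when recovery is disabled, in which case the
 * worker sleeps until it is explicitly woken, e.g. by a module param change.
 */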
7412 static long get_nx_huge_page_recovery_timeout(u64 start_time)
7413 {
7414 bool enabled;
7415 uint period;
7416
7417 enabled = calc_nx_huge_pages_recovery_period(&period);
7418
7419 return enabled ? start_time + msecs_to_jiffies(period) - get_jiffies_64()
7420 : MAX_SCHEDULE_TIMEOUT;
7421 }
7422
7423 static int kvm_nx_huge_page_recovery_worker(struct kvm *kvm, uintptr_t data)
7424 {
7425 u64 start_time;
7426 long remaining_time;
7427
7428 while (true) {
7429 start_time = get_jiffies_64();
7430 remaining_time = get_nx_huge_page_recovery_timeout(start_time);
7431
7432 set_current_state(TASK_INTERRUPTIBLE);
7433 while (!kthread_should_stop() && remaining_time > 0) {
7434 schedule_timeout(remaining_time);
7435 remaining_time = get_nx_huge_page_recovery_timeout(start_time);
7436 set_current_state(TASK_INTERRUPTIBLE);
7437 }
7438
7439 set_current_state(TASK_RUNNING);
7440
7441 if (kthread_should_stop())
7442 return 0;
7443
7444 kvm_recover_nx_huge_pages(kvm);
7445 }
7446 }
7447
7448 int kvm_mmu_post_init_vm(struct kvm *kvm)
7449 {
7450 int err;
7451
7452 if (nx_hugepage_mitigation_hard_disabled)
7453 return 0;
7454
7455 err = kvm_vm_create_worker_thread(kvm, kvm_nx_huge_page_recovery_worker, 0,
7456 "kvm-nx-lpage-recovery",
7457 &kvm->arch.nx_huge_page_recovery_thread);
7458 if (!err)
7459 kthread_unpark(kvm->arch.nx_huge_page_recovery_thread);
7460
7461 return err;
7462 }
7463
7464 void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
7465 {
7466 if (kvm->arch.nx_huge_page_recovery_thread)
7467 kthread_stop(kvm->arch.nx_huge_page_recovery_thread);
7468 }
7469
7470 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
7471 bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
7472 struct kvm_gfn_range *range)
7473 {
7474 /*
7475 * Zap SPTEs even if the slot can't be mapped PRIVATE. KVM x86 only
7476 * supports KVM_MEMORY_ATTRIBUTE_PRIVATE, and so it *seems* like KVM
7477 * can simply ignore such slots. But if userspace is making memory
7478 * PRIVATE, then KVM must prevent the guest from accessing the memory
7479 * as shared. And if userspace is making memory SHARED and this point
7480 * is reached, then at least one page within the range was previously
7481 * PRIVATE, i.e. the slot's possible hugepage ranges are changing.
7482 * Zapping SPTEs in this case ensures KVM will reassess whether or not
7483 * a hugepage can be used for affected ranges.
7484 */
7485 if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
7486 return false;
7487
7488 return kvm_unmap_gfn_range(kvm, range);
7489 }
7490
7491 static bool hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
7492 int level)
7493 {
7494 return lpage_info_slot(gfn, slot, level)->disallow_lpage & KVM_LPAGE_MIXED_FLAG;
7495 }
7496
7497 static void hugepage_clear_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
7498 int level)
7499 {
7500 lpage_info_slot(gfn, slot, level)->disallow_lpage &= ~KVM_LPAGE_MIXED_FLAG;
7501 }
7502
7503 static void hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
7504 int level)
7505 {
7506 lpage_info_slot(gfn, slot, level)->disallow_lpage |= KVM_LPAGE_MIXED_FLAG;
7507 }
7508
7509 static bool hugepage_has_attrs(struct kvm *kvm, struct kvm_memory_slot *slot,
7510 gfn_t gfn, int level, unsigned long attrs)
7511 {
7512 const unsigned long start = gfn;
7513 const unsigned long end = start + KVM_PAGES_PER_HPAGE(level);
7514
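/*
 * At the lowest hugepage level (2M), check the attributes of every page in
 * the range directly.  At higher levels, consult the next lower level's
 * mixed tracking, which the caller is expected to have updated first (see
 * the bottom-up loop in kvm_arch_post_set_memory_attributes()).
 */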
7515 if (level == PG_LEVEL_2M)
7516 return kvm_range_has_memory_attributes(kvm, start, end, ~0, attrs);
7517
7518 for (gfn = start; gfn < end; gfn += KVM_PAGES_PER_HPAGE(level - 1)) {
7519 if (hugepage_test_mixed(slot, gfn, level - 1) ||
7520 attrs != kvm_get_memory_attributes(kvm, gfn))
7521 return false;
7522 }
7523 return true;
7524 }
7525
7526 bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
7527 struct kvm_gfn_range *range)
7528 {
7529 unsigned long attrs = range->arg.attributes;
7530 struct kvm_memory_slot *slot = range->slot;
7531 int level;
7532
7533 lockdep_assert_held_write(&kvm->mmu_lock);
7534 lockdep_assert_held(&kvm->slots_lock);
7535
7536 /*
7537 * Calculate which ranges can be mapped with hugepages even if the slot
7538 * can't map memory PRIVATE. KVM mustn't create a SHARED hugepage over
7539 * a range that has PRIVATE GFNs, and conversely converting a range to
7540 * SHARED may now allow hugepages.
7541 */
7542 if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
7543 return false;
7544
7545 /*
7546 * The sequence matters here: upper levels consume the result of lower
7547 * level's scanning.
7548 */
7549 for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
7550 gfn_t nr_pages = KVM_PAGES_PER_HPAGE(level);
7551 gfn_t gfn = gfn_round_for_level(range->start, level);
7552
7553 /* Process the head page if it straddles the range. */
7554 if (gfn != range->start || gfn + nr_pages > range->end) {
7555 /*
7556 * Skip mixed tracking if the aligned gfn isn't covered
7557 * by the memslot, as KVM can't use a hugepage due to the
7558 * misaligned address regardless of memory attributes.
7559 */
7560 if (gfn >= slot->base_gfn &&
7561 gfn + nr_pages <= slot->base_gfn + slot->npages) {
7562 if (hugepage_has_attrs(kvm, slot, gfn, level, attrs))
7563 hugepage_clear_mixed(slot, gfn, level);
7564 else
7565 hugepage_set_mixed(slot, gfn, level);
7566 }
7567 gfn += nr_pages;
7568 }
7569
7570 /*
7571 * Pages entirely covered by the range are guaranteed to have
7572 * only the attributes which were just set.
7573 */
7574 for ( ; gfn + nr_pages <= range->end; gfn += nr_pages)
7575 hugepage_clear_mixed(slot, gfn, level);
7576
7577 /*
7578 * Process the last tail page if it straddles the range and is
7579 * contained by the memslot. Like the head page, KVM can't
7580 * create a hugepage if the slot size is misaligned.
7581 */
7582 if (gfn < range->end &&
7583 (gfn + nr_pages) <= (slot->base_gfn + slot->npages)) {
7584 if (hugepage_has_attrs(kvm, slot, gfn, level, attrs))
7585 hugepage_clear_mixed(slot, gfn, level);
7586 else
7587 hugepage_set_mixed(slot, gfn, level);
7588 }
7589 }
7590 return false;
7591 }
7592
7593 void kvm_mmu_init_memslot_memory_attributes(struct kvm *kvm,
7594 struct kvm_memory_slot *slot)
7595 {
7596 int level;
7597
7598 if (!kvm_arch_has_private_mem(kvm))
7599 return;
7600
7601 for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
7602 /*
7603 * Don't bother tracking mixed attributes for pages that can't
7604 * be huge due to alignment, i.e. process only pages that are
7605 * entirely contained by the memslot.
7606 */
7607 gfn_t end = gfn_round_for_level(slot->base_gfn + slot->npages, level);
7608 gfn_t start = gfn_round_for_level(slot->base_gfn, level);
7609 gfn_t nr_pages = KVM_PAGES_PER_HPAGE(level);
7610 gfn_t gfn;
7611
7612 if (start < slot->base_gfn)
7613 start += nr_pages;
7614
7615 /*
7616 * Unlike setting attributes, every potential hugepage needs to
7617 * be manually checked as the attributes may already be mixed.
7618 */
7619 for (gfn = start; gfn < end; gfn += nr_pages) {
7620 unsigned long attrs = kvm_get_memory_attributes(kvm, gfn);
7621
7622 if (hugepage_has_attrs(kvm, slot, gfn, level, attrs))
7623 hugepage_clear_mixed(slot, gfn, level);
7624 else
7625 hugepage_set_mixed(slot, gfn, level);
7626 }
7627 }
7628 }
7629 #endif
7630