mmu.c (6f8e65a60168567cc59f9b99980ea9112d4152f5) → mmu.c (897218ff7cf19290ec2d69652ce673d8ed6fedeb)
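The change is the same in every hunk below: open-coded tests of kvm->arch.tdp_mmu_enabled and sp->tdp_mmu_page are replaced by the is_tdp_mmu_enabled() and is_tdp_mmu_page() helpers. The helpers themselves are not part of this file; the sketch below is a reconstruction for context (assumed to live in arch/x86/kvm/mmu/tdp_mmu.h behind a CONFIG_X86_64 guard), showing why configurations without the TDP MMU can compile the guarded kvm_tdp_mmu_*() calls away:

/* Sketch only -- reconstructed for context, not shown in this diff. */
#ifdef CONFIG_X86_64
static inline bool is_tdp_mmu_enabled(struct kvm *kvm)
{
	/* Same condition the callers used to open-code. */
	return kvm->arch.tdp_mmu_enabled;
}

static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp)
{
	return sp->tdp_mmu_page;
}
#else
/* With the TDP MMU compiled out, both predicates are constant false,
 * so the compiler can drop the guarded kvm_tdp_mmu_*() calls entirely. */
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
#endif
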
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Kernel-based Virtual Machine driver for Linux
4 *
5 * This module enables machines with Intel VT-x extensions to run virtual
6 * machines without emulation or binary translation.
7 *
8 * MMU support

--- 1211 unchanged lines hidden ---

1220 * logging we do not have any such mappings.
1221 */
1222static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
1223 struct kvm_memory_slot *slot,
1224 gfn_t gfn_offset, unsigned long mask)
1225{
1226 struct kvm_rmap_head *rmap_head;
1227
1228 -	if (kvm->arch.tdp_mmu_enabled)
1228 +	if (is_tdp_mmu_enabled(kvm))
1229 kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
1230 slot->base_gfn + gfn_offset, mask, true);
1231 while (mask) {
1232 rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
1233 PG_LEVEL_4K, slot);
1234 __rmap_write_protect(kvm, rmap_head, false);
1235
1236 /* clear the first set bit */

--- 12 unchanged lines hidden ---

1249 * Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap.
1250 */
1251void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1252 struct kvm_memory_slot *slot,
1253 gfn_t gfn_offset, unsigned long mask)
1254{
1255 struct kvm_rmap_head *rmap_head;
1256
1257 -	if (kvm->arch.tdp_mmu_enabled)
1257 +	if (is_tdp_mmu_enabled(kvm))
1258 kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
1259 slot->base_gfn + gfn_offset, mask, false);
1260 while (mask) {
1261 rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
1262 PG_LEVEL_4K, slot);
1263 __rmap_clear_dirty(kvm, rmap_head);
1264
1265 /* clear the first set bit */

--- 39 unchanged lines hidden ---

1305 int i;
1306 bool write_protected = false;
1307
1308 for (i = PG_LEVEL_4K; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
1309 rmap_head = __gfn_to_rmap(gfn, i, slot);
1310 write_protected |= __rmap_write_protect(kvm, rmap_head, true);
1311 }
1312
1313 -	if (kvm->arch.tdp_mmu_enabled)
1313 +	if (is_tdp_mmu_enabled(kvm))
1314 write_protected |=
1315 kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn);
1316
1317 return write_protected;
1318}
1319
1320static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
1321{

--- 195 unchanged lines hidden ---

1517
1518int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
1519 unsigned flags)
1520{
1521 int r;
1522
1523 r = kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
1524
1525 -	if (kvm->arch.tdp_mmu_enabled)
1525 +	if (is_tdp_mmu_enabled(kvm))
1526 r |= kvm_tdp_mmu_zap_hva_range(kvm, start, end);
1527
1528 return r;
1529}
1530
1531int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
1532{
1533 int r;
1534
1535 r = kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
1536
1537 -	if (kvm->arch.tdp_mmu_enabled)
1537 +	if (is_tdp_mmu_enabled(kvm))
1538 r |= kvm_tdp_mmu_set_spte_hva(kvm, hva, &pte);
1539
1540 return r;
1541}
1542
1543static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1544 struct kvm_memory_slot *slot, gfn_t gfn, int level,
1545 unsigned long data)

--- 38 unchanged lines hidden ---

1584 KVM_PAGES_PER_HPAGE(sp->role.level));
1585}
1586
1587int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
1588{
1589 int young = false;
1590
1591 young = kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp);
1592 -	if (kvm->arch.tdp_mmu_enabled)
1592 +	if (is_tdp_mmu_enabled(kvm))
1593 young |= kvm_tdp_mmu_age_hva_range(kvm, start, end);
1594
1595 return young;
1596}
1597
1598int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
1599{
1600 int young = false;
1601
1602 young = kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp);
1603 -	if (kvm->arch.tdp_mmu_enabled)
1603 +	if (is_tdp_mmu_enabled(kvm))
1604 young |= kvm_tdp_mmu_test_age_hva(kvm, hva);
1605
1606 return young;
1607}
1608
1609#ifdef MMU_DEBUG
1610static int is_empty_shadow_page(u64 *spt)
1611{

--- 1538 unchanged lines hidden ---

3150 struct kvm_mmu_page *sp;
3151
3152 if (!VALID_PAGE(*root_hpa))
3153 return;
3154
3155 sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK);
3156
3157 if (kvm_mmu_put_root(kvm, sp)) {
3158 -	if (sp->tdp_mmu_page)
3158 +	if (is_tdp_mmu_page(sp))
3159 kvm_tdp_mmu_free_root(kvm, sp);
3160 else if (sp->role.invalid)
3161 kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
3162 }
3163
3164 *root_hpa = INVALID_PAGE;
3165}
3166

--- 77 unchanged lines hidden ---

3244}
3245
3246static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
3247{
3248 u8 shadow_root_level = vcpu->arch.mmu->shadow_root_level;
3249 hpa_t root;
3250 unsigned i;
3251
3252 -	if (vcpu->kvm->arch.tdp_mmu_enabled) {
3252 +	if (is_tdp_mmu_enabled(vcpu->kvm)) {
3253 root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu);
3254
3255 if (!VALID_PAGE(root))
3256 return -ENOSPC;
3257 vcpu->arch.mmu->root_hpa = root;
3258 } else if (shadow_root_level >= PT64_ROOT_4LEVEL) {
3259 root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level,
3260 true);

--- 2145 unchanged lines hidden ---

5406 *
5407 * Note: we need to do this under the protection of mmu_lock,
5408 * otherwise, vcpu would purge shadow page but miss tlb flush.
5409 */
5410 kvm_reload_remote_mmus(kvm);
5411
5412 kvm_zap_obsolete_pages(kvm);
5413
5414 -	if (kvm->arch.tdp_mmu_enabled)
5414 +	if (is_tdp_mmu_enabled(kvm))
5415 kvm_tdp_mmu_zap_all(kvm);
5416
5417 write_unlock(&kvm->mmu_lock);
5418}
5419
5420static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
5421{
5422 return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));

--- 46 unchanged lines hidden ---

5469
5470 slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
5471 PG_LEVEL_4K,
5472 KVM_MAX_HUGEPAGE_LEVEL,
5473 start, end - 1, true);
5474 }
5475 }
5476
5477 -	if (kvm->arch.tdp_mmu_enabled) {
5477 +	if (is_tdp_mmu_enabled(kvm)) {
5478 flush = kvm_tdp_mmu_zap_gfn_range(kvm, gfn_start, gfn_end);
5479 if (flush)
5480 kvm_flush_remote_tlbs(kvm);
5481 }
5482
5483 write_unlock(&kvm->mmu_lock);
5484}
5485

--- 7 unchanged lines hidden ---

5493 struct kvm_memory_slot *memslot,
5494 int start_level)
5495{
5496 bool flush;
5497
5498 write_lock(&kvm->mmu_lock);
5499 flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
5500 start_level, KVM_MAX_HUGEPAGE_LEVEL, false);
5501 -	if (kvm->arch.tdp_mmu_enabled)
5501 +	if (is_tdp_mmu_enabled(kvm))
5502 flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, PG_LEVEL_4K);
5503 write_unlock(&kvm->mmu_lock);
5504
5505 /*
5506 * We can flush all the TLBs out of the mmu lock without TLB
5507 * corruption since we just change the spte from writable to
5508 * readonly so that we only need to care the case of changing
5509 * spte from present to present (changing the spte from present

--- 49 unchanged lines hidden ---

5559void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
5560 const struct kvm_memory_slot *memslot)
5561{
5562 /* FIXME: const-ify all uses of struct kvm_memory_slot. */
5563 write_lock(&kvm->mmu_lock);
5564 slot_handle_leaf(kvm, (struct kvm_memory_slot *)memslot,
5565 kvm_mmu_zap_collapsible_spte, true);
5566
5567 -	if (kvm->arch.tdp_mmu_enabled)
5567 +	if (is_tdp_mmu_enabled(kvm))
5568 kvm_tdp_mmu_zap_collapsible_sptes(kvm, memslot);
5569 write_unlock(&kvm->mmu_lock);
5570}
5571
5572void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
5573 struct kvm_memory_slot *memslot)
5574{
5575 /*

--- 10 unchanged lines hidden ---

5586
5587void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
5588 struct kvm_memory_slot *memslot)
5589{
5590 bool flush;
5591
5592 write_lock(&kvm->mmu_lock);
5593 flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false);
5594 -	if (kvm->arch.tdp_mmu_enabled)
5594 +	if (is_tdp_mmu_enabled(kvm))
5595 flush |= kvm_tdp_mmu_clear_dirty_slot(kvm, memslot);
5596 write_unlock(&kvm->mmu_lock);
5597
5598 /*
5599 * It's also safe to flush TLBs out of mmu lock here as currently this
5600 * function is only used for dirty logging, in which case flushing TLB
5601 * out of mmu lock also guarantees no dirty pages will be lost in
5602 * dirty_bitmap.

--- 6 unchanged lines hidden ---

5609void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
5610 struct kvm_memory_slot *memslot)
5611{
5612 bool flush;
5613
5614 write_lock(&kvm->mmu_lock);
5615 flush = slot_handle_large_level(kvm, memslot, slot_rmap_write_protect,
5616 false);
5617 -	if (kvm->arch.tdp_mmu_enabled)
5617 +	if (is_tdp_mmu_enabled(kvm))
5618 flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, PG_LEVEL_2M);
5619 write_unlock(&kvm->mmu_lock);
5620
5621 if (flush)
5622 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
5623}
5624EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access);
5625
5626void kvm_mmu_slot_set_dirty(struct kvm *kvm,
5627 struct kvm_memory_slot *memslot)
5628{
5629 bool flush;
5630
5631 write_lock(&kvm->mmu_lock);
5632 flush = slot_handle_all_level(kvm, memslot, __rmap_set_dirty, false);
5633 -	if (kvm->arch.tdp_mmu_enabled)
5633 +	if (is_tdp_mmu_enabled(kvm))
5634 flush |= kvm_tdp_mmu_slot_set_dirty(kvm, memslot);
5635 write_unlock(&kvm->mmu_lock);
5636
5637 if (flush)
5638 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
5639}
5640EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);
5641

--- 11 unchanged lines hidden ---

5653 if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
5654 goto restart;
5655 if (cond_resched_rwlock_write(&kvm->mmu_lock))
5656 goto restart;
5657 }
5658
5659 kvm_mmu_commit_zap_page(kvm, &invalid_list);
5660
5661 -	if (kvm->arch.tdp_mmu_enabled)
5661 +	if (is_tdp_mmu_enabled(kvm))
5662 kvm_tdp_mmu_zap_all(kvm);
5663
5664 write_unlock(&kvm->mmu_lock);
5665}
5666
5667void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
5668{
5669 WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);

--- 294 unchanged lines hidden ---

5964 * We use a separate list instead of just using active_mmu_pages
5965 * because the number of lpage_disallowed pages is expected to
5966 * be relatively small compared to the total.
5967 */
5968 sp = list_first_entry(&kvm->arch.lpage_disallowed_mmu_pages,
5969 struct kvm_mmu_page,
5970 lpage_disallowed_link);
5971 WARN_ON_ONCE(!sp->lpage_disallowed);
5972 -	if (sp->tdp_mmu_page) {
5972 +	if (is_tdp_mmu_page(sp)) {
5973 kvm_tdp_mmu_zap_gfn_range(kvm, sp->gfn,
5974 sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level));
5975 } else {
5976 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
5977 WARN_ON_ONCE(sp->lpage_disallowed);
5978 }
5979
5980 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {

--- 60 unchanged lines hidden ---
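
Seen together, each call site keeps its shape and only the predicate changes. Reassembling the kvm_mmu_zap_all() hunk at line 5661 above, the guarded path now reads roughly:

	kvm_mmu_commit_zap_page(kvm, &invalid_list);

	if (is_tdp_mmu_enabled(kvm))
		kvm_tdp_mmu_zap_all(kvm);

	write_unlock(&kvm->mmu_lock);

If is_tdp_mmu_enabled() evaluates to a compile-time false (as in the sketch at the top), the kvm_tdp_mmu_zap_all() call becomes dead code, which is the point of routing every check through the helpers.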