// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kvm_host.h>
#include <linux/page-flags.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/kvm_mmu.h>

static inline bool kvm_hugepage_capable(struct kvm_memory_slot *slot)
{
        return slot->arch.flags & KVM_MEM_HUGEPAGE_CAPABLE;
}

static inline bool kvm_hugepage_incapable(struct kvm_memory_slot *slot)
{
        return slot->arch.flags & KVM_MEM_HUGEPAGE_INCAPABLE;
}

static inline void kvm_ptw_prepare(struct kvm *kvm, kvm_ptw_ctx *ctx)
{
        ctx->level = kvm->arch.root_level;
        /* pte table */
        ctx->invalid_ptes = kvm->arch.invalid_ptes;
        ctx->pte_shifts = kvm->arch.pte_shifts;
        ctx->pgtable_shift = ctx->pte_shifts[ctx->level];
        ctx->invalid_entry = ctx->invalid_ptes[ctx->level];
        ctx->opaque = kvm;
}

/*
 * Mark a range of guest physical address space old (all accesses fault) in the
 * VM's GPA page table to allow detection of commonly used pages.
 */
static int kvm_mkold_pte(kvm_pte_t *pte, phys_addr_t addr, kvm_ptw_ctx *ctx)
{
        if (kvm_pte_young(*pte)) {
                *pte = kvm_pte_mkold(*pte);
                return 1;
        }

        return 0;
}

/*
 * Mark a range of guest physical address space clean (writes fault) in the VM's
 * GPA page table to allow dirty page tracking.
 */
static int kvm_mkclean_pte(kvm_pte_t *pte, phys_addr_t addr, kvm_ptw_ctx *ctx)
{
        gfn_t offset;
        kvm_pte_t val;

        val = *pte;
        /*
         * For kvm_arch_mmu_enable_log_dirty_pt_masked() with a mask, the
         * [start, end) range may cross huge pages. For the first huge page
         * the parameter addr equals start, but for subsequent huge pages
         * addr is the base address of that huge page rather than the start
         * or end address of the range.
         */
        if ((ctx->flag & _KVM_HAS_PGMASK) && !kvm_pte_huge(val)) {
                offset = (addr >> PAGE_SHIFT) - ctx->gfn;
                if (!(BIT(offset) & ctx->mask))
                        return 0;
        }

        /*
         * No need to split the huge page now, just set the write-protect
         * pte bit. The huge page is split on the next write fault.
         */
        if (kvm_pte_dirty(val)) {
                *pte = kvm_pte_mkclean(val);
                return 1;
        }

        return 0;
}

/*
 * Clear pte entry
 */
static int kvm_flush_pte(kvm_pte_t *pte, phys_addr_t addr, kvm_ptw_ctx *ctx)
{
        struct kvm *kvm;

        kvm = ctx->opaque;
        if (ctx->level)
                kvm->stat.hugepages--;
        else
                kvm->stat.pages--;

        *pte = ctx->invalid_entry;

        return 1;
}
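/*
 * Usage sketch (illustrative only): the callbacks above are not called
 * directly. A caller selects one of them as ctx.ops and hands the context
 * to the generic walker, roughly as kvm_mkclean_gpa_pt() further below does:
 *
 *	kvm_ptw_ctx ctx;
 *
 *	ctx.ops  = kvm_mkclean_pte;
 *	ctx.flag = 0;
 *	kvm_ptw_prepare(kvm, &ctx);
 *	kvm_ptw_top(kvm->arch.pgd, start_gfn << PAGE_SHIFT,
 *		    end_gfn << PAGE_SHIFT, &ctx);
 */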
/*
 * kvm_pgd_alloc() - Allocate and initialise a KVM GPA page directory.
 *
 * Allocate a blank KVM GPA page directory (PGD) for representing guest physical
 * to host physical page mappings.
 *
 * Returns:	Pointer to new KVM GPA page directory.
 *		NULL on allocation failure.
 */
kvm_pte_t *kvm_pgd_alloc(void)
{
        kvm_pte_t *pgd;

        pgd = (kvm_pte_t *)__get_free_pages(GFP_KERNEL, 0);
        if (pgd)
                pgd_init((void *)pgd);

        return pgd;
}

static void _kvm_pte_init(void *addr, unsigned long val)
{
        unsigned long *p, *end;

        p = (unsigned long *)addr;
        end = p + PTRS_PER_PTE;
        do {
                p[0] = val;
                p[1] = val;
                p[2] = val;
                p[3] = val;
                p[4] = val;
                p += 8;
                p[-3] = val;
                p[-2] = val;
                p[-1] = val;
        } while (p != end);
}

/*
 * Caller must hold kvm->mm_lock
 *
 * Walk the page tables of kvm to find the PTE corresponding to the
 * address @addr. If page tables don't exist for @addr, they will be created
 * from the MMU cache if @cache is not NULL.
 */
static kvm_pte_t *kvm_populate_gpa(struct kvm *kvm,
                                struct kvm_mmu_memory_cache *cache,
                                unsigned long addr, int level)
{
        kvm_ptw_ctx ctx;
        kvm_pte_t *entry, *child;

        kvm_ptw_prepare(kvm, &ctx);
        child = kvm->arch.pgd;
        while (ctx.level > level) {
                entry = kvm_pgtable_offset(&ctx, child, addr);
                if (kvm_pte_none(&ctx, entry)) {
                        if (!cache)
                                return NULL;

                        child = kvm_mmu_memory_cache_alloc(cache);
                        _kvm_pte_init(child, ctx.invalid_ptes[ctx.level - 1]);
                        kvm_set_pte(entry, __pa(child));
                } else if (kvm_pte_huge(*entry)) {
                        return entry;
                } else
                        child = (kvm_pte_t *)__va(PHYSADDR(*entry));
                kvm_ptw_enter(&ctx);
        }

        entry = kvm_pgtable_offset(&ctx, child, addr);

        return entry;
}
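/*
 * Lookup-only sketch (illustrative, mirrors kvm_test_age_gfn() further
 * below): passing a NULL cache turns kvm_populate_gpa() into a pure lookup
 * that never allocates intermediate page tables:
 *
 *	kvm_pte_t *ptep = kvm_populate_gpa(kvm, NULL, gpa, 0);
 *
 *	if (ptep && kvm_pte_present(NULL, ptep))
 *		;	(a mapping for @gpa exists)
 */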
/*
 * Page walker for VM shadow mmu at last level
 * The last level is small pte page or huge pmd page
 */
static int kvm_ptw_leaf(kvm_pte_t *dir, phys_addr_t addr, phys_addr_t end, kvm_ptw_ctx *ctx)
{
        int ret;
        phys_addr_t next, start, size;
        struct list_head *list;
        kvm_pte_t *entry, *child;

        ret = 0;
        start = addr;
        child = (kvm_pte_t *)__va(PHYSADDR(*dir));
        entry = kvm_pgtable_offset(ctx, child, addr);
        do {
                next = addr + (0x1UL << ctx->pgtable_shift);
                if (!kvm_pte_present(ctx, entry))
                        continue;

                ret |= ctx->ops(entry, addr, ctx);
        } while (entry++, addr = next, addr < end);

        if (kvm_need_flush(ctx)) {
                size = 0x1UL << (ctx->pgtable_shift + PAGE_SHIFT - 3);
                if (start + size == end) {
                        list = (struct list_head *)child;
                        list_add_tail(list, &ctx->list);
                        *dir = ctx->invalid_ptes[ctx->level + 1];
                }
        }

        return ret;
}

/*
 * Page walker for VM shadow mmu at page table dir level
 */
static int kvm_ptw_dir(kvm_pte_t *dir, phys_addr_t addr, phys_addr_t end, kvm_ptw_ctx *ctx)
{
        int ret;
        phys_addr_t next, start, size;
        struct list_head *list;
        kvm_pte_t *entry, *child;

        ret = 0;
        start = addr;
        child = (kvm_pte_t *)__va(PHYSADDR(*dir));
        entry = kvm_pgtable_offset(ctx, child, addr);
        do {
                next = kvm_pgtable_addr_end(ctx, addr, end);
                if (!kvm_pte_present(ctx, entry))
                        continue;

                if (kvm_pte_huge(*entry)) {
                        ret |= ctx->ops(entry, addr, ctx);
                        continue;
                }

                kvm_ptw_enter(ctx);
                if (ctx->level == 0)
                        ret |= kvm_ptw_leaf(entry, addr, next, ctx);
                else
                        ret |= kvm_ptw_dir(entry, addr, next, ctx);
                kvm_ptw_exit(ctx);
        } while (entry++, addr = next, addr < end);

        if (kvm_need_flush(ctx)) {
                size = 0x1UL << (ctx->pgtable_shift + PAGE_SHIFT - 3);
                if (start + size == end) {
                        list = (struct list_head *)child;
                        list_add_tail(list, &ctx->list);
                        *dir = ctx->invalid_ptes[ctx->level + 1];
                }
        }

        return ret;
}

/*
 * Page walker for VM shadow mmu at page root table
 */
static int kvm_ptw_top(kvm_pte_t *dir, phys_addr_t addr, phys_addr_t end, kvm_ptw_ctx *ctx)
{
        int ret;
        phys_addr_t next;
        kvm_pte_t *entry;

        ret = 0;
        entry = kvm_pgtable_offset(ctx, dir, addr);
        do {
                next = kvm_pgtable_addr_end(ctx, addr, end);
                if (!kvm_pte_present(ctx, entry))
                        continue;

                kvm_ptw_enter(ctx);
                ret |= kvm_ptw_dir(entry, addr, next, ctx);
                kvm_ptw_exit(ctx);
        } while (entry++, addr = next, addr < end);

        return ret;
}

/*
 * kvm_flush_range() - Flush a range of guest physical addresses.
 * @kvm:	KVM pointer.
 * @start_gfn:	Guest frame number of first page in GPA range to flush.
 * @end_gfn:	Guest frame number of last page in GPA range to flush.
 * @lock:	Whether to hold mmu_lock or not
 *
 * Flushes a range of GPA mappings from the GPA page tables.
 */
static void kvm_flush_range(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn, int lock)
{
        int ret;
        kvm_ptw_ctx ctx;
        struct list_head *pos, *temp;

        ctx.ops = kvm_flush_pte;
        ctx.flag = _KVM_FLUSH_PGTABLE;
        kvm_ptw_prepare(kvm, &ctx);
        INIT_LIST_HEAD(&ctx.list);

        if (lock) {
                spin_lock(&kvm->mmu_lock);
                ret = kvm_ptw_top(kvm->arch.pgd, start_gfn << PAGE_SHIFT,
                                end_gfn << PAGE_SHIFT, &ctx);
                spin_unlock(&kvm->mmu_lock);
        } else
                ret = kvm_ptw_top(kvm->arch.pgd, start_gfn << PAGE_SHIFT,
                                end_gfn << PAGE_SHIFT, &ctx);

        /* Flush vpid for each vCPU individually */
        if (ret)
                kvm_flush_remote_tlbs(kvm);

        /*
         * Free the pte table pages after dropping mmu_lock.
         * The pages to be freed were linked onto ctx.list by the walker.
         */
        list_for_each_safe(pos, temp, &ctx.list) {
                list_del(pos);
                free_page((unsigned long)pos);
        }
}

/*
 * kvm_mkclean_gpa_pt() - Make a range of guest physical addresses clean.
 * @kvm:	KVM pointer.
 * @start_gfn:	Guest frame number of first page in GPA range to flush.
 * @end_gfn:	Guest frame number of last page in GPA range to flush.
 *
 * Make a range of GPA mappings clean so that guest writes will fault and
 * trigger dirty page logging.
 *
 * The caller must hold the @kvm->mmu_lock spinlock.
 *
 * Returns:	Whether any GPA mappings were modified, which would require
 *		derived mappings (GVA page tables & TLB entries) to be
 *		invalidated.
 */
static int kvm_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
{
        kvm_ptw_ctx ctx;

        ctx.ops = kvm_mkclean_pte;
        ctx.flag = 0;
        kvm_ptw_prepare(kvm, &ctx);
        return kvm_ptw_top(kvm->arch.pgd, start_gfn << PAGE_SHIFT, end_gfn << PAGE_SHIFT, &ctx);
}
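/*
 * Worked example (illustrative) for the function below: with
 * base_gfn = slot->base_gfn + gfn_offset = 0x100 and mask = 0xf0f, the walk
 * covers gfns [0x100, 0x10c), since start = base_gfn + __ffs(mask) = 0x100
 * and end = base_gfn + __fls(mask) + 1 = 0x10c; kvm_mkclean_pte() then
 * skips every small pte whose bit is clear in @mask.
 */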
/*
 * kvm_arch_mmu_enable_log_dirty_pt_masked() - write protect dirty pages
 * @kvm:	The KVM pointer
 * @slot:	The memory slot associated with mask
 * @gfn_offset:	The gfn offset in memory slot
 * @mask:	The mask of dirty pages at offset 'gfn_offset' in this memory
 *		slot to be write protected
 *
 * Walks the bits set in @mask and write protects the associated PTEs. The
 * caller must acquire @kvm->mmu_lock.
 */
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
                struct kvm_memory_slot *slot, gfn_t gfn_offset, unsigned long mask)
{
        kvm_ptw_ctx ctx;
        gfn_t base_gfn = slot->base_gfn + gfn_offset;
        gfn_t start = base_gfn + __ffs(mask);
        gfn_t end = base_gfn + __fls(mask) + 1;

        ctx.ops = kvm_mkclean_pte;
        ctx.flag = _KVM_HAS_PGMASK;
        ctx.mask = mask;
        ctx.gfn = base_gfn;
        kvm_ptw_prepare(kvm, &ctx);

        kvm_ptw_top(kvm->arch.pgd, start << PAGE_SHIFT, end << PAGE_SHIFT, &ctx);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm, const struct kvm_memory_slot *old,
                                   struct kvm_memory_slot *new, enum kvm_mr_change change)
{
        gpa_t gpa_start;
        hva_t hva_start;
        size_t size, gpa_offset, hva_offset;

        if ((change != KVM_MR_MOVE) && (change != KVM_MR_CREATE))
                return 0;
        /*
         * Prevent userspace from creating a memory region outside of the
         * VM GPA address space
         */
        if ((new->base_gfn + new->npages) > (kvm->arch.gpa_size >> PAGE_SHIFT))
                return -ENOMEM;

        new->arch.flags = 0;
        size = new->npages * PAGE_SIZE;
        gpa_start = new->base_gfn << PAGE_SHIFT;
        hva_start = new->userspace_addr;
        if (IS_ALIGNED(size, PMD_SIZE) && IS_ALIGNED(gpa_start, PMD_SIZE)
                        && IS_ALIGNED(hva_start, PMD_SIZE))
                new->arch.flags |= KVM_MEM_HUGEPAGE_CAPABLE;
        else {
                /*
                 * Pages belonging to memslots that don't have the same
                 * alignment within a PMD for userspace and GPA cannot be
                 * mapped with PMD entries, because we'll end up mapping
                 * the wrong pages.
                 *
                 * Consider a layout like the following:
                 *
                 *    memslot->userspace_addr:
                 *    +-----+--------------------+--------------------+---+
                 *    |abcde|fgh  Stage-1 block  |    Stage-1 block tv|xyz|
                 *    +-----+--------------------+--------------------+---+
                 *
                 *    memslot->base_gfn << PAGE_SIZE:
                 *      +---+--------------------+--------------------+-----+
                 *      |abc|def  Stage-2 block  |    Stage-2 block   |tvxyz|
                 *      +---+--------------------+--------------------+-----+
                 *
                 * If we create those stage-2 blocks, we'll end up with this
                 * incorrect mapping:
                 *   d -> f
                 *   e -> g
                 *   f -> h
                 */
                gpa_offset = gpa_start & (PMD_SIZE - 1);
                hva_offset = hva_start & (PMD_SIZE - 1);
                if (gpa_offset != hva_offset) {
                        new->arch.flags |= KVM_MEM_HUGEPAGE_INCAPABLE;
                } else {
                        if (gpa_offset == 0)
                                gpa_offset = PMD_SIZE;
                        if ((size + gpa_offset) < (PMD_SIZE * 2))
                                new->arch.flags |= KVM_MEM_HUGEPAGE_INCAPABLE;
                }
        }

        return 0;
}
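/*
 * Worked example (illustrative, assuming PMD_SIZE == 2MB purely for round
 * numbers): a slot with gpa_start = 0x40100000 and hva_start = 0x7f2a00100000
 * has gpa_offset == hva_offset == 0x100000, so PMD mappings remain possible
 * provided the slot spans at least one full PMD block, i.e.
 * size + gpa_offset >= 2 * PMD_SIZE; otherwise the check above marks it
 * KVM_MEM_HUGEPAGE_INCAPABLE.
 */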
void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *old,
                                   const struct kvm_memory_slot *new,
                                   enum kvm_mr_change change)
{
        int needs_flush;

        /*
         * If dirty page logging is enabled, write protect all pages in the slot
         * ready for dirty logging.
         *
         * There is no need to do this in any of the following cases:
         * CREATE:	No dirty mappings will already exist.
         * MOVE/DELETE:	The old mappings will already have been cleaned up by
         *		kvm_arch_flush_shadow_memslot()
         */
        if (change == KVM_MR_FLAGS_ONLY &&
            (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
             new->flags & KVM_MEM_LOG_DIRTY_PAGES)) {
                spin_lock(&kvm->mmu_lock);
                /* Write protect GPA page table entries */
                needs_flush = kvm_mkclean_gpa_pt(kvm, new->base_gfn,
                                new->base_gfn + new->npages);
                spin_unlock(&kvm->mmu_lock);
                if (needs_flush)
                        kvm_flush_remote_tlbs(kvm);
        }
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
        kvm_flush_range(kvm, 0, kvm->arch.gpa_size >> PAGE_SHIFT, 0);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
        /*
         * The slot has been made invalid (ready for moving or deletion), so we
         * need to ensure that it can no longer be accessed by any guest vCPUs.
         */
        kvm_flush_range(kvm, slot->base_gfn, slot->base_gfn + slot->npages, 1);
}

bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
        kvm_ptw_ctx ctx;

        ctx.flag = 0;
        ctx.ops = kvm_flush_pte;
        kvm_ptw_prepare(kvm, &ctx);
        INIT_LIST_HEAD(&ctx.list);

        return kvm_ptw_top(kvm->arch.pgd, range->start << PAGE_SHIFT,
                        range->end << PAGE_SHIFT, &ctx);
}

bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
        kvm_ptw_ctx ctx;

        ctx.flag = 0;
        ctx.ops = kvm_mkold_pte;
        kvm_ptw_prepare(kvm, &ctx);

        return kvm_ptw_top(kvm->arch.pgd, range->start << PAGE_SHIFT,
                        range->end << PAGE_SHIFT, &ctx);
}

bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
        gpa_t gpa = range->start << PAGE_SHIFT;
        kvm_pte_t *ptep = kvm_populate_gpa(kvm, NULL, gpa, 0);

        if (ptep && kvm_pte_present(NULL, ptep) && kvm_pte_young(*ptep))
                return true;

        return false;
}
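/*
 * Note on the three kvm_gfn_range hooks above: each one reuses the generic
 * walker with a different ops callback (kvm_flush_pte or kvm_mkold_pte) or
 * does a direct lookup, and the boolean result is handed back to the common
 * KVM MMU notifier code (e.g. whether mappings were zapped or the range was
 * young).
 */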
/*
 * kvm_map_page_fast() - Fast path GPA fault handler.
 * @vcpu:	vCPU pointer.
 * @gpa:	Guest physical address of fault.
 * @write:	Whether the fault was due to a write.
 *
 * Perform fast path GPA fault handling, doing all that can be done without
 * calling into KVM. This handles marking old pages young (for idle page
 * tracking), and dirtying of clean pages (for dirty page logging).
 *
 * Returns:	0 on success, in which case we can update derived mappings and
 *		resume guest execution.
 *		-EFAULT on failure due to absent GPA mapping or write to
 *		read-only page, in which case KVM must be consulted.
 */
static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
{
        int ret = 0;
        kvm_pfn_t pfn = 0;
        kvm_pte_t *ptep, changed, new;
        gfn_t gfn = gpa >> PAGE_SHIFT;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_memory_slot *slot;

        spin_lock(&kvm->mmu_lock);

        /* Fast path - just check GPA page table for an existing entry */
        ptep = kvm_populate_gpa(kvm, NULL, gpa, 0);
        if (!ptep || !kvm_pte_present(NULL, ptep)) {
                ret = -EFAULT;
                goto out;
        }

        /* Track access to pages marked old */
        new = *ptep;
        if (!kvm_pte_young(new))
                new = kvm_pte_mkyoung(new);
        /* call kvm_set_pfn_accessed() after unlock */

        if (write && !kvm_pte_dirty(new)) {
                if (!kvm_pte_write(new)) {
                        ret = -EFAULT;
                        goto out;
                }

                if (kvm_pte_huge(new)) {
                        /*
                         * Do not set write permission when dirty logging is
                         * enabled for HugePages
                         */
                        slot = gfn_to_memslot(kvm, gfn);
                        if (kvm_slot_dirty_track_enabled(slot)) {
                                ret = -EFAULT;
                                goto out;
                        }
                }

                /* Track dirtying of writeable pages */
                new = kvm_pte_mkdirty(new);
        }

        changed = new ^ (*ptep);
        if (changed) {
                kvm_set_pte(ptep, new);
                pfn = kvm_pte_pfn(new);
        }
        spin_unlock(&kvm->mmu_lock);

        /*
         * Fixme: pfn may be freed after mmu_lock
         * kvm_try_get_pfn(pfn)/kvm_release_pfn pair to prevent this?
         */
        if (kvm_pte_young(changed))
                kvm_set_pfn_accessed(pfn);

        if (kvm_pte_dirty(changed)) {
                mark_page_dirty(kvm, gfn);
                kvm_set_pfn_dirty(pfn);
        }
        return ret;
out:
        spin_unlock(&kvm->mmu_lock);
        return ret;
}

static bool fault_supports_huge_mapping(struct kvm_memory_slot *memslot,
                                unsigned long hva, bool write)
{
        hva_t start, end;

        /* Disable dirty logging on HugePages */
        if (kvm_slot_dirty_track_enabled(memslot) && write)
                return false;

        if (kvm_hugepage_capable(memslot))
                return true;

        if (kvm_hugepage_incapable(memslot))
                return false;

        start = memslot->userspace_addr;
        end = start + memslot->npages * PAGE_SIZE;

        /*
         * Next, let's make sure we're not trying to map anything not covered
         * by the memslot. This means we have to prohibit block size mappings
         * for the beginning and end of a non-block aligned and non-block sized
         * memory slot (illustrated by the head and tail parts of the
         * userspace view above containing pages 'abcde' and 'xyz',
         * respectively).
         *
         * Note that it doesn't matter if we do the check using the
         * userspace_addr or the base_gfn, as both are equally aligned (per
         * the check above) and equally sized.
         */
        return (hva >= ALIGN(start, PMD_SIZE)) && (hva < ALIGN_DOWN(end, PMD_SIZE));
}
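/*
 * Worked example (illustrative, again assuming PMD_SIZE == 2MB): for a slot
 * with userspace_addr = 0x7f0000001000 and end = 0x7f0000601000, the check
 * above only allows huge mappings for hva in
 * [ALIGN(start, PMD_SIZE), ALIGN_DOWN(end, PMD_SIZE)) =
 * [0x7f0000200000, 0x7f0000600000); faults on the unaligned head and tail
 * fall back to small pages.
 */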
/*
 * Lookup the mapping level for @gfn in the current mm.
 *
 * WARNING! Use of host_pfn_mapping_level() requires the caller and the end
 * consumer to be tied into KVM's handlers for MMU notifier events!
 *
 * There are several ways to safely use this helper:
 *
 * - Check mmu_invalidate_retry_gfn() after grabbing the mapping level, before
 *   consuming it. In this case, mmu_lock doesn't need to be held during the
 *   lookup, but it does need to be held while checking the MMU notifier.
 *
 * - Hold mmu_lock AND ensure there is no in-progress MMU notifier invalidation
 *   event for the hva. This can be done by explicitly checking the MMU notifier
 *   or by ensuring that KVM already has a valid mapping that covers the hva.
 *
 * - Do not use the result to install new mappings, e.g. use the host mapping
 *   level only to decide whether or not to zap an entry. In this case, it's
 *   not required to hold mmu_lock (though it's highly likely the caller will
 *   want to hold mmu_lock anyways, e.g. to modify SPTEs).
 *
 * Note! The lookup can still race with modifications to host page tables, but
 * the above "rules" ensure KVM will not _consume_ the result of the walk if a
 * race with the primary MMU occurs.
 */
static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
                                const struct kvm_memory_slot *slot)
{
        int level = 0;
        unsigned long hva;
        unsigned long flags;
        pgd_t pgd;
        p4d_t p4d;
        pud_t pud;
        pmd_t pmd;

        /*
         * Note, using the already-retrieved memslot and __gfn_to_hva_memslot()
         * is not solely for performance, it's also necessary to avoid the
         * "writable" check in __gfn_to_hva_many(), which will always fail on
         * read-only memslots due to gfn_to_hva() assuming writes. Earlier
         * page fault steps have already verified the guest isn't writing a
         * read-only memslot.
         */
        hva = __gfn_to_hva_memslot(slot, gfn);

        /*
         * Disable IRQs to prevent concurrent tear down of host page tables,
         * e.g. if the primary MMU promotes a P*D to a huge page and then frees
         * the original page table.
         */
        local_irq_save(flags);

        /*
         * Read each entry once. As above, a non-leaf entry can be promoted to
         * a huge page _during_ this walk. Re-reading the entry could send the
         * walk into the weeds, e.g. p*d_leaf() returns false (sees the old
         * value) and then p*d_offset() walks into the target huge page instead
         * of the old page table (sees the new value).
         */
        pgd = READ_ONCE(*pgd_offset(kvm->mm, hva));
        if (pgd_none(pgd))
                goto out;

        p4d = READ_ONCE(*p4d_offset(&pgd, hva));
        if (p4d_none(p4d) || !p4d_present(p4d))
                goto out;

        pud = READ_ONCE(*pud_offset(&p4d, hva));
        if (pud_none(pud) || !pud_present(pud))
                goto out;

        pmd = READ_ONCE(*pmd_offset(&pud, hva));
        if (pmd_none(pmd) || !pmd_present(pmd))
                goto out;

        if (kvm_pte_huge(pmd_val(pmd)))
                level = 1;

out:
        local_irq_restore(flags);
        return level;
}

/*
 * Split huge page
 */
static kvm_pte_t *kvm_split_huge(struct kvm_vcpu *vcpu, kvm_pte_t *ptep, gfn_t gfn)
{
        int i;
        kvm_pte_t val, *child;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_mmu_memory_cache *memcache;

        memcache = &vcpu->arch.mmu_page_cache;
        child = kvm_mmu_memory_cache_alloc(memcache);
        val = kvm_pte_mksmall(*ptep);
        for (i = 0; i < PTRS_PER_PTE; i++) {
                kvm_set_pte(child + i, val);
                val += PAGE_SIZE;
        }

        /* The later kvm_flush_tlb_gpa() will flush hugepage tlb */
        kvm_set_pte(ptep, __pa(child));

        kvm->stat.hugepages--;
        kvm->stat.pages += PTRS_PER_PTE;

        return child + (gfn & (PTRS_PER_PTE - 1));
}
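/*
 * Sketch of the result of kvm_split_huge() above (illustrative, assuming
 * 16KB pages so that PTRS_PER_PTE == 2048): the huge mapping is rewritten
 * as a table of 2048 small ptes whose physical addresses advance by
 * PAGE_SIZE each, and the returned pointer is the entry for the faulting
 * gfn, i.e. child + (gfn & (PTRS_PER_PTE - 1)).
 */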
/*
 * kvm_map_page() - Map a guest physical page.
 * @vcpu:	vCPU pointer.
 * @gpa:	Guest physical address of fault.
 * @write:	Whether the fault was due to a write.
 *
 * Handle GPA faults by creating a new GPA mapping (or updating an existing
 * one).
 *
 * This takes care of marking pages young or dirty (idle/dirty page tracking),
 * asking KVM for the corresponding PFN, and creating a mapping in the GPA page
 * tables. Derived mappings (GVA page tables and TLBs) must be handled by the
 * caller.
 *
 * Returns:	0 on success
 *		-EFAULT if there is no memory region at @gpa or a write was
 *		attempted to a read-only memory region. This is usually handled
 *		as an MMIO access.
 */
static int kvm_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
{
        bool writeable;
        int srcu_idx, err, retry_no = 0, level;
        unsigned long hva, mmu_seq, prot_bits;
        kvm_pfn_t pfn;
        kvm_pte_t *ptep, new_pte;
        gfn_t gfn = gpa >> PAGE_SHIFT;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_memory_slot *memslot;
        struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;

        /* Try the fast path to handle old / clean pages */
        srcu_idx = srcu_read_lock(&kvm->srcu);
        err = kvm_map_page_fast(vcpu, gpa, write);
        if (!err)
                goto out;

        memslot = gfn_to_memslot(kvm, gfn);
        hva = gfn_to_hva_memslot_prot(memslot, gfn, &writeable);
        if (kvm_is_error_hva(hva) || (write && !writeable)) {
                err = -EFAULT;
                goto out;
        }

        /* We need a minimum of cached pages ready for page table creation */
        err = kvm_mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES);
        if (err)
                goto out;

retry:
        /*
         * Used to check for invalidations in progress, of the pfn that is
         * returned by gfn_to_pfn_prot() below.
         */
        mmu_seq = kvm->mmu_invalidate_seq;
        /*
         * Ensure the read of mmu_invalidate_seq isn't reordered with PTE reads in
         * gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
         * risk the page we get a reference to getting unmapped before we have a
         * chance to grab the mmu_lock without mmu_invalidate_retry() noticing.
         *
         * This smp_rmb() pairs with the effective smp_wmb() of the combination
         * of the pte_unmap_unlock() after the PTE is zapped, and the
         * spin_lock() in kvm_mmu_notifier_invalidate_<page|range_end>() before
         * mmu_invalidate_seq is incremented.
         */
        smp_rmb();

        /* Slow path - ask KVM core whether we can access this GPA */
        pfn = gfn_to_pfn_prot(kvm, gfn, write, &writeable);
        if (is_error_noslot_pfn(pfn)) {
                err = -EFAULT;
                goto out;
        }

        /* Check if an invalidation has taken place since we got pfn */
        spin_lock(&kvm->mmu_lock);
        if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn)) {
                /*
                 * This can happen when mappings are changed asynchronously, but
                 * also synchronously if a COW is triggered by
                 * gfn_to_pfn_prot().
                 */
                spin_unlock(&kvm->mmu_lock);
                kvm_release_pfn_clean(pfn);
                if (retry_no > 100) {
                        retry_no = 0;
                        schedule();
                }
                retry_no++;
                goto retry;
        }

        /*
         * For emulated devices, such as a virtio device, the actual cache
         * attribute is determined by the physical machine.
         * For a pass-through physical device, it should be uncachable.
         */
        prot_bits = _PAGE_PRESENT | __READABLE;
        if (pfn_valid(pfn))
                prot_bits |= _CACHE_CC;
        else
                prot_bits |= _CACHE_SUC;

        if (writeable) {
                prot_bits |= _PAGE_WRITE;
                if (write)
                        prot_bits |= __WRITEABLE;
        }

        /* Disable dirty logging on HugePages */
        level = 0;
        if (!fault_supports_huge_mapping(memslot, hva, write)) {
                level = 0;
        } else {
                level = host_pfn_mapping_level(kvm, gfn, memslot);
                if (level == 1) {
                        gfn = gfn & ~(PTRS_PER_PTE - 1);
                        pfn = pfn & ~(PTRS_PER_PTE - 1);
                }
        }

        /* Ensure page tables are allocated */
        ptep = kvm_populate_gpa(kvm, memcache, gpa, level);
        new_pte = kvm_pfn_pte(pfn, __pgprot(prot_bits));
        if (level == 1) {
                new_pte = kvm_pte_mkhuge(new_pte);
                /*
                 * The previous pmd entry was the invalid_pte_table, so stale
                 * small-page TLB entries may exist for this range and need
                 * to be flushed for the current vCPU.
                 */
                kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
                ++kvm->stat.hugepages;
        } else if (kvm_pte_huge(*ptep) && write)
                ptep = kvm_split_huge(vcpu, ptep, gfn);
        else
                ++kvm->stat.pages;
        kvm_set_pte(ptep, new_pte);
        spin_unlock(&kvm->mmu_lock);

        if (prot_bits & _PAGE_DIRTY) {
                mark_page_dirty_in_slot(kvm, memslot, gfn);
                kvm_set_pfn_dirty(pfn);
        }

        kvm_set_pfn_accessed(pfn);
        kvm_release_pfn_clean(pfn);
out:
        srcu_read_unlock(&kvm->srcu, srcu_idx);
        return err;
}
int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
{
        int ret;

        ret = kvm_map_page(vcpu, gpa, write);
        if (ret)
                return ret;

        /* Invalidate this entry in the TLB */
        kvm_flush_tlb_gpa(vcpu, gpa);

        return 0;
}

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}

void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
                                        const struct kvm_memory_slot *memslot)
{
        kvm_flush_remote_tlbs(kvm);
}