#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1
#define PT_USER_SHIFT 2

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << PT_USER_SHIFT)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_SHIFT 6
#define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
#define PT_PAGE_SIZE_SHIFT 7
#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)

#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define PT_PDPE_LEVEL 3
#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1
#define PT_MAX_HUGEPAGE_LEVEL (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES - 1)

static inline u64 rsvd_bits(int s, int e)
{
	return ((1ULL << (e - s + 1)) - 1) << s;
}
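/*
 * Illustrative note (not from the original header): rsvd_bits(s, e) builds
 * a contiguous mask covering bits s..e inclusive. For example, the mask of
 * reserved physical-address bits for a CPU with a 40-bit MAXPHYADDR is
 * rsvd_bits(40, 51):
 *
 *	((1ULL << (51 - 40 + 1)) - 1) << 40
 *	  = 0xfffULL << 40
 *	  = 0x000fff0000000000ULL
 */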
void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value);

void
reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);

/*
 * Return values of handle_mmio_page_fault:
 * RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction
 *			directly.
 * RET_MMIO_PF_INVALID: an invalid spte was detected; let the real page
 *			fault path update the mmio spte.
 * RET_MMIO_PF_RETRY: let the CPU fault again on the address.
 * RET_MMIO_PF_BUG: a bug was detected (and a WARN was printed).
 */
enum {
	RET_MMIO_PF_EMULATE = 1,
	RET_MMIO_PF_INVALID = 2,
	RET_MMIO_PF_RETRY = 0,
	RET_MMIO_PF_BUG = -1
};

int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct);
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
			     bool accessed_dirty);
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
			  u64 fault_address, char *insn, int insn_len,
			  bool need_unprotect);

static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
{
	if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
		return kvm->arch.n_max_mmu_pages -
			kvm->arch.n_used_mmu_pages;

	return 0;
}

static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
		return 0;

	return kvm_mmu_load(vcpu);
}
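/*
 * Usage sketch (illustrative; the call site shown is an assumption, not
 * part of this header): kvm_mmu_reload() is cheap when the shadow roots
 * are still valid, so a caller such as the vcpu entry loop can invoke it
 * unconditionally before every guest entry:
 *
 *	r = kvm_mmu_reload(vcpu);
 *	if (unlikely(r))
 *		goto out;	(could not rebuild the shadow roots)
 *
 * Only when root_hpa is INVALID_PAGE does it fall through to the full
 * kvm_mmu_load() path.
 */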
/*
 * Currently, we have two sorts of write-protection: a) one that
 * write-protects guest pages to synchronize guest modifications, and b) one
 * used to synchronize the dirty bitmap for KVM_GET_DIRTY_LOG. The differences
 * between the two are:
 * 1) the first case clears the SPTE_MMU_WRITEABLE bit.
 * 2) the first case requires flushing the TLB immediately to avoid corrupting
 *    the shadow page table between vcpus, so it must run under the protection
 *    of mmu-lock. The second case does not need to flush the TLB until the
 *    dirty bitmap is returned to userspace; since it only write-protects
 *    pages logged in the bitmap, no page in the dirty bitmap is missed, so
 *    it can flush the TLB outside of mmu-lock.
 *
 * This creates a problem: the first case can encounter stale TLB entries
 * left by the second case, which write-protects pages without flushing the
 * TLB immediately. To make the first case aware of this, we make it flush
 * the TLB whenever it write-protects a spte whose SPTE_MMU_WRITEABLE bit is
 * set; this works because the second case never touches the
 * SPTE_MMU_WRITEABLE bit.
 *
 * In any case, whenever a spte is updated (only permission and status bits
 * change) we need to check whether a spte with SPTE_MMU_WRITEABLE set has
 * become read-only; if so, the TLB must be flushed. Fortunately,
 * mmu_spte_update() already handles this.
 *
 * The rules for using SPTE_MMU_WRITEABLE and PT_WRITABLE_MASK:
 * - to check whether a writable TLB entry may exist, or whether the spte
 *   can be made writable in the mmu mapping, check SPTE_MMU_WRITEABLE;
 *   this is the common case. Otherwise,
 * - when fixing a page fault on the spte or write-protecting for dirty
 *   logging, check PT_WRITABLE_MASK.
 *
 * TODO: introduce APIs to split these two cases.
 */
static inline int is_writable_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

static inline bool is_write_protection(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
}
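/*
 * Worked example for the branchless SMAP handling in permission_fault()
 * below (the numbers are illustrative, not from the original source):
 * take a supervisor write with cpl = 0, EFLAGS.AC = 1 and
 * pfec = PFERR_WRITE_MASK (bit 1). With X86_EFLAGS_AC_BIT = 18 and
 * PFERR_RSVD_BIT = 3:
 *
 *	smap  = (0 - 3) & X86_EFLAGS_AC = X86_EFLAGS_AC	  (bit 18 set)
 *	index = (pfec >> 1) + (smap >> 16)
 *	      = 1 + (1 << 2)
 *	      = 5
 *
 * i.e. the AC override lands in the (otherwise unused) RSVD slot of the
 * index, selecting the mmu->permissions[] entry that was precomputed with
 * SMAP checks disabled.
 */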
/*
 * Check if a given access (described through the I/D, W/R and U/S bits of a
 * page fault error code pfec) causes a permission fault with the given PTE
 * access rights (in ACC_* format).
 *
 * Return zero if the access does not fault; return the page fault error code
 * if the access faults.
 */
static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				  unsigned pte_access, unsigned pte_pkey,
				  unsigned pfec)
{
	int cpl = kvm_x86_ops->get_cpl(vcpu);
	unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);

	/*
	 * If CPL < 3, SMAP protections are disabled if EFLAGS.AC = 1.
	 *
	 * If CPL = 3, SMAP applies to all supervisor-mode data accesses
	 * (these are implicit supervisor accesses) regardless of the value
	 * of EFLAGS.AC.
	 *
	 * This computes (cpl < 3) && (rflags & X86_EFLAGS_AC), leaving
	 * the result in X86_EFLAGS_AC. We then insert it in place of
	 * the PFERR_RSVD_MASK bit; this bit will always be zero in pfec,
	 * but it will be one in index if SMAP checks are being overridden.
	 * It is important to keep this branchless.
	 */
	unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
	int index = (pfec >> 1) +
		    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
	bool fault = (mmu->permissions[index] >> pte_access) & 1;
	u32 errcode = PFERR_PRESENT_MASK;

	WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
	if (unlikely(mmu->pkru_mask)) {
		u32 pkru_bits, offset;

		/*
		 * PKRU defines 32 bits: there are 16 domains and 2
		 * attribute bits per domain in pkru. pte_pkey is the
		 * index of the protection domain, so pte_pkey * 2 is
		 * the index of the first bit for the domain.
		 */
		pkru_bits = (kvm_read_pkru(vcpu) >> (pte_pkey * 2)) & 3;

		/* clear present bit, replace PFEC.RSVD with ACC_USER_MASK */
		offset = (pfec & ~1) +
			((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));

		pkru_bits &= mmu->pkru_mask >> offset;
		errcode |= -pkru_bits & PFERR_PK_MASK;
		fault |= (pkru_bits != 0);
	}

	return -(u32)fault & errcode;
}

void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);

void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn);
int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
#endif