/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include "cpuid.h"

extern bool __read_mostly enable_mmio_caching;

#define PT_WRITABLE_SHIFT 1
#define PT_USER_SHIFT 2

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << PT_USER_SHIFT)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_SHIFT 6
#define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
#define PT_PAGE_SIZE_SHIFT 7
#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT64_ROOT_5LEVEL 5
#define PT64_ROOT_4LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define KVM_MMU_CR4_ROLE_BITS (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_LA57 | \
			       X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE)

#define KVM_MMU_CR0_ROLE_BITS (X86_CR0_PG | X86_CR0_WP)
#define KVM_MMU_EFER_ROLE_BITS (EFER_LME | EFER_NX)

static __always_inline u64 rsvd_bits(int s, int e)
{
	BUILD_BUG_ON(__builtin_constant_p(e) && __builtin_constant_p(s) && e < s);

	if (__builtin_constant_p(e))
		BUILD_BUG_ON(e > 63);
	else
		e &= 63;

	if (e < s)
		return 0;

	return ((2ULL << (e - s)) - 1) << s;
}
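
/*
 * Example (illustrative): on a CPU with MAXPHYADDR == 46, PTE bits 51:46
 * are reserved, and rsvd_bits(46, 51) builds the matching mask
 * 0x000fc00000000000.  Using (2ULL << (e - s)) - 1 instead of
 * (1ULL << (e - s + 1)) - 1 keeps the shift count <= 63 even for the
 * full-width case rsvd_bits(0, 63), avoiding undefined behavior.
 */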

/*
 * The number of non-reserved physical address bits irrespective of features
 * that repurpose legal bits, e.g. MKTME.
 */
extern u8 __read_mostly shadow_phys_bits;

static inline gfn_t kvm_mmu_max_gfn(void)
{
	/*
	 * Note that this uses the host MAXPHYADDR, not the guest's.
	 * EPT/NPT cannot support GPAs that would exceed host.MAXPHYADDR;
	 * assuming KVM is running on bare metal, guest accesses beyond
	 * host.MAXPHYADDR will hit a #PF(RSVD) and never cause a vmexit
	 * (either EPT Violation/Misconfig or #NPF), and so KVM will never
	 * install a SPTE for such addresses.  If KVM is running as a VM
	 * itself, on the other hand, it might see a MAXPHYADDR that is less
	 * than hardware's real MAXPHYADDR.  Using the host MAXPHYADDR
	 * disallows such SPTEs entirely and simplifies the TDP MMU.
	 */
	int max_gpa_bits = likely(tdp_enabled) ? shadow_phys_bits : 52;

	return (1ULL << (max_gpa_bits - PAGE_SHIFT)) - 1;
}
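
/*
 * Example (illustrative): with tdp_enabled and shadow_phys_bits == 46,
 * kvm_mmu_max_gfn() returns (1ULL << 34) - 1, i.e. the gfn of the last
 * 4KiB frame addressable with a 46-bit physical address.
 */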

static inline u8 kvm_get_shadow_phys_bits(void)
{
	/*
	 * boot_cpu_data.x86_phys_bits is reduced when MKTME or SME are detected
	 * in CPU detection code, but the processor treats those reduced bits as
	 * 'keyID' bits, thus they are not reserved.  KVM therefore needs to
	 * look at the physical address bits reported by CPUID.
	 */
	if (likely(boot_cpu_data.extended_cpuid_level >= 0x80000008))
		return cpuid_eax(0x80000008) & 0xff;

	/*
	 * Quite weird to have VMX or SVM but not MAXPHYADDR; probably a VM with
	 * custom CPUID.  Proceed with whatever the kernel found since these
	 * features aren't virtualizable (SME/SEV also require CPUIDs higher
	 * than 0x80000008).
	 */
	return boot_cpu_data.x86_phys_bits;
}
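
/*
 * Note: EAX[7:0] of CPUID leaf 0x80000008 reports the number of physical
 * address bits, e.g. a raw value of 0x2e means MAXPHYADDR == 46.
 */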

u8 kvm_mmu_get_max_tdp_level(void);

void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask);
void kvm_mmu_set_me_spte_mask(u64 me_value, u64 me_mask);
void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only);

void kvm_init_mmu(struct kvm_vcpu *vcpu);
void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
			     unsigned long cr4, u64 efer, gpa_t nested_cr3);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
			     int huge_page_level, bool accessed_dirty,
			     gpa_t new_eptp);
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
				u64 fault_address, char *insn, int insn_len);
void __kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
					struct kvm_mmu *mmu);

int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_free_obsolete_roots(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu);
void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
			 int bytes);

static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->arch.mmu->root.hpa != INVALID_PAGE))
		return 0;

	return kvm_mmu_load(vcpu);
}

static inline unsigned long kvm_get_pcid(struct kvm_vcpu *vcpu, gpa_t cr3)
{
	BUILD_BUG_ON((X86_CR3_PCID_MASK & PAGE_MASK) != 0);

	return kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)
	       ? cr3 & X86_CR3_PCID_MASK
	       : 0;
}
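
/*
 * Example (illustrative): with CR4.PCIDE == 1 and CR3 == 0x1234005, the
 * active PCID is CR3[11:0] == 0x005; with CR4.PCIDE == 0 the PCID is
 * always 0, regardless of CR3[11:0].
 */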

static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
{
	return kvm_get_pcid(vcpu, kvm_read_cr3(vcpu));
}

static inline unsigned long kvm_get_active_cr3_lam_bits(struct kvm_vcpu *vcpu)
{
	if (!guest_can_use(vcpu, X86_FEATURE_LAM))
		return 0;

	return kvm_read_cr3(vcpu) & (X86_CR3_LAM_U48 | X86_CR3_LAM_U57);
}
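
/*
 * Note: X86_CR3_LAM_U48 and X86_CR3_LAM_U57 are CR3 bits 61 and 62; they
 * select linear-address masking for user pointers and are only meaningful
 * when the guest can use X86_FEATURE_LAM, hence the early return above.
 */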

static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
{
	u64 root_hpa = vcpu->arch.mmu->root.hpa;

	if (!VALID_PAGE(root_hpa))
		return;

	static_call(kvm_x86_load_mmu_pgd)(vcpu, root_hpa,
					  vcpu->arch.mmu->root_role.level);
}

static inline void kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
						    struct kvm_mmu *mmu)
{
	/*
	 * When EPT is enabled, KVM may passthrough CR0.WP to the guest, i.e.
	 * @mmu's snapshot of CR0.WP and thus all related paging metadata may
	 * be stale.  Refresh CR0.WP and the metadata on-demand when checking
	 * for permission faults.  Exempt nested MMUs, i.e. MMUs for shadowing
	 * nEPT and nNPT, as CR0.WP is ignored in both cases.  Note, KVM does
	 * need to refresh nested_mmu, a.k.a. the walker used to translate L2
	 * GVAs to GPAs, as that "MMU" needs to honor L2's CR0.WP.
	 */
	if (!tdp_enabled || mmu == &vcpu->arch.guest_mmu)
		return;

	__kvm_mmu_refresh_passthrough_bits(vcpu, mmu);
}

/*
 * Check if a given access (described through the I/D, W/R and U/S bits of a
 * page fault error code pfec) causes a permission fault with the given PTE
 * access rights (in ACC_* format).
 *
 * Return zero if the access does not fault; return the page fault error code
 * if the access faults.
 */
static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				  unsigned pte_access, unsigned pte_pkey,
				  u64 access)
{
	/* strip nested paging fault error codes */
	unsigned int pfec = access;
	unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu);

	/*
	 * For explicit supervisor accesses, SMAP is disabled if EFLAGS.AC = 1.
	 * For implicit supervisor accesses, SMAP cannot be overridden.
	 *
	 * SMAP applies to supervisor accesses only; for user accesses,
	 * not_smap may be set or clear with no bearing on the result.
	 *
	 * We put the SMAP checking bit in place of the PFERR_RSVD_MASK bit;
	 * this bit will always be zero in pfec, but it will be one in index
	 * if SMAP checks are being disabled.
	 */
	u64 implicit_access = access & PFERR_IMPLICIT_ACCESS;
	bool not_smap = ((rflags & X86_EFLAGS_AC) | implicit_access) == X86_EFLAGS_AC;
	int index = (pfec | (not_smap ? PFERR_RSVD_MASK : 0)) >> 1;
	u32 errcode = PFERR_PRESENT_MASK;
	bool fault;

	kvm_mmu_refresh_passthrough_bits(vcpu, mmu);

	fault = (mmu->permissions[index] >> pte_access) & 1;

	WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
	if (unlikely(mmu->pkru_mask)) {
		u32 pkru_bits, offset;

		/*
		 * PKRU defines 32 bits: there are 16 domains and 2 attribute
		 * bits per domain in pkru.  pte_pkey is the index of the
		 * protection domain, so pte_pkey * 2 is the index of the
		 * first bit for the domain.
		 */
		pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;

		/* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
		offset = (pfec & ~1) | ((pte_access & PT_USER_MASK) ? PFERR_RSVD_MASK : 0);

		pkru_bits &= mmu->pkru_mask >> offset;
		errcode |= -pkru_bits & PFERR_PK_MASK;
		fault |= (pkru_bits != 0);
	}

	return -(u32)fault & errcode;
}
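
/*
 * Worked example (illustrative): a user-mode write to a read-only page
 * arrives with access == PFERR_WRITE_MASK | PFERR_USER_MASK == 0x6.  The
 * access is not implicit, so not_smap simply reflects EFLAGS.AC; with AC
 * set, index == (0x6 | PFERR_RSVD_MASK) >> 1 == 7, and bit pte_access of
 * the precomputed bitmap mmu->permissions[7] decides whether the access
 * faults.  On a fault, the return value is PFERR_PRESENT_MASK, ORed with
 * PFERR_PK_MASK if a protection key denied the access; otherwise 0.
 */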

bool kvm_mmu_may_ignore_guest_pat(void);

void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);

int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);

int kvm_mmu_post_init_vm(struct kvm *kvm);
void kvm_mmu_pre_destroy_vm(struct kvm *kvm);

static inline bool kvm_shadow_root_allocated(struct kvm *kvm)
{
	/*
	 * Read shadow_root_allocated before related pointers.  Hence, threads
	 * reading shadow_root_allocated in any lock context are guaranteed to
	 * see the pointers.  Pairs with smp_store_release in
	 * mmu_first_shadow_root_alloc.
	 */
	return smp_load_acquire(&kvm->arch.shadow_root_allocated);
}

#ifdef CONFIG_X86_64
extern bool tdp_mmu_enabled;
#else
#define tdp_mmu_enabled false
#endif

static inline bool kvm_memslots_have_rmaps(struct kvm *kvm)
{
	return !tdp_mmu_enabled || kvm_shadow_root_allocated(kvm);
}

static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	/* KVM_HPAGE_GFN_SHIFT(PG_LEVEL_4K) must be 0. */
	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
		(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}

static inline unsigned long
__kvm_mmu_slot_lpages(struct kvm_memory_slot *slot, unsigned long npages,
		      int level)
{
	return gfn_to_index(slot->base_gfn + npages - 1,
			    slot->base_gfn, level) + 1;
}

static inline unsigned long
kvm_mmu_slot_lpages(struct kvm_memory_slot *slot, int level)
{
	return __kvm_mmu_slot_lpages(slot, slot->npages, level);
}
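
/*
 * Example (illustrative): for a 1GiB slot (npages == 262144) whose
 * base_gfn is 2MiB-aligned, kvm_mmu_slot_lpages(slot, PG_LEVEL_2M)
 * returns 512, i.e. the number of possible 2MiB mappings covering the
 * slot.
 */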

static inline void kvm_update_page_stats(struct kvm *kvm, int level, int count)
{
	atomic64_add(count, &kvm->stat.pages[level - 1]);
}

gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access,
			   struct x86_exception *exception);

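/*
 * Note: only the nested walker (vcpu->arch.nested_mmu) produces "GPAs" that
 * are really L2 GPAs and still need a pass through L1's page tables; every
 * other MMU already yields L1 GPAs, so the translation below is a no-op for
 * them.
 */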
static inline gpa_t kvm_translate_gpa(struct kvm_vcpu *vcpu,
				      struct kvm_mmu *mmu,
				      gpa_t gpa, u64 access,
				      struct x86_exception *exception)
{
	if (mmu != &vcpu->arch.nested_mmu)
		return gpa;
	return translate_nested_gpa(vcpu, gpa, access, exception);
}
#endif