xref: /linux/arch/x86/kvm/kvm_cache_regs.h (revision d003d772e64df08af04ee63609d47169ee82ae0e)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H

#include <linux/kvm_host.h>

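/*
 * Control-register bits that KVM may let the guest own, i.e. bits the
 * guest can read and write without triggering a VM exit.  For these
 * bits the cached vcpu->arch.cr0/cr4 value can be stale, so readers
 * must decache them from hardware first (see kvm_read_cr0_bits()
 * below).
 */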
#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
#define KVM_POSSIBLE_CR4_GUEST_BITS				  \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR  \
	 | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE)

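/*
 * Read a GPR from the per-vCPU register cache.  If the value has not
 * been pulled out of hardware since the last VM exit (its bit is clear
 * in regs_avail), ask vendor code to fetch it first; on VMX, for
 * example, RSP and RIP live in the VMCS and are read lazily.
 */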
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
					      enum kvm_reg reg)
{
	if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
		kvm_x86_ops->cache_reg(vcpu, reg);

	return vcpu->arch.regs[reg];
}

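/*
 * Update the cached value and mark the register both available and
 * dirty; vendor code must write dirty registers back to hardware
 * before the next VM entry.
 */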
static inline void kvm_register_write(struct kvm_vcpu *vcpu,
				      enum kvm_reg reg,
				      unsigned long val)
{
	vcpu->arch.regs[reg] = val;
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

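/* RIP is cached like any other register; these are convenience wrappers. */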
static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write(vcpu, VCPU_REGS_RIP, val);
}

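/*
 * Read one of the four page-directory-pointer-table entries used by
 * PAE paging.  Filling the cache may require reading guest memory,
 * which can sleep; hence the might_sleep() annotation for the SVM
 * path.
 */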
static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	might_sleep();  /* on svm */

	if (!test_bit(VCPU_EXREG_PDPTR,
		      (unsigned long *)&vcpu->arch.regs_avail))
		kvm_x86_ops->cache_reg(vcpu, (enum kvm_reg)VCPU_EXREG_PDPTR);

	return vcpu->arch.walk_mmu->pdptrs[index];
}

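/*
 * Read CR0/CR4 bits through the cache.  If any requested bit is
 * currently guest-owned, its cached value may be stale and must be
 * decached from hardware first.
 */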
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
	if (tmask & vcpu->arch.cr0_guest_owned_bits)
		kvm_x86_ops->decache_cr0_guest_bits(vcpu);
	return vcpu->arch.cr0 & mask;
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, ~0UL);
}

static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
	if (tmask & vcpu->arch.cr4_guest_owned_bits)
		kvm_x86_ops->decache_cr4_guest_bits(vcpu);
	return vcpu->arch.cr4 & mask;
}

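/*
 * The guest may be able to reload CR3 without a VM exit (e.g. on VMX
 * with EPT enabled), so the cached value is refreshed on demand.
 */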
static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
	if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
		kvm_x86_ops->decache_cr3(vcpu);
	return vcpu->arch.cr3;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, ~0UL);
}

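/*
 * Compose the 64-bit EDX:EAX value used by instructions such as RDMSR
 * and RDTSC.  The "& -1u" truncates each register to its low 32 bits,
 * since only EAX/EDX are architecturally defined for these results.
 */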
static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
	return (kvm_register_read(vcpu, VCPU_REGS_RAX) & -1u)
		| ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
}

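/*
 * "Guest mode" here means nested virtualization: the vCPU is running
 * an L2 guest on behalf of an L1 guest hypervisor.  Leaving guest mode
 * flushes any EOI-exit-bitmap update that was deferred while L2 was
 * running.
 */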
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags |= HF_GUEST_MASK;
}

static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags &= ~HF_GUEST_MASK;

	if (vcpu->arch.load_eoi_exitmap_pending) {
		vcpu->arch.load_eoi_exitmap_pending = false;
		kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
	}
}

static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_GUEST_MASK;
}

static inline bool is_smm(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_SMM_MASK;
}

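/*
 * Usage sketch (illustrative only, not part of this header): an
 * emulated instruction that returns a 64-bit result in EDX:EAX and
 * then skips itself could use the accessors above like so:
 *
 *	kvm_register_write(vcpu, VCPU_REGS_RAX, data & 0xffffffff);
 *	kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32);
 *	kvm_rip_write(vcpu, kvm_rip_read(vcpu) + insn_len);
 *
 * where "data" and "insn_len" are hypothetical locals.  The writes
 * mark RAX, RDX and RIP dirty, so vendor code reloads them into
 * hardware state before the next VM entry.
 */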
#endif