/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H

#include <linux/kvm_host.h>

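/*
 * CR0/CR4 bits that the guest may be allowed to own, i.e. modify without
 * triggering a VM-Exit (e.g. via the CR0/CR4 guest/host masks on VMX).
 * KVM's cached value of a guest-owned bit can be stale; see
 * kvm_read_cr0_bits() and kvm_read_cr4_bits() below.
 */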
#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
#define KVM_POSSIBLE_CR4_GUEST_BITS				  \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR  \
	 | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE)

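/*
 * regs_avail is a bitmap of registers whose current value is known to
 * KVM (cached in vcpu->arch); regs_dirty is a bitmap of registers that
 * KVM has modified and that must be propagated back to hardware state
 * (e.g. the VMCS) before re-entering the guest.
 */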
static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
					     enum kvm_reg reg)
{
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
					 enum kvm_reg reg)
{
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
					       enum kvm_reg reg)
{
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

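/*
 * Marking a register dirty also marks it available: the cached value
 * is now the authoritative copy.
 */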
static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
					   enum kvm_reg reg)
{
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

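/*
 * Build direct accessors for the GPRs, e.g. kvm_rax_read() and
 * kvm_rax_write().  Reads skip the availability check because these
 * GPRs, unlike e.g. RSP and RIP, never live in hardware-cached state
 * and are always available.
 */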
#define BUILD_KVM_GPR_ACCESSORS(lname, uname)				      \
static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
{									      \
	return vcpu->arch.regs[VCPU_REGS_##uname];			      \
}									      \
static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu,	      \
						unsigned long val)	      \
{									      \
	vcpu->arch.regs[VCPU_REGS_##uname] = val;			      \
	kvm_register_mark_dirty(vcpu, VCPU_REGS_##uname);		      \
}
BUILD_KVM_GPR_ACCESSORS(rax, RAX)
BUILD_KVM_GPR_ACCESSORS(rbx, RBX)
BUILD_KVM_GPR_ACCESSORS(rcx, RCX)
BUILD_KVM_GPR_ACCESSORS(rdx, RDX)
BUILD_KVM_GPR_ACCESSORS(rbp, RBP)
BUILD_KVM_GPR_ACCESSORS(rsi, RSI)
BUILD_KVM_GPR_ACCESSORS(rdi, RDI)
#ifdef CONFIG_X86_64
BUILD_KVM_GPR_ACCESSORS(r8,  R8)
BUILD_KVM_GPR_ACCESSORS(r9,  R9)
BUILD_KVM_GPR_ACCESSORS(r10, R10)
BUILD_KVM_GPR_ACCESSORS(r11, R11)
BUILD_KVM_GPR_ACCESSORS(r12, R12)
BUILD_KVM_GPR_ACCESSORS(r13, R13)
BUILD_KVM_GPR_ACCESSORS(r14, R14)
BUILD_KVM_GPR_ACCESSORS(r15, R15)
#endif

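/*
 * Generic accessors for registers that may be cached in hardware state.
 * A read lazily pulls the value in via the vendor callback
 * (kvm_x86_ops.cache_reg()) if it isn't already available.
 */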
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return 0;

	if (!kvm_register_is_available(vcpu, reg))
		kvm_x86_ops.cache_reg(vcpu, reg);

	return vcpu->arch.regs[reg];
}

static inline void kvm_register_write(struct kvm_vcpu *vcpu, int reg,
				      unsigned long val)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return;

	vcpu->arch.regs[reg] = val;
	kvm_register_mark_dirty(vcpu, reg);
}

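/* RIP and RSP may need to be pulled from hardware, e.g. the VMCS on Intel. */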
static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write(vcpu, VCPU_REGS_RIP, val);
}

static inline unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read(vcpu, VCPU_REGS_RSP);
}

static inline void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write(vcpu, VCPU_REGS_RSP, val);
}

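/*
 * Read a PAE page-directory-pointer entry.  Refreshing the PDPTR cache
 * may require reading guest memory (on SVM), which can sleep.
 */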
static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	might_sleep();  /* on svm */

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_PDPTR);

	return vcpu->arch.walk_mmu->pdptrs[index];
}

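/*
 * Reads of CR0/CR4 bits must refresh the cached value from hardware if
 * any requested bit is guest-owned, as the guest may have changed such
 * bits without a VM-Exit.
 */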
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
	if ((tmask & vcpu->arch.cr0_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR0))
		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR0);
	return vcpu->arch.cr0 & mask;
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, ~0UL);
}

static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
	if ((tmask & vcpu->arch.cr4_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR4))
		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR4);
	return vcpu->arch.cr4 & mask;
}

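/*
 * CR3 may also be stale, e.g. guest CR3 writes aren't intercepted when
 * TDP (EPT/NPT) is in use.
 */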
static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
	if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR3);
	return vcpu->arch.cr3;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, ~0UL);
}

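/*
 * Assemble the 64-bit EDX:EAX pair used by instructions such as RDMSR
 * and RDTSC; "& -1u" truncates each register to its low 32 bits.
 */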
static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
	return (kvm_rax_read(vcpu) & -1u)
		| ((u64)(kvm_rdx_read(vcpu) & -1u) << 32);
}

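/* "Guest mode" refers to running a nested guest, i.e. L2 is active. */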
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags |= HF_GUEST_MASK;
}

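/*
 * On leaving guest mode, service a request to reload the EOI-exit
 * bitmap that was deferred while L2 was active.
 */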
static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags &= ~HF_GUEST_MASK;

	if (vcpu->arch.load_eoi_exitmap_pending) {
		vcpu->arch.load_eoi_exitmap_pending = false;
		kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
	}
}

static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_GUEST_MASK;
}

static inline bool is_smm(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_SMM_MASK;
}

#endif