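/*
 * arch/x86/kvm/kvm_cache_regs.h
 *
 * Helpers for lazily caching guest register state.  Register reads go
 * through a per-vCPU cache (vcpu->arch.regs plus the regs_avail and
 * regs_dirty bitmaps) so the VMCS/VMCB only has to be touched when a
 * value is actually stale or has been modified.
 */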
#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H

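/*
 * CR0/CR4 bits that the vendor code (VMX/SVM) may let the guest own, i.e.
 * modify without a VM exit.  Reads of these bits may therefore have to be
 * refreshed from hardware; see kvm_read_cr0_bits() and kvm_read_cr4_bits().
 */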
#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
#define KVM_POSSIBLE_CR4_GUEST_BITS				  \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR  \
	 | X86_CR4_OSXMMEXCPT | X86_CR4_PGE)

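/*
 * Read a general-purpose register, pulling it out of the VMCS/VMCB via
 * kvm_x86_ops->cache_reg() first if the cached copy is stale.  regs_avail
 * tracks which entries of vcpu->arch.regs are current.
 */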
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
					      enum kvm_reg reg)
{
	if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
		kvm_x86_ops->cache_reg(vcpu, reg);

	return vcpu->arch.regs[reg];
}

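/*
 * Update a cached register and mark it both available and dirty, so that
 * the new value is flushed back to hardware before the next VM entry.
 */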
static inline void kvm_register_write(struct kvm_vcpu *vcpu,
				      enum kvm_reg reg,
				      unsigned long val)
{
	vcpu->arch.regs[reg] = val;
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

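/* Convenience wrappers for the guest's RIP. */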
static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write(vcpu, VCPU_REGS_RIP, val);
}

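/*
 * Read a cached PDPTE (used with PAE paging).  Refreshing the PDPTRs may
 * require reading guest memory, which can sleep on SVM, hence the
 * might_sleep() annotation.
 */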
static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	might_sleep();  /* on svm */

	if (!test_bit(VCPU_EXREG_PDPTR,
		      (unsigned long *)&vcpu->arch.regs_avail))
		kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_PDPTR);

	return vcpu->arch.walk_mmu->pdptrs[index];
}

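/*
 * Read CR0, refreshing any guest-owned bits from hardware only when the
 * caller actually asks for one of them.  A typical use is checking a
 * single bit, e.g.:
 *
 *	if (kvm_read_cr0_bits(vcpu, X86_CR0_PG))
 *		... guest has paging enabled ...
 */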
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
	if (tmask & vcpu->arch.cr0_guest_owned_bits)
		kvm_x86_ops->decache_cr0_guest_bits(vcpu);
	return vcpu->arch.cr0 & mask;
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, ~0UL);
}

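/* As kvm_read_cr0_bits(), but for the guest-owned bits of CR4. */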
static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
	if (tmask & vcpu->arch.cr4_guest_owned_bits)
		kvm_x86_ops->decache_cr4_guest_bits(vcpu);
	return vcpu->arch.cr4 & mask;
}

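/*
 * CR3 may be cached by the vendor code (e.g. VMX when EPT is in use);
 * decache_cr3() pulls the current value back into vcpu->arch.cr3.
 */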
static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
	if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
		kvm_x86_ops->decache_cr3(vcpu);
	return vcpu->arch.cr3;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, ~0UL);
}

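/*
 * Assemble the guest's EDX:EAX pair into a 64-bit value, as used by
 * instructions such as WRMSR that take a 64-bit operand in that pair.
 */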
static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
	return (kvm_register_read(vcpu, VCPU_REGS_RAX) & -1u)
		| ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
}

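/* PKRU is not tracked in regs_avail; read it via the vendor callback. */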
static inline u32 kvm_read_pkru(struct kvm_vcpu *vcpu)
{
	return kvm_x86_ops->get_pkru(vcpu);
}

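/*
 * hflags bookkeeping for nested virtualization: HF_GUEST_MASK is set
 * while the vCPU is running a nested (L2) guest.
 */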
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags |= HF_GUEST_MASK;
}

static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags &= ~HF_GUEST_MASK;
}

static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_GUEST_MASK;
}

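/* True if the vCPU is in System Management Mode. */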
static inline bool is_smm(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_SMM_MASK;
}

#endif