Lines matching "vcpu" in arch/x86/kvm/kvm_cache_regs.h

#define BUILD_KVM_GPR_ACCESSORS(lname, uname)				\
static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
{									\
	return vcpu->arch.regs[VCPU_REGS_##uname];			\
}									\
static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu,	\
						unsigned long val)	\
{									\
	vcpu->arch.regs[VCPU_REGS_##uname] = val;			\
}
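The macro is stamped out once per general-purpose register, generating a
kvm_<reg>_read()/kvm_<reg>_write() pair for each; kvm_rax_read() and
kvm_rdx_read(), used by kvm_read_edx_eax() below, come from these
invocations (a subset; the file also covers RSP and, on 64-bit, R8-R15):

BUILD_KVM_GPR_ACCESSORS(rax, RAX)
BUILD_KVM_GPR_ACCESSORS(rbx, RBX)
BUILD_KVM_GPR_ACCESSORS(rcx, RCX)
BUILD_KVM_GPR_ACCESSORS(rdx, RDX)
BUILD_KVM_GPR_ACCESSORS(rbp, RBP)
BUILD_KVM_GPR_ACCESSORS(rsi, RSI)
BUILD_KVM_GPR_ACCESSORS(rdi, RDI)
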
/*
 * Using the register cache from interrupt context is generally not allowed:
 * caching a register and marking it available/dirty can't be done atomically,
 * so an interrupt may clobber state or read stale data while the vCPU task is
 * in the process of updating the cache.  The exception is a PMI that arrives
 * with the guest running, as that path doesn't touch the cache and runs after
 * the cache has been reset on VM-Exit.
 */
#define kvm_assert_register_caching_allowed(vcpu)		\
	lockdep_assert_once(in_task() || kvm_arch_pmi_in_guest(vcpu))
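The assertion fires at most once (lockdep_assert_once()) when a cache helper
is called from a context that is neither task context nor a PMI-in-guest
window. A hypothetical caller, not from this file, to illustrate the rule:

/*
 * Hypothetical illustration: reading a cached register from task context
 * satisfies in_task(), so the lockdep assertion stays quiet.  Calling this
 * from a hard IRQ handler would trip it unless kvm_arch_pmi_in_guest(vcpu)
 * were true.
 */
static unsigned long example_read_rip_in_task(struct kvm_vcpu *vcpu)
{
	return kvm_rip_read(vcpu);
}
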
/*
 * avail    dirty
 *   0        0       register in VMCS/VMCB
 *   0        1       *INVALID*
 *   1        0       register in vcpu->arch
 *   1        1       register in vcpu->arch, needs to be stored back
 */
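Read together, the two bits track where the authoritative copy of a register
lives. A sketch of the lifecycle, assuming a vendor backend (VMX or SVM) that
syncs registers lazily:

/*
 * VM-Exit:  backend clears avail/dirty for hardware-resident registers
 *           -> avail=0, dirty=0 (authoritative copy in the VMCS/VMCB)
 * Read:     backend's cache_reg() fills vcpu->arch.regs[], marks available
 *           -> avail=1, dirty=0
 * Write:    kvm_register_mark_dirty() sets both bits
 *           -> avail=1, dirty=1 (must be written back before VM-Enter)
 */
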
static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
					     enum kvm_reg reg)
{
	kvm_assert_register_caching_allowed(vcpu);
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
					 enum kvm_reg reg)
{
	kvm_assert_register_caching_allowed(vcpu);
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
					       enum kvm_reg reg)
{
	kvm_assert_register_caching_allowed(vcpu);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
					   enum kvm_reg reg)
{
	kvm_assert_register_caching_allowed(vcpu);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}
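
Marking a register dirty also marks it available, as dirty=1/avail=0 is the
*INVALID* row in the table above. The canonical pattern is to update the
cached value first and then mark it dirty, which is exactly what
kvm_register_write_raw() below does; a minimal sketch:

/* Sketch: cache a new value and flag it for write-back on VM-Enter. */
static void example_update_gpr(struct kvm_vcpu *vcpu, enum kvm_reg reg,
			       unsigned long val)
{
	vcpu->arch.regs[reg] = val;
	kvm_register_mark_dirty(vcpu, reg);
}
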
/*
 * kvm_register_test_and_mark_available() deliberately uses an arch bitop to
 * avoid the instrumentation added by the generic bitops, so that noinstr
 * code, e.g. the low level VM-Enter/VM-Exit paths, can cache registers.
 */
static __always_inline bool kvm_register_test_and_mark_available(struct kvm_vcpu *vcpu,
								 enum kvm_reg reg)
{
	kvm_assert_register_caching_allowed(vcpu);
	return arch___test_and_set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}
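
The test-and-set form lets a hot path cache a register exactly once per exit;
a hypothetical user for illustration (the real callers live in the vendor
modules' VM-Exit code):

/* Hypothetical: store the hardware value only on the first access. */
static __always_inline unsigned long example_cache_once(struct kvm_vcpu *vcpu,
							enum kvm_reg reg,
							unsigned long hw_val)
{
	if (!kvm_register_test_and_mark_available(vcpu, reg))
		vcpu->arch.regs[reg] = hw_val;

	return vcpu->arch.regs[reg];
}
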
/*
 * The "raw" register helpers are only for cases where the full 64 bits of a
 * register are read/written irrespective of current vCPU mode.  In other
 * words, odds are good you shouldn't be using the raw variants.
 */
static inline unsigned long kvm_register_read_raw(struct kvm_vcpu *vcpu, int reg)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return 0;

	if (!kvm_register_is_available(vcpu, reg))
		kvm_x86_call(cache_reg)(vcpu, reg);

	return vcpu->arch.regs[reg];
}
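
The mode-aware counterpart truncates to 32 bits when the vCPU isn't in
64-bit mode; this sketch mirrors kvm_register_read() in arch/x86/kvm/x86.h:

/* Sketch: non-raw reads drop the upper 32 bits outside 64-bit mode. */
static inline unsigned long example_register_read(struct kvm_vcpu *vcpu, int reg)
{
	unsigned long val = kvm_register_read_raw(vcpu, reg);

	return is_64_bit_mode(vcpu) ? val : (u32)val;
}
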
static inline void kvm_register_write_raw(struct kvm_vcpu *vcpu, int reg,
					  unsigned long val)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return;

	vcpu->arch.regs[reg] = val;
	kvm_register_mark_dirty(vcpu, reg);
}

static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read_raw(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write_raw(vcpu, VCPU_REGS_RIP, val);
}

static inline unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read_raw(vcpu, VCPU_REGS_RSP);
}

static inline void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write_raw(vcpu, VCPU_REGS_RSP, val);
}
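
RIP and RSP get dedicated accessors because both live in hardware (e.g. the
VMCS on VMX) and are read lazily through the cache. A typical use is
advancing RIP after emulating an instruction; a sketch, where insn_len is a
hypothetical length from the emulator (the real logic, including RFLAGS
handling, is kvm_skip_emulated_instruction() in arch/x86/kvm/x86.c):

/* Sketch: advance RIP past an emulated instruction of insn_len bytes. */
static void example_skip_insn(struct kvm_vcpu *vcpu, unsigned int insn_len)
{
	kvm_rip_write(vcpu, kvm_rip_read(vcpu) + insn_len);
}
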
static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	might_sleep();  /* on svm */

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
		kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_PDPTR);

	return vcpu->arch.walk_mmu->pdptrs[index];
}

static inline void kvm_pdptr_write(struct kvm_vcpu *vcpu, int index, u64 value)
{
	vcpu->arch.walk_mmu->pdptrs[index] = value;
}
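
The PDPTRs are the four page-directory-pointer-table entries used by PAE
paging, hence the fixed indices 0-3. A sketch of installing all four and
flagging the cache dirty (the real logic, including validity checks, is
load_pdptrs() in arch/x86/kvm/x86.c):

/* Sketch: install four PAE PDPTEs and mark the cache for write-back. */
static void example_install_pdptrs(struct kvm_vcpu *vcpu, const u64 pdptrs[4])
{
	int i;

	for (i = 0; i < 4; i++)
		kvm_pdptr_write(vcpu, i, pdptrs[i]);

	kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
}
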
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;

	if ((tmask & vcpu->arch.cr0_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR0))
		kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_CR0);

	return vcpu->arch.cr0 & mask;
}

static __always_inline bool kvm_is_cr0_bit_set(struct kvm_vcpu *vcpu,
					       unsigned long cr0_bit)
{
	BUILD_BUG_ON(!is_power_of_2(cr0_bit));

	return !!kvm_read_cr0_bits(vcpu, cr0_bit);
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, ~0UL);
}
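
The masked reader exists because the guest may own some CR0 bits
(KVM_POSSIBLE_CR0_GUEST_BITS), in which case the cached value must be
refreshed from hardware before those bits can be trusted. Single-bit
predicates then come almost for free; this sketch mirrors is_paging() in
arch/x86/kvm/x86.h:

/* Sketch: paging is enabled iff CR0.PG is set. */
static inline bool example_is_paging(struct kvm_vcpu *vcpu)
{
	return kvm_is_cr0_bit_set(vcpu, X86_CR0_PG);
}
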
static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;

	if ((tmask & vcpu->arch.cr4_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR4))
		kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_CR4);

	return vcpu->arch.cr4 & mask;
}

static __always_inline bool kvm_is_cr4_bit_set(struct kvm_vcpu *vcpu,
					       unsigned long cr4_bit)
{
	BUILD_BUG_ON(!is_power_of_2(cr4_bit));

	return !!kvm_read_cr4_bits(vcpu, cr4_bit);
}

static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
	if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
		kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_CR3);

	return vcpu->arch.cr3;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, ~0UL);
}
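
CR4 follows the same guest-owned-bits pattern as CR0; e.g. testing for PAE,
as helpers such as is_pae() in arch/x86/kvm/x86.h do:

/* Sketch: query CR4.PAE without forcing a full CR4 cache refresh. */
static inline bool example_is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_is_cr4_bit_set(vcpu, X86_CR4_PAE);
}
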
static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
	return (kvm_rax_read(vcpu) & -1u)
		| ((u64)(kvm_rdx_read(vcpu) & -1u) << 32);
}
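
The & -1u truncates each 64-bit GPR to its low 32 bits, reassembling the
EDX:EAX pair used by instructions such as WRMSR and RDTSC. The reverse
direction, splitting a 64-bit result back into the pair after e.g. RDMSR,
looks like this sketch (the real code is in kvm_emulate_rdmsr() in
arch/x86/kvm/x86.c):

/* Sketch: return a 64-bit result to the guest in EDX:EAX. */
static void example_set_edx_eax(struct kvm_vcpu *vcpu, u64 data)
{
	kvm_rax_write(vcpu, (u32)data);
	kvm_rdx_write(vcpu, data >> 32);
}
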
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags |= HF_GUEST_MASK;
	vcpu->stat.guest_mode = 1;
}

static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags &= ~HF_GUEST_MASK;

	if (vcpu->arch.load_eoi_exitmap_pending) {
		vcpu->arch.load_eoi_exitmap_pending = false;
		kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
	}

	vcpu->stat.guest_mode = 0;
}

static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_GUEST_MASK;
}
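
"Guest mode" here means the vCPU is executing a nested (L2) guest on behalf
of an L1 hypervisor; is_guest_mode() is the check used throughout KVM to pick
nested-aware paths. A hypothetical branch for illustration (real examples
live in the nested exit handling of vmx/nested.c and svm/nested.c):

/* Hypothetical: nested-aware exit handling branches on is_guest_mode(). */
static bool example_exit_needs_reflection(struct kvm_vcpu *vcpu)
{
	/* An exit while running L2 may need to be forwarded to L1. */
	return is_guest_mode(vcpu);
}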