// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
 * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * KVM Xen emulation
 */

#ifndef __ARCH_X86_KVM_XEN_H__
#define __ARCH_X86_KVM_XEN_H__

#ifdef CONFIG_KVM_XEN
#include <linux/jump_label_ratelimit.h>

extern struct static_key_false_deferred kvm_xen_enabled;

int __kvm_xen_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data);
int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc);
void kvm_xen_init_vm(struct kvm *kvm);
void kvm_xen_destroy_vm(struct kvm *kvm);

/* True if userspace has set up an MSR address for the hypercall page */
static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
        return static_branch_unlikely(&kvm_xen_enabled.key) &&
                kvm->arch.xen_hvm_config.msr;
}

/* True if KVM itself intercepts and handles Xen hypercalls */
static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
{
        return static_branch_unlikely(&kvm_xen_enabled.key) &&
                (kvm->arch.xen_hvm_config.flags &
                 KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL);
}
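/*
 * Illustrative sketch, not part of this header's API surface: the flag
 * tested by kvm_xen_hypercall_enabled() is set from userspace via the
 * KVM_XEN_HVM_CONFIG ioctl, whose in-kernel handler is the
 * kvm_xen_hvm_config() declared above. A minimal VMM-side call might
 * look like the following, where 'vm_fd' is an assumed VM file
 * descriptor obtained from KVM_CREATE_VM:
 *
 *      struct kvm_xen_hvm_config xhc = {
 *              .flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL,
 *      };
 *      ioctl(vm_fd, KVM_XEN_HVM_CONFIG, &xhc);
 */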
static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
{
        if (static_branch_unlikely(&kvm_xen_enabled.key) &&
            vcpu->arch.xen.vcpu_info_set && vcpu->kvm->arch.xen.upcall_vector)
                return __kvm_xen_has_interrupt(vcpu);

        return 0;
}
#else
/* Stubs for builds with Xen emulation compiled out */
static inline int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
{
        return 1;
}

static inline void kvm_xen_init_vm(struct kvm *kvm)
{
}

static inline void kvm_xen_destroy_vm(struct kvm *kvm)
{
}

static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
        return false;
}

static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
{
        return false;
}

static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
{
        return 0;
}
#endif

int kvm_xen_hypercall(struct kvm_vcpu *vcpu);

#include <asm/pvclock-abi.h>
#include <asm/xen/interface.h>
#include <xen/interface/vcpu.h>

void kvm_xen_update_runstate_guest(struct kvm_vcpu *vcpu, int state);

static inline void kvm_xen_runstate_set_running(struct kvm_vcpu *vcpu)
{
        kvm_xen_update_runstate_guest(vcpu, RUNSTATE_running);
}

static inline void kvm_xen_runstate_set_preempted(struct kvm_vcpu *vcpu)
{
        /*
         * If the vCPU wasn't preempted but took a normal exit for
         * some reason (hypercalls, I/O, etc.), that is accounted as
         * still RUNSTATE_running, as the VMM is still operating on
         * behalf of the vCPU. Only if the VMM does actually block
         * does it need to enter RUNSTATE_blocked.
         */
        if (vcpu->preempted)
                kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable);
}

/* 32-bit compatibility definitions, also used natively in 32-bit build */
struct compat_arch_vcpu_info {
        unsigned int cr2;
        unsigned int pad[5];
};

struct compat_vcpu_info {
        uint8_t evtchn_upcall_pending;
        uint8_t evtchn_upcall_mask;
        uint16_t pad;
        uint32_t evtchn_pending_sel;
        struct compat_arch_vcpu_info arch;
        struct pvclock_vcpu_time_info time;
}; /* 64 bytes (x86) */

struct compat_arch_shared_info {
        unsigned int max_pfn;
        unsigned int pfn_to_mfn_frame_list_list;
        unsigned int nmi_reason;
        unsigned int p2m_cr3;
        unsigned int p2m_vaddr;
        unsigned int p2m_generation;
        uint32_t wc_sec_hi;
};

struct compat_shared_info {
        struct compat_vcpu_info vcpu_info[MAX_VIRT_CPUS];
        uint32_t evtchn_pending[32];
        uint32_t evtchn_mask[32];
        struct pvclock_wall_clock wc;
        struct compat_arch_shared_info arch;
};

/*
 * The 32-bit ABI aligns uint64_t to only 4 bytes, so state_entry_time
 * sits at offset 4; 'packed' reproduces that layout in 64-bit builds.
 */
struct compat_vcpu_runstate_info {
        int state;
        uint64_t state_entry_time;
        uint64_t time[4];
} __attribute__((packed));

#endif /* __ARCH_X86_KVM_XEN_H__ */