// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
 * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * KVM Xen emulation
 */

#ifndef __ARCH_X86_KVM_XEN_H__
#define __ARCH_X86_KVM_XEN_H__

#ifdef CONFIG_KVM_XEN
#include <linux/jump_label_ratelimit.h>

extern struct static_key_false_deferred kvm_xen_enabled;

int __kvm_xen_has_interrupt(struct kvm_vcpu *vcpu);
void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu);
int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
int kvm_xen_hvm_evtchn_send(struct kvm *kvm, struct kvm_irq_routing_xen_evtchn *evt);
int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data);
int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc);
void kvm_xen_init_vm(struct kvm *kvm);
void kvm_xen_destroy_vm(struct kvm *kvm);
void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu);
void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu);
int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe,
			    struct kvm *kvm);
int kvm_xen_setup_evtchn(struct kvm *kvm,
			 struct kvm_kernel_irq_routing_entry *e,
			 const struct kvm_irq_routing_entry *ue);

static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
	return static_branch_unlikely(&kvm_xen_enabled.key) &&
		kvm->arch.xen_hvm_config.msr;
}

static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
{
	return static_branch_unlikely(&kvm_xen_enabled.key) &&
		(kvm->arch.xen_hvm_config.flags &
		 KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL);
}

static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_xen_enabled.key) &&
	    vcpu->arch.xen.vcpu_info_cache.active &&
	    vcpu->kvm->arch.xen.upcall_vector)
		return __kvm_xen_has_interrupt(vcpu);

	return 0;
}

static inline bool kvm_xen_has_pending_events(struct kvm_vcpu *vcpu)
{
	return static_branch_unlikely(&kvm_xen_enabled.key) &&
		vcpu->arch.xen.evtchn_pending_sel;
}

static inline bool kvm_xen_timer_enabled(struct kvm_vcpu *vcpu)
{
	return !!vcpu->arch.xen.timer_virq;
}

static inline int kvm_xen_has_pending_timer(struct kvm_vcpu *vcpu)
{
	if (kvm_xen_hypercall_enabled(vcpu->kvm) && kvm_xen_timer_enabled(vcpu))
		return atomic_read(&vcpu->arch.xen.timer_pending);

	return 0;
}

void kvm_xen_inject_timer_irqs(struct kvm_vcpu *vcpu);
#else
static inline int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
{
	return 1;
}

static inline void kvm_xen_init_vm(struct kvm *kvm)
{
}

static inline void kvm_xen_destroy_vm(struct kvm *kvm)
{
}

static inline void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu)
{
}

static inline void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
{
}

static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
	return false;
}

static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
{
	return false;
}

static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu)
{
}

static inline bool kvm_xen_has_pending_events(struct kvm_vcpu *vcpu)
{
	return false;
}

static inline int kvm_xen_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline void kvm_xen_inject_timer_irqs(struct kvm_vcpu *vcpu)
{
}

static inline bool kvm_xen_timer_enabled(struct kvm_vcpu *vcpu)
{
	return false;
}
#endif
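
/*
 * Illustrative note (not part of the upstream header): every helper above
 * compiles to a trivial stub when CONFIG_KVM_XEN=n, so common-path callers
 * need no #ifdefs of their own. A hypothetical caller, sketched here for
 * clarity only, might do:
 *
 *	if (kvm_xen_has_pending_events(vcpu))
 *		kvm_xen_inject_pending_events(vcpu);
 *
 * and on !CONFIG_KVM_XEN builds the predicate is a constant false, letting
 * the compiler discard the call entirely.
 */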

int kvm_xen_hypercall(struct kvm_vcpu *vcpu);

#include <asm/pvclock-abi.h>
#include <asm/xen/interface.h>
#include <xen/interface/vcpu.h>

void kvm_xen_update_runstate_guest(struct kvm_vcpu *vcpu, int state);

static inline void kvm_xen_runstate_set_running(struct kvm_vcpu *vcpu)
{
	kvm_xen_update_runstate_guest(vcpu, RUNSTATE_running);
}

static inline void kvm_xen_runstate_set_preempted(struct kvm_vcpu *vcpu)
{
	/*
	 * If the vCPU wasn't preempted but took a normal exit for
	 * some reason (hypercalls, I/O, etc.), that is accounted as
	 * still RUNSTATE_running, as the VMM is still operating on
	 * behalf of the vCPU. Only if the VMM does actually block
	 * does it need to enter RUNSTATE_blocked.
	 */
	if (WARN_ON_ONCE(!vcpu->preempted))
		return;

	kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable);
}

/* 32-bit compatibility definitions, also used natively in 32-bit build */
struct compat_arch_vcpu_info {
	unsigned int cr2;
	unsigned int pad[5];
};

struct compat_vcpu_info {
	uint8_t evtchn_upcall_pending;
	uint8_t evtchn_upcall_mask;
	uint16_t pad;
	uint32_t evtchn_pending_sel;
	struct compat_arch_vcpu_info arch;
	struct pvclock_vcpu_time_info time;
}; /* 64 bytes (x86) */

struct compat_arch_shared_info {
	unsigned int max_pfn;
	unsigned int pfn_to_mfn_frame_list_list;
	unsigned int nmi_reason;
	unsigned int p2m_cr3;
	unsigned int p2m_vaddr;
	unsigned int p2m_generation;
	uint32_t wc_sec_hi;
};

struct compat_shared_info {
	struct compat_vcpu_info vcpu_info[MAX_VIRT_CPUS];
	uint32_t evtchn_pending[32];
	uint32_t evtchn_mask[32];
	struct pvclock_wall_clock wc;
	struct compat_arch_shared_info arch;
};

#define COMPAT_EVTCHN_2L_NR_CHANNELS (8 *				\
				      sizeof_field(struct compat_shared_info, \
						   evtchn_pending))
struct compat_vcpu_runstate_info {
	int state;
	uint64_t state_entry_time;
	uint64_t time[4];
} __attribute__((packed));

#endif /* __ARCH_X86_KVM_XEN_H__ */
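
/*
 * Worked example (illustrative, not part of the upstream header): in the
 * compat 2-level event channel ABI above, evtchn_pending is an array of
 * 32 x uint32_t = 128 bytes, so COMPAT_EVTCHN_2L_NR_CHANNELS evaluates to
 * 8 * 128 = 1024 -- one bit per event channel, matching the 1024-channel
 * limit of the 32-bit Xen 2-level event channel ABI.
 */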