/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef __ARCH_X86_KVM_VMX_ONHYPERV_H__
#define __ARCH_X86_KVM_VMX_ONHYPERV_H__

#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>

#include <linux/jump_label.h>

#include "capabilities.h"
#include "hyperv_evmcs.h"
#include "vmcs12.h"

#define current_evmcs ((struct hv_enlightened_vmcs *)this_cpu_read(current_vmcs))

#if IS_ENABLED(CONFIG_HYPERV)

DECLARE_STATIC_KEY_FALSE(__kvm_is_using_evmcs);

static __always_inline bool kvm_is_using_evmcs(void)
{
	return static_branch_unlikely(&__kvm_is_using_evmcs);
}

static __always_inline int get_evmcs_offset(unsigned long field,
					    u16 *clean_field)
{
	int offset = evmcs_field_offset(field, clean_field);

	WARN_ONCE(offset < 0, "accessing unsupported EVMCS field %lx\n", field);
	return offset;
}

static __always_inline void evmcs_write64(unsigned long field, u64 value)
{
	u16 clean_field;
	int offset = get_evmcs_offset(field, &clean_field);

	if (offset < 0)
		return;

	*(u64 *)((char *)current_evmcs + offset) = value;

	current_evmcs->hv_clean_fields &= ~clean_field;
}

static __always_inline void evmcs_write32(unsigned long field, u32 value)
{
	u16 clean_field;
	int offset = get_evmcs_offset(field, &clean_field);

	if (offset < 0)
		return;

	*(u32 *)((char *)current_evmcs + offset) = value;
	current_evmcs->hv_clean_fields &= ~clean_field;
}

static __always_inline void evmcs_write16(unsigned long field, u16 value)
{
	u16 clean_field;
	int offset = get_evmcs_offset(field, &clean_field);

	if (offset < 0)
		return;

	*(u16 *)((char *)current_evmcs + offset) = value;
	current_evmcs->hv_clean_fields &= ~clean_field;
}

static __always_inline u64 evmcs_read64(unsigned long field)
{
	int offset = get_evmcs_offset(field, NULL);

	if (offset < 0)
		return 0;

	return *(u64 *)((char *)current_evmcs + offset);
}

static __always_inline u32 evmcs_read32(unsigned long field)
{
	int offset = get_evmcs_offset(field, NULL);

	if (offset < 0)
		return 0;

	return *(u32 *)((char *)current_evmcs + offset);
}

static __always_inline u16 evmcs_read16(unsigned long field)
{
	int offset = get_evmcs_offset(field, NULL);

	if (offset < 0)
		return 0;

	return *(u16 *)((char *)current_evmcs + offset);
}
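
/*
 * Illustrative sketch, not part of the upstream header: evmcs_load() below
 * takes the place of VMPTRLD and is reached through the generic vmcs_load()
 * wrapper (in arch/x86/kvm/vmx/vmx_ops.h), which tests the static key first,
 * roughly:
 *
 *	static inline void vmcs_load(struct vmcs *vmcs)
 *	{
 *		u64 phys_addr = __pa(vmcs);
 *
 *		if (kvm_is_using_evmcs())
 *			return evmcs_load(phys_addr);
 *
 *		vmx_asm1(vmptrld, "m"(phys_addr), vmcs, phys_addr);
 *	}
 *
 * This is a simplified approximation; see vmx_ops.h for the real wrapper.
 */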

static inline void evmcs_load(u64 phys_addr)
{
	struct hv_vp_assist_page *vp_ap =
		hv_get_vp_assist_page(smp_processor_id());

	/*
	 * When enabling eVMCS, KVM verifies that every CPU has a valid
	 * hv_vp_assist_page() and aborts enabling the feature otherwise. The
	 * CPU onlining path is also checked in vmx_hardware_enable().
	 */
	if (KVM_BUG_ON(!vp_ap, kvm_get_running_vcpu()->kvm))
		return;

	if (current_evmcs->hv_enlightenments_control.nested_flush_hypercall)
		vp_ap->nested_control.features.directhypercall = 1;
	vp_ap->current_nested_vmcs = phys_addr;
	vp_ap->enlighten_vmentry = 1;
}

void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf);
#else /* !IS_ENABLED(CONFIG_HYPERV) */
static __always_inline bool kvm_is_using_evmcs(void) { return false; }
static __always_inline void evmcs_write64(unsigned long field, u64 value) {}
static __always_inline void evmcs_write32(unsigned long field, u32 value) {}
static __always_inline void evmcs_write16(unsigned long field, u16 value) {}
static __always_inline u64 evmcs_read64(unsigned long field) { return 0; }
static __always_inline u32 evmcs_read32(unsigned long field) { return 0; }
static __always_inline u16 evmcs_read16(unsigned long field) { return 0; }
static inline void evmcs_load(u64 phys_addr) {}
#endif /* IS_ENABLED(CONFIG_HYPERV) */

#endif /* __ARCH_X86_KVM_VMX_ONHYPERV_H__ */
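
/*
 * Illustrative sketch, not part of the upstream header: the evmcs_read*()
 * and evmcs_write*() accessors above are consumed by the generic VMCS
 * wrappers in arch/x86/kvm/vmx/vmx_ops.h, which test the static key and fall
 * back to a real VMREAD/VMWRITE when eVMCS is not in use. Simplified
 * approximation of the 64-bit write path (the !CONFIG_X86_64 split write is
 * omitted):
 *
 *	static __always_inline void vmcs_write64(unsigned long field, u64 value)
 *	{
 *		vmcs_check64(field);
 *		if (kvm_is_using_evmcs())
 *			return evmcs_write64(field, value);
 *
 *		__vmcs_writel(field, value);
 *	}
 */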