/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MSHYPER_H
#define _ASM_X86_MSHYPER_H

#include <linux/types.h>
#include <linux/nmi.h>
#include <linux/msi.h>
#include <linux/io.h>
#include <linux/static_call.h>
#include <asm/nospec-branch.h>
#include <asm/msr.h>
#include <hyperv/hvhdk.h>
#include <asm/fpu/types.h>

/*
 * Hyper-V always provides a single IO-APIC at this MMIO address.
 * Ideally, the value should be looked up in ACPI tables, but it
 * is needed for mapping the IO-APIC early in boot on Confidential
 * VMs, before ACPI functions can be used.
 */
#define HV_IOAPIC_BASE_ADDRESS	0xfec00000

#define HV_VTL_NORMAL	0x0
#define HV_VTL_SECURE	0x1
#define HV_VTL_MGMT	0x2

union hv_ghcb;

DECLARE_STATIC_KEY_FALSE(isolation_type_snp);
DECLARE_STATIC_KEY_FALSE(isolation_type_tdx);

typedef int (*hyperv_fill_flush_list_func)(
		struct hv_guest_mapping_flush_list *flush,
		void *data);

void hyperv_vector_handler(struct pt_regs *regs);

static inline unsigned char hv_get_nmi_reason(void)
{
	return 0;
}

extern u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2);
extern u64 hv_snp_hypercall(u64 control, u64 param1, u64 param2);
extern u64 hv_std_hypercall(u64 control, u64 param1, u64 param2);

#if IS_ENABLED(CONFIG_HYPERV)
extern void *hv_hypercall_pg;

extern union hv_ghcb * __percpu *hv_ghcb_pg;

bool hv_isolation_type_snp(void);
bool hv_isolation_type_tdx(void);

#ifdef CONFIG_X86_64
DECLARE_STATIC_CALL(hv_hypercall, hv_std_hypercall);
#endif

/*
 * Default INIT GPAT and SEGMENT LIMIT values in struct VMSA,
 * used to start an AP in an enlightened SEV guest.
 */
#define HV_AP_INIT_GPAT_DEFAULT	0x0007040600070406ULL
#define HV_AP_SEGMENT_LIMIT	0xffffffff

/*
 * If the hypercall involves no input or output parameters, the hypervisor
 * ignores the corresponding GPA pointer.
 */
static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
{
	u64 input_address = input ? virt_to_phys(input) : 0;
	u64 output_address = output ? virt_to_phys(output) : 0;

#ifdef CONFIG_X86_64
	return static_call_mod(hv_hypercall)(control, input_address, output_address);
#else
	u32 input_address_hi = upper_32_bits(input_address);
	u32 input_address_lo = lower_32_bits(input_address);
	u32 output_address_hi = upper_32_bits(output_address);
	u32 output_address_lo = lower_32_bits(output_address);
	u64 hv_status;

	if (!hv_hypercall_pg)
		return U64_MAX;

	__asm__ __volatile__(CALL_NOSPEC
			     : "=A" (hv_status),
			       "+c" (input_address_lo), ASM_CALL_CONSTRAINT
			     : "A" (control),
			       "b" (input_address_hi),
			       "D"(output_address_hi), "S"(output_address_lo),
			       THUNK_TARGET(hv_hypercall_pg)
			     : "cc", "memory");
	return hv_status;
#endif /* !x86_64 */
}
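
/*
 * Illustrative sketch (not part of the upstream header): a typical
 * caller of hv_do_hypercall() builds its input in the per-CPU
 * hypercall input page and checks the returned status. The hypercall
 * code and input layout below are examples only; see the callers in
 * arch/x86/hyperv/ for the real usage.
 *
 *	struct hv_tlb_flush *flush;
 *	unsigned long flags;
 *	u64 status;
 *
 *	local_irq_save(flags);
 *	flush = *this_cpu_ptr(hyperv_pcpu_input_arg);
 *	flush->address_space = 0;
 *	flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES |
 *		       HV_FLUSH_ALL_PROCESSORS;
 *	status = hv_do_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE,
 *				 flush, NULL);
 *	local_irq_restore(flags);
 *	if (!hv_result_success(status))
 *		pr_err("flush hypercall failed: 0x%llx\n", status);
 */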

/* Fast hypercall with 8 bytes of input and no output */
static inline u64 _hv_do_fast_hypercall8(u64 control, u64 input1)
{
#ifdef CONFIG_X86_64
	return static_call_mod(hv_hypercall)(control, input1, 0);
#else
	u32 input1_hi = upper_32_bits(input1);
	u32 input1_lo = lower_32_bits(input1);
	u64 hv_status;

	__asm__ __volatile__ (CALL_NOSPEC
			      : "=A"(hv_status),
				"+c"(input1_lo),
				ASM_CALL_CONSTRAINT
			      : "A" (control),
				"b" (input1_hi),
				THUNK_TARGET(hv_hypercall_pg)
			      : "cc", "edi", "esi");
	return hv_status;
#endif
}

static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
{
	u64 control = (u64)code | HV_HYPERCALL_FAST_BIT;

	return _hv_do_fast_hypercall8(control, input1);
}

/* Fast hypercall with 16 bytes of input */
static inline u64 _hv_do_fast_hypercall16(u64 control, u64 input1, u64 input2)
{
#ifdef CONFIG_X86_64
	return static_call_mod(hv_hypercall)(control, input1, input2);
#else
	u32 input1_hi = upper_32_bits(input1);
	u32 input1_lo = lower_32_bits(input1);
	u32 input2_hi = upper_32_bits(input2);
	u32 input2_lo = lower_32_bits(input2);
	u64 hv_status;

	__asm__ __volatile__ (CALL_NOSPEC
			      : "=A"(hv_status),
				"+c"(input1_lo), ASM_CALL_CONSTRAINT
			      : "A" (control), "b" (input1_hi),
				"D"(input2_hi), "S"(input2_lo),
				THUNK_TARGET(hv_hypercall_pg)
			      : "cc");
	return hv_status;
#endif
}

static inline u64 hv_do_fast_hypercall16(u16 code, u64 input1, u64 input2)
{
	u64 control = (u64)code | HV_HYPERCALL_FAST_BIT;

	return _hv_do_fast_hypercall16(control, input1, input2);
}
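
/*
 * Illustrative sketch (not part of the upstream header): fast
 * hypercalls pass their input in registers, so no hypercall input
 * page is needed. The hypercall code and the connection_id variable
 * below are examples only.
 *
 *	u64 status;
 *
 *	status = hv_do_fast_hypercall8(HVCALL_SIGNAL_EVENT, connection_id);
 *	if (!hv_result_success(status))
 *		pr_err("signal event failed: 0x%llx\n", status);
 */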

extern struct hv_vp_assist_page **hv_vp_assist_page;

static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
{
	if (!hv_vp_assist_page)
		return NULL;

	return hv_vp_assist_page[cpu];
}

void __init hyperv_init(void);
void hyperv_setup_mmu_ops(void);
void set_hv_tscchange_cb(void (*cb)(void));
void clear_hv_tscchange_cb(void);
void hyperv_stop_tsc_emulation(void);
int hyperv_flush_guest_mapping(u64 as);
int hyperv_flush_guest_mapping_range(u64 as,
		hyperv_fill_flush_list_func fill_func, void *data);
int hyperv_fill_flush_guest_mapping_list(
		struct hv_guest_mapping_flush_list *flush,
		u64 start_gfn, u64 end_gfn);
void hv_sleep_notifiers_register(void);
void hv_machine_power_off(void);

#ifdef CONFIG_X86_64
void hv_apic_init(void);
void __init hv_init_spinlocks(void);
bool hv_vcpu_is_preempted(int vcpu);
#else
static inline void hv_apic_init(void) {}
#endif

struct irq_domain *hv_create_pci_msi_domain(void);

int hv_map_msi_interrupt(struct irq_data *data,
			 struct hv_interrupt_entry *out_entry);
int hv_map_ioapic_interrupt(int ioapic_id, bool level, int vcpu, int vector,
		struct hv_interrupt_entry *entry);
int hv_unmap_ioapic_interrupt(int ioapic_id, struct hv_interrupt_entry *entry);

#ifdef CONFIG_AMD_MEM_ENCRYPT
bool hv_ghcb_negotiate_protocol(void);
void __noreturn hv_ghcb_terminate(unsigned int set, unsigned int reason);
int hv_snp_boot_ap(u32 apic_id, unsigned long start_ip, unsigned int cpu);
#else
static inline bool hv_ghcb_negotiate_protocol(void) { return false; }
static inline void hv_ghcb_terminate(unsigned int set, unsigned int reason) {}
static inline int hv_snp_boot_ap(u32 apic_id, unsigned long start_ip,
		unsigned int cpu) { return 0; }
#endif

#if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
void hv_vtom_init(void);
void hv_ivm_msr_write(u64 msr, u64 value);
void hv_ivm_msr_read(u64 msr, u64 *value);
#else
static inline void hv_vtom_init(void) {}
static inline void hv_ivm_msr_write(u64 msr, u64 value) {}
static inline void hv_ivm_msr_read(u64 msr, u64 *value) {}
#endif

static inline bool hv_is_synic_msr(unsigned int reg)
{
	return (reg >= HV_X64_MSR_SCONTROL) &&
	       (reg <= HV_X64_MSR_SINT15);
}

static inline bool hv_is_sint_msr(unsigned int reg)
{
	return (reg >= HV_X64_MSR_SINT0) &&
	       (reg <= HV_X64_MSR_SINT15);
}

u64 hv_get_msr(unsigned int reg);
void hv_set_msr(unsigned int reg, u64 value);
u64 hv_get_non_nested_msr(unsigned int reg);
void hv_set_non_nested_msr(unsigned int reg, u64 value);

static __always_inline u64 hv_raw_get_msr(unsigned int reg)
{
	return native_rdmsrq(reg);
}

int hv_apicid_to_vp_index(u32 apic_id);

#if IS_ENABLED(CONFIG_MSHV_ROOT) && IS_ENABLED(CONFIG_CRASH_DUMP)
void hv_root_crash_init(void);
void hv_crash_asm32(void);
void hv_crash_asm64(void);
void hv_crash_asm_end(void);
#else /* CONFIG_MSHV_ROOT && CONFIG_CRASH_DUMP */
static inline void hv_root_crash_init(void) {}
#endif /* CONFIG_MSHV_ROOT && CONFIG_CRASH_DUMP */

#else /* CONFIG_HYPERV */
static inline void hyperv_init(void) {}
static inline void hyperv_setup_mmu_ops(void) {}
static inline void set_hv_tscchange_cb(void (*cb)(void)) {}
static inline void clear_hv_tscchange_cb(void) {}
static inline void hyperv_stop_tsc_emulation(void) {}
static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
{
	return NULL;
}
static inline int hyperv_flush_guest_mapping(u64 as) { return -1; }
static inline int hyperv_flush_guest_mapping_range(u64 as,
		hyperv_fill_flush_list_func fill_func, void *data)
{
	return -1;
}
static inline void hv_set_msr(unsigned int reg, u64 value) { }
static inline u64 hv_get_msr(unsigned int reg) { return 0; }
static inline void hv_set_non_nested_msr(unsigned int reg, u64 value) { }
static inline u64 hv_get_non_nested_msr(unsigned int reg) { return 0; }
static inline int hv_apicid_to_vp_index(u32 apic_id) { return -EINVAL; }
#endif /* CONFIG_HYPERV */

struct mshv_vtl_cpu_context {
	union {
		struct {
			u64 rax;
			u64 rcx;
			u64 rdx;
			u64 rbx;
			u64 cr2;
			u64 rbp;
			u64 rsi;
			u64 rdi;
			u64 r8;
			u64 r9;
			u64 r10;
			u64 r11;
			u64 r12;
			u64 r13;
			u64 r14;
			u64 r15;
		};
		u64 gp_regs[16];
	};

	struct fxregs_state fx_state;
};
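
/*
 * Illustrative sketch (not part of the upstream header): the anonymous
 * union above lets the saved VTL0 state be accessed either by register
 * name or by index, since gp_regs[] aliases the named fields in
 * declaration order (rax is gp_regs[0], rcx is gp_regs[1], and so on).
 * The snippet below, including the some_arg variable, is an example
 * only:
 *
 *	struct mshv_vtl_cpu_context ctx = {};
 *
 *	ctx.rdi = some_arg;		// same storage as ctx.gp_regs[7]
 *	mshv_vtl_return_call(&ctx);	// enter VTL0 with this state
 *	pr_debug("VTL0 rax: 0x%llx\n", ctx.rax);
 */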

#ifdef CONFIG_HYPERV_VTL_MODE
void __init hv_vtl_init_platform(void);
int __init hv_vtl_early_init(void);
void mshv_vtl_return_call(struct mshv_vtl_cpu_context *vtl0);
void mshv_vtl_return_call_init(u64 vtl_return_offset);
void mshv_vtl_return_hypercall(void);
void __mshv_vtl_return_call(struct mshv_vtl_cpu_context *vtl0);
#else
static inline void __init hv_vtl_init_platform(void) {}
static inline int __init hv_vtl_early_init(void) { return 0; }
static inline void mshv_vtl_return_call(struct mshv_vtl_cpu_context *vtl0) {}
static inline void mshv_vtl_return_call_init(u64 vtl_return_offset) {}
static inline void mshv_vtl_return_hypercall(void) {}
static inline void __mshv_vtl_return_call(struct mshv_vtl_cpu_context *vtl0) {}
#endif

#include <asm-generic/mshyperv.h>

#endif