/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MSHYPER_H
#define _ASM_X86_MSHYPER_H

#include <linux/types.h>
#include <linux/nmi.h>
#include <linux/msi.h>
#include <linux/io.h>
#include <linux/static_call.h>
#include <asm/nospec-branch.h>
#include <asm/paravirt.h>
#include <asm/msr.h>
#include <hyperv/hvhdk.h>
#include <asm/fpu/types.h>

/*
 * Hyper-V always provides a single IO-APIC at this MMIO address.
 * Ideally, the value should be looked up in ACPI tables, but it
 * is needed for mapping the IO-APIC early in boot on Confidential
 * VMs, before ACPI functions can be used.
 */
#define HV_IOAPIC_BASE_ADDRESS	0xfec00000

#define HV_VTL_NORMAL	0x0
#define HV_VTL_SECURE	0x1
#define HV_VTL_MGMT	0x2

union hv_ghcb;

DECLARE_STATIC_KEY_FALSE(isolation_type_snp);
DECLARE_STATIC_KEY_FALSE(isolation_type_tdx);

typedef int (*hyperv_fill_flush_list_func)(
		struct hv_guest_mapping_flush_list *flush,
		void *data);

void hyperv_vector_handler(struct pt_regs *regs);

static inline unsigned char hv_get_nmi_reason(void)
{
	return 0;
}

extern u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2);
extern u64 hv_snp_hypercall(u64 control, u64 param1, u64 param2);
extern u64 hv_std_hypercall(u64 control, u64 param1, u64 param2);

#if IS_ENABLED(CONFIG_HYPERV)
extern void *hv_hypercall_pg;

extern union hv_ghcb * __percpu *hv_ghcb_pg;

bool hv_isolation_type_snp(void);
bool hv_isolation_type_tdx(void);

#ifdef CONFIG_X86_64
DECLARE_STATIC_CALL(hv_hypercall, hv_std_hypercall);
#endif

/*
 * Default INIT values for GPAT and the segment limit in struct VMSA,
 * used to start an AP in an enlightened SEV guest.
 */
#define HV_AP_INIT_GPAT_DEFAULT	0x0007040600070406ULL
#define HV_AP_SEGMENT_LIMIT	0xffffffff

/*
 * If the hypercall involves no input or output parameters, the hypervisor
 * ignores the corresponding GPA pointer.
 */
static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
{
	u64 input_address = input ? virt_to_phys(input) : 0;
	u64 output_address = output ? virt_to_phys(output) : 0;

#ifdef CONFIG_X86_64
	return static_call_mod(hv_hypercall)(control, input_address, output_address);
#else
	u32 input_address_hi = upper_32_bits(input_address);
	u32 input_address_lo = lower_32_bits(input_address);
	u32 output_address_hi = upper_32_bits(output_address);
	u32 output_address_lo = lower_32_bits(output_address);
	u64 hv_status;

	if (!hv_hypercall_pg)
		return U64_MAX;

	__asm__ __volatile__(CALL_NOSPEC
			     : "=A" (hv_status),
			       "+c" (input_address_lo), ASM_CALL_CONSTRAINT
			     : "A" (control),
			       "b" (input_address_hi),
			       "D"(output_address_hi), "S"(output_address_lo),
			       THUNK_TARGET(hv_hypercall_pg)
			     : "cc", "memory");
	return hv_status;
#endif /* !x86_64 */
}
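
/*
 * Illustrative usage sketch, not part of this interface: a caller passes
 * guest-physical-addressable buffers (or NULL) and checks the completion
 * status carried in the low 16 bits of the return value, e.g.
 *
 *	status = hv_do_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE,
 *				 flush, NULL);
 *	if (!hv_result_success(status))
 *		return -EIO;
 *
 * hv_result()/hv_result_success() are provided by asm-generic/mshyperv.h,
 * which is included at the end of this header.
 */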
/* Fast hypercall with 8 bytes of input and no output */
static inline u64 _hv_do_fast_hypercall8(u64 control, u64 input1)
{
#ifdef CONFIG_X86_64
	return static_call_mod(hv_hypercall)(control, input1, 0);
#else
	u32 input1_hi = upper_32_bits(input1);
	u32 input1_lo = lower_32_bits(input1);
	u64 hv_status;

	__asm__ __volatile__ (CALL_NOSPEC
			      : "=A"(hv_status),
				"+c"(input1_lo),
				ASM_CALL_CONSTRAINT
			      : "A" (control),
				"b" (input1_hi),
				THUNK_TARGET(hv_hypercall_pg)
			      : "cc", "edi", "esi");
	return hv_status;
#endif
}

static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
{
	u64 control = (u64)code | HV_HYPERCALL_FAST_BIT;

	return _hv_do_fast_hypercall8(control, input1);
}

/* Fast hypercall with 16 bytes of input */
static inline u64 _hv_do_fast_hypercall16(u64 control, u64 input1, u64 input2)
{
#ifdef CONFIG_X86_64
	return static_call_mod(hv_hypercall)(control, input1, input2);
#else
	u32 input1_hi = upper_32_bits(input1);
	u32 input1_lo = lower_32_bits(input1);
	u32 input2_hi = upper_32_bits(input2);
	u32 input2_lo = lower_32_bits(input2);
	u64 hv_status;

	__asm__ __volatile__ (CALL_NOSPEC
			      : "=A"(hv_status),
				"+c"(input1_lo), ASM_CALL_CONSTRAINT
			      : "A" (control), "b" (input1_hi),
				"D"(input2_hi), "S"(input2_lo),
				THUNK_TARGET(hv_hypercall_pg)
			      : "cc");
	return hv_status;
#endif
}

static inline u64 hv_do_fast_hypercall16(u16 code, u64 input1, u64 input2)
{
	u64 control = (u64)code | HV_HYPERCALL_FAST_BIT;

	return _hv_do_fast_hypercall16(control, input1, input2);
}
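
/*
 * Illustrative sketch, not a definitive example: "fast" hypercalls pass
 * their input in registers rather than through GPA-addressed memory,
 * avoiding the cost of staging an input page for small payloads, e.g.
 *
 *	hv_do_fast_hypercall8(HVCALL_NOTIFY_LONG_SPIN_WAIT, 0);
 *
 * The wrappers above OR HV_HYPERCALL_FAST_BIT into the control word so
 * the hypervisor interprets the parameters as immediate values rather
 * than as guest physical addresses.
 */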
extern struct hv_vp_assist_page **hv_vp_assist_page;

static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
{
	if (!hv_vp_assist_page)
		return NULL;

	return hv_vp_assist_page[cpu];
}

void __init hyperv_init(void);
void hyperv_setup_mmu_ops(void);
void set_hv_tscchange_cb(void (*cb)(void));
void clear_hv_tscchange_cb(void);
void hyperv_stop_tsc_emulation(void);
int hyperv_flush_guest_mapping(u64 as);
int hyperv_flush_guest_mapping_range(u64 as,
		hyperv_fill_flush_list_func fill_func, void *data);
int hyperv_fill_flush_guest_mapping_list(
		struct hv_guest_mapping_flush_list *flush,
		u64 start_gfn, u64 end_gfn);
void hv_sleep_notifiers_register(void);
void hv_machine_power_off(void);

#ifdef CONFIG_X86_64
void hv_apic_init(void);
void __init hv_init_spinlocks(void);
bool hv_vcpu_is_preempted(int vcpu);
#else
static inline void hv_apic_init(void) {}
#endif

struct irq_domain *hv_create_pci_msi_domain(void);

int hv_map_msi_interrupt(struct irq_data *data,
			 struct hv_interrupt_entry *out_entry);
int hv_map_ioapic_interrupt(int ioapic_id, bool level, int vcpu, int vector,
			    struct hv_interrupt_entry *entry);
int hv_unmap_ioapic_interrupt(int ioapic_id, struct hv_interrupt_entry *entry);

#ifdef CONFIG_AMD_MEM_ENCRYPT
bool hv_ghcb_negotiate_protocol(void);
void __noreturn hv_ghcb_terminate(unsigned int set, unsigned int reason);
int hv_snp_boot_ap(u32 apic_id, unsigned long start_ip, unsigned int cpu);
#else
static inline bool hv_ghcb_negotiate_protocol(void) { return false; }
static inline void hv_ghcb_terminate(unsigned int set, unsigned int reason) {}
static inline int hv_snp_boot_ap(u32 apic_id, unsigned long start_ip,
				 unsigned int cpu) { return 0; }
#endif

#if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
void hv_vtom_init(void);
void hv_ivm_msr_write(u64 msr, u64 value);
void hv_ivm_msr_read(u64 msr, u64 *value);
#else
static inline void hv_vtom_init(void) {}
static inline void hv_ivm_msr_write(u64 msr, u64 value) {}
static inline void hv_ivm_msr_read(u64 msr, u64 *value) {}
#endif

static inline bool hv_is_synic_msr(unsigned int reg)
{
	return (reg >= HV_X64_MSR_SCONTROL) &&
	       (reg <= HV_X64_MSR_SINT15);
}

static inline bool hv_is_sint_msr(unsigned int reg)
{
	return (reg >= HV_X64_MSR_SINT0) &&
	       (reg <= HV_X64_MSR_SINT15);
}

u64 hv_get_msr(unsigned int reg);
void hv_set_msr(unsigned int reg, u64 value);
u64 hv_get_non_nested_msr(unsigned int reg);
void hv_set_non_nested_msr(unsigned int reg, u64 value);

static __always_inline u64 hv_raw_get_msr(unsigned int reg)
{
	return native_rdmsrq(reg);
}

int hv_apicid_to_vp_index(u32 apic_id);

#if IS_ENABLED(CONFIG_MSHV_ROOT) && IS_ENABLED(CONFIG_CRASH_DUMP)
void hv_root_crash_init(void);
void hv_crash_asm32(void);
void hv_crash_asm64(void);
void hv_crash_asm_end(void);
#else /* CONFIG_MSHV_ROOT && CONFIG_CRASH_DUMP */
static inline void hv_root_crash_init(void) {}
#endif /* CONFIG_MSHV_ROOT && CONFIG_CRASH_DUMP */

#else /* CONFIG_HYPERV */
static inline void hyperv_init(void) {}
static inline void hyperv_setup_mmu_ops(void) {}
static inline void set_hv_tscchange_cb(void (*cb)(void)) {}
static inline void clear_hv_tscchange_cb(void) {}
static inline void hyperv_stop_tsc_emulation(void) {}
static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
{
	return NULL;
}
static inline int hyperv_flush_guest_mapping(u64 as) { return -1; }
static inline int hyperv_flush_guest_mapping_range(u64 as,
		hyperv_fill_flush_list_func fill_func, void *data)
{
	return -1;
}
static inline void hv_set_msr(unsigned int reg, u64 value) { }
static inline u64 hv_get_msr(unsigned int reg) { return 0; }
static inline void hv_set_non_nested_msr(unsigned int reg, u64 value) { }
static inline u64 hv_get_non_nested_msr(unsigned int reg) { return 0; }
static inline int hv_apicid_to_vp_index(u32 apic_id) { return -EINVAL; }
#endif /* CONFIG_HYPERV */

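/*
 * CPU context for the lower VTL, as passed to the mshv_vtl return-call
 * paths declared below (see the vtl0 parameter of mshv_vtl_return_call()).
 * The anonymous union lets the general-purpose registers be addressed
 * either by name or by index through gp_regs[16]; fx_state holds the
 * FPU/SSE registers in FXSAVE format (struct fxregs_state from
 * <asm/fpu/types.h>).
 */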
struct mshv_vtl_cpu_context {
	union {
		struct {
			u64 rax;
			u64 rcx;
			u64 rdx;
			u64 rbx;
			u64 cr2;
			u64 rbp;
			u64 rsi;
			u64 rdi;
			u64 r8;
			u64 r9;
			u64 r10;
			u64 r11;
			u64 r12;
			u64 r13;
			u64 r14;
			u64 r15;
		};
		u64 gp_regs[16];
	};

	struct fxregs_state fx_state;
};

#ifdef CONFIG_HYPERV_VTL_MODE
void __init hv_vtl_init_platform(void);
int __init hv_vtl_early_init(void);
void mshv_vtl_return_call(struct mshv_vtl_cpu_context *vtl0);
void mshv_vtl_return_call_init(u64 vtl_return_offset);
void mshv_vtl_return_hypercall(void);
void __mshv_vtl_return_call(struct mshv_vtl_cpu_context *vtl0);
#else
static inline void __init hv_vtl_init_platform(void) {}
static inline int __init hv_vtl_early_init(void) { return 0; }
static inline void mshv_vtl_return_call(struct mshv_vtl_cpu_context *vtl0) {}
static inline void mshv_vtl_return_call_init(u64 vtl_return_offset) {}
static inline void mshv_vtl_return_hypercall(void) {}
static inline void __mshv_vtl_return_call(struct mshv_vtl_cpu_context *vtl0) {}
#endif

#include <asm-generic/mshyperv.h>

#endif /* _ASM_X86_MSHYPER_H */