/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MSHYPER_H
#define _ASM_X86_MSHYPER_H

#include <linux/types.h>
#include <linux/nmi.h>
#include <linux/msi.h>
#include <linux/io.h>
#include <linux/static_call.h>
#include <asm/nospec-branch.h>
#include <asm/paravirt.h>
#include <asm/msr.h>
#include <hyperv/hvhdk.h>
#include <asm/fpu/types.h>

/*
 * Hyper-V always provides a single IO-APIC at this MMIO address.
 * Ideally, the value should be looked up in ACPI tables, but it
 * is needed for mapping the IO-APIC early in boot on Confidential
 * VMs, before ACPI functions can be used.
 */
#define HV_IOAPIC_BASE_ADDRESS 0xfec00000

/* Virtual Trust Level (VTL) numbers */
#define HV_VTL_NORMAL	0x0
#define HV_VTL_SECURE	0x1
#define HV_VTL_MGMT	0x2

union hv_ghcb;

DECLARE_STATIC_KEY_FALSE(isolation_type_snp);
DECLARE_STATIC_KEY_FALSE(isolation_type_tdx);

/* Callback used to append entries to a guest-mapping flush list */
typedef int (*hyperv_fill_flush_list_func)(
		struct hv_guest_mapping_flush_list *flush,
		void *data);

void hyperv_vector_handler(struct pt_regs *regs);

/* No legacy NMI reason port on Hyper-V; always report "no reason". */
static inline unsigned char hv_get_nmi_reason(void)
{
	return 0;
}

/* Low-level hypercall entry points: TDX, SNP, and standard guests */
extern u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2);
extern u64 hv_snp_hypercall(u64 control, u64 param1, u64 param2);
extern u64 hv_std_hypercall(u64 control, u64 param1, u64 param2);

#if IS_ENABLED(CONFIG_HYPERV)
extern void *hv_hypercall_pg;

extern union hv_ghcb * __percpu *hv_ghcb_pg;

bool hv_isolation_type_snp(void);
bool hv_isolation_type_tdx(void);

#ifdef CONFIG_X86_64
DECLARE_STATIC_CALL(hv_hypercall, hv_std_hypercall);
#endif

/*
 * DEFAULT INIT GPAT and SEGMENT LIMIT value in struct VMSA
 * to start AP in enlightened SEV guest.
 */
#define HV_AP_INIT_GPAT_DEFAULT		0x0007040600070406ULL
#define HV_AP_SEGMENT_LIMIT		0xffffffff

/*
 * If the hypercall involves no input or output parameters, the hypervisor
 * ignores the corresponding GPA pointer.
 */
static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
{
	u64 input_address = input ? virt_to_phys(input) : 0;
	u64 output_address = output ? virt_to_phys(output) : 0;

#ifdef CONFIG_X86_64
	/* 64-bit: dispatch through the static call patched at init time. */
	return static_call_mod(hv_hypercall)(control, input_address, output_address);
#else
	/* 32-bit ABI: 64-bit values are split across register pairs. */
	u32 input_address_hi = upper_32_bits(input_address);
	u32 input_address_lo = lower_32_bits(input_address);
	u32 output_address_hi = upper_32_bits(output_address);
	u32 output_address_lo = lower_32_bits(output_address);
	u64 hv_status;

	if (!hv_hypercall_pg)
		return U64_MAX;

	/*
	 * "A" = edx:eax pair for control in / status out;
	 * input GPA in ebx:ecx, output GPA in edi:esi.
	 */
	__asm__ __volatile__(CALL_NOSPEC
			     : "=A" (hv_status),
			       "+c" (input_address_lo), ASM_CALL_CONSTRAINT
			     : "A" (control),
			       "b" (input_address_hi),
			       "D"(output_address_hi), "S"(output_address_lo),
			       THUNK_TARGET(hv_hypercall_pg)
			     : "cc", "memory");
	return hv_status;
#endif /* !x86_64 */
}

/* Fast hypercall with 8 bytes of input and no output */
static inline u64 _hv_do_fast_hypercall8(u64 control, u64 input1)
{
#ifdef CONFIG_X86_64
	return static_call_mod(hv_hypercall)(control, input1, 0);
#else
	u32 input1_hi = upper_32_bits(input1);
	u32 input1_lo = lower_32_bits(input1);
	u64 hv_status;

	/* edi/esi carry no input here, but the page may clobber them. */
	__asm__ __volatile__ (CALL_NOSPEC
			      : "=A"(hv_status),
				"+c"(input1_lo),
				ASM_CALL_CONSTRAINT
			      : "A" (control),
				"b" (input1_hi),
				THUNK_TARGET(hv_hypercall_pg)
			      : "cc", "edi", "esi");
	return hv_status;
#endif
}

/* Wrapper: build the control word (code + FAST bit) for an 8-byte input. */
static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
{
	u64 control = (u64)code | HV_HYPERCALL_FAST_BIT;

	return _hv_do_fast_hypercall8(control, input1);
}

/* Fast hypercall with 16 bytes of input */
static inline u64 _hv_do_fast_hypercall16(u64 control, u64 input1, u64 input2)
{
#ifdef CONFIG_X86_64
	return static_call_mod(hv_hypercall)(control, input1, input2);
#else
	u32 input1_hi = upper_32_bits(input1);
	u32 input1_lo = lower_32_bits(input1);
	u32 input2_hi = upper_32_bits(input2);
	u32 input2_lo = lower_32_bits(input2);
	u64 hv_status;

	/* Second 8-byte input travels in edi:esi. */
	__asm__ __volatile__ (CALL_NOSPEC
			      : "=A"(hv_status),
				"+c"(input1_lo), ASM_CALL_CONSTRAINT
			      : "A" (control), "b" (input1_hi),
				"D"(input2_hi), "S"(input2_lo),
				THUNK_TARGET(hv_hypercall_pg)
			      : "cc");
	return hv_status;
#endif
}

/* Wrapper: build the control word (code + FAST bit) for a 16-byte input. */
static inline u64 hv_do_fast_hypercall16(u16 code, u64 input1, u64 input2)
{
	u64 control = (u64)code | HV_HYPERCALL_FAST_BIT;

	return _hv_do_fast_hypercall16(control, input1, input2);
}

extern struct hv_vp_assist_page **hv_vp_assist_page;

/* Return the VP assist page for @cpu, or NULL if the array is not set up. */
static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
{
	if (!hv_vp_assist_page)
		return NULL;

	return hv_vp_assist_page[cpu];
}

void __init hyperv_init(void);
void hyperv_setup_mmu_ops(void);
void set_hv_tscchange_cb(void (*cb)(void));
void clear_hv_tscchange_cb(void);
void hyperv_stop_tsc_emulation(void);
int hyperv_flush_guest_mapping(u64 as);
int hyperv_flush_guest_mapping_range(u64 as,
		hyperv_fill_flush_list_func fill_func, void *data);
int hyperv_fill_flush_guest_mapping_list(
		struct hv_guest_mapping_flush_list *flush,
		u64 start_gfn, u64 end_gfn);

#ifdef CONFIG_X86_64
void hv_apic_init(void);
void __init hv_init_spinlocks(void);
bool hv_vcpu_is_preempted(int vcpu);
#else
static inline void hv_apic_init(void) {}
#endif

struct irq_domain *hv_create_pci_msi_domain(void);

int hv_map_msi_interrupt(struct irq_data *data,
			 struct hv_interrupt_entry *out_entry);
int hv_map_ioapic_interrupt(int ioapic_id, bool level, int vcpu, int vector,
		struct hv_interrupt_entry *entry);
int hv_unmap_ioapic_interrupt(int ioapic_id, struct hv_interrupt_entry *entry);

#ifdef CONFIG_AMD_MEM_ENCRYPT
bool hv_ghcb_negotiate_protocol(void);
void __noreturn hv_ghcb_terminate(unsigned int set, unsigned int reason);
int hv_snp_boot_ap(u32 apic_id, unsigned long start_ip, unsigned int cpu);
#else
/* Stubs when AMD SEV-SNP support is compiled out. */
static inline bool hv_ghcb_negotiate_protocol(void) { return false; }
static inline void hv_ghcb_terminate(unsigned int set, unsigned int reason) {}
static inline int hv_snp_boot_ap(u32 apic_id, unsigned long start_ip,
				 unsigned int cpu) { return 0; }
#endif

#if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
void hv_vtom_init(void);
void hv_ivm_msr_write(u64 msr, u64 value);
void hv_ivm_msr_read(u64 msr, u64 *value);
#else
/* Stubs for non-isolated (non-SNP/TDX) builds. */
static inline void hv_vtom_init(void) {}
static inline void hv_ivm_msr_write(u64 msr, u64 value) {}
static inline void hv_ivm_msr_read(u64 msr, u64 *value) {}
#endif

/* True for any MSR in the SynIC range (SCONTROL..SINT15). */
static inline bool hv_is_synic_msr(unsigned int reg)
{
	return (reg >= HV_X64_MSR_SCONTROL) &&
		(reg <= HV_X64_MSR_SINT15);
}

/* True only for the SINT0..SINT15 subset of the SynIC MSRs. */
static inline bool hv_is_sint_msr(unsigned int reg)
{
	return (reg >= HV_X64_MSR_SINT0) &&
		(reg <= HV_X64_MSR_SINT15);
}

u64 hv_get_msr(unsigned int reg);
void hv_set_msr(unsigned int reg, u64 value);
u64 hv_get_non_nested_msr(unsigned int reg);
void hv_set_non_nested_msr(unsigned int reg, u64 value);

/* Raw native MSR read, bypassing paravirt/Hyper-V MSR handling. */
static __always_inline u64 hv_raw_get_msr(unsigned int reg)
{
	return native_rdmsrq(reg);
}

int hv_apicid_to_vp_index(u32 apic_id);

#if IS_ENABLED(CONFIG_MSHV_ROOT) && IS_ENABLED(CONFIG_CRASH_DUMP)
void hv_root_crash_init(void);
void hv_crash_asm32(void);
void hv_crash_asm64(void);
void hv_crash_asm_end(void);
#else /* CONFIG_MSHV_ROOT && CONFIG_CRASH_DUMP */
static inline void hv_root_crash_init(void) {}
#endif /* CONFIG_MSHV_ROOT && CONFIG_CRASH_DUMP */

#else /* CONFIG_HYPERV */
/* Stubs when Hyper-V guest support is compiled out entirely. */
static inline void hyperv_init(void) {}
static inline void hyperv_setup_mmu_ops(void) {}
static inline void set_hv_tscchange_cb(void (*cb)(void)) {}
static inline void clear_hv_tscchange_cb(void) {}
static inline void hyperv_stop_tsc_emulation(void) {};
static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
{
	return NULL;
}
static inline int hyperv_flush_guest_mapping(u64 as) { return -1; }
static inline int hyperv_flush_guest_mapping_range(u64 as,
		hyperv_fill_flush_list_func fill_func, void *data)
{
	return -1;
}
static inline void hv_set_msr(unsigned int reg, u64 value) { }
static inline u64 hv_get_msr(unsigned int reg) { return 0; }
static inline void hv_set_non_nested_msr(unsigned int reg, u64 value) { }
static inline u64 hv_get_non_nested_msr(unsigned int reg) { return 0; }
static inline int hv_apicid_to_vp_index(u32 apic_id) { return -EINVAL; }
#endif /* CONFIG_HYPERV */

/*
 * CPU context saved/restored across a VTL switch: the GP registers
 * (addressable by name or as an array) plus the FX (FPU/SSE) state.
 */
struct mshv_vtl_cpu_context {
	union {
		struct {
			u64 rax;
			u64 rcx;
			u64 rdx;
			u64 rbx;
			u64 cr2;
			u64 rbp;
			u64 rsi;
			u64 rdi;
			u64 r8;
			u64 r9;
			u64 r10;
			u64 r11;
			u64 r12;
			u64 r13;
			u64 r14;
			u64 r15;
		};
		u64 gp_regs[16];
	};

	struct fxregs_state fx_state;
};

#ifdef CONFIG_HYPERV_VTL_MODE
void __init hv_vtl_init_platform(void);
int __init hv_vtl_early_init(void);
void mshv_vtl_return_call(struct mshv_vtl_cpu_context *vtl0);
void mshv_vtl_return_call_init(u64 vtl_return_offset);
void mshv_vtl_return_hypercall(void);
void __mshv_vtl_return_call(struct mshv_vtl_cpu_context *vtl0);
#else
/* Stubs when VTL mode support is compiled out. */
static inline void __init hv_vtl_init_platform(void) {}
static inline int __init hv_vtl_early_init(void) { return 0; }
static inline void mshv_vtl_return_call(struct mshv_vtl_cpu_context *vtl0) {}
static inline void mshv_vtl_return_call_init(u64 vtl_return_offset) {}
static inline void mshv_vtl_return_hypercall(void) {}
static inline void
__mshv_vtl_return_call(struct mshv_vtl_cpu_context *vtl0) {}
#endif

#include <asm-generic/mshyperv.h>

#endif