xref: /linux/arch/x86/include/asm/mshyperv.h (revision e96204e5e96ea3cacb5686e06ed29977c023254f)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_X86_MSHYPER_H
3 #define _ASM_X86_MSHYPER_H
4 
5 #include <linux/types.h>
6 #include <linux/nmi.h>
7 #include <linux/msi.h>
8 #include <linux/io.h>
9 #include <asm/nospec-branch.h>
10 #include <asm/paravirt.h>
11 #include <hyperv/hvhdk.h>
12 
13 /*
14  * Hyper-V always provides a single IO-APIC at this MMIO address.
15  * Ideally, the value should be looked up in ACPI tables, but it
16  * is needed for mapping the IO-APIC early in boot on Confidential
17  * VMs, before ACPI functions can be used.
18  */
19 #define HV_IOAPIC_BASE_ADDRESS 0xfec00000
20 
21 #define HV_VTL_NORMAL 0x0
22 #define HV_VTL_SECURE 0x1
23 #define HV_VTL_MGMT   0x2
24 
25 union hv_ghcb;
26 
27 DECLARE_STATIC_KEY_FALSE(isolation_type_snp);
28 DECLARE_STATIC_KEY_FALSE(isolation_type_tdx);
29 
30 typedef int (*hyperv_fill_flush_list_func)(
31 		struct hv_guest_mapping_flush_list *flush,
32 		void *data);
33 
34 void hyperv_vector_handler(struct pt_regs *regs);
35 
/*
 * Always report 0 ("no reason") for NMIs; the platform NMI reason port
 * is not consulted on Hyper-V.
 */
static inline unsigned char hv_get_nmi_reason(void)
{
	return 0;
}
40 
41 #if IS_ENABLED(CONFIG_HYPERV)
42 extern bool hyperv_paravisor_present;
43 
44 extern void *hv_hypercall_pg;
45 
46 extern union hv_ghcb * __percpu *hv_ghcb_pg;
47 
48 bool hv_isolation_type_snp(void);
49 bool hv_isolation_type_tdx(void);
50 u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2);
51 
/*
 * Default INIT GPAT and segment-limit values for the VMSA used to
 * start an AP in an enlightened SEV guest.
 */
56 #define HV_AP_INIT_GPAT_DEFAULT		0x0007040600070406ULL
57 #define HV_AP_SEGMENT_LIMIT		0xffffffff
58 
59 int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages);
60 int hv_call_add_logical_proc(int node, u32 lp_index, u32 acpi_id);
61 int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags);
62 
63 /*
64  * If the hypercall involves no input or output parameters, the hypervisor
65  * ignores the corresponding GPA pointer.
66  */
static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
{
	/* The hypervisor takes guest physical addresses, not virtual ones. */
	u64 input_address = input ? virt_to_phys(input) : 0;
	u64 output_address = output ? virt_to_phys(output) : 0;
	u64 hv_status;

#ifdef CONFIG_X86_64
	/* Fully enlightened TDX guest: the hypercall goes via the TDX module. */
	if (hv_isolation_type_tdx() && !hyperv_paravisor_present)
		return hv_tdx_hypercall(control, input_address, output_address);

	/*
	 * Fully enlightened SNP guest: issue VMMCALL directly.
	 * Register ABI (from the constraints below): RCX = control,
	 * RDX = input GPA, R8 = output GPA; RAX = status on return.
	 */
	if (hv_isolation_type_snp() && !hyperv_paravisor_present) {
		__asm__ __volatile__("mov %4, %%r8\n"
				     "vmmcall"
				     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
				       "+c" (control), "+d" (input_address)
				     :  "r" (output_address)
				     : "cc", "memory", "r8", "r9", "r10", "r11");
		return hv_status;
	}

	/* Hypercall page not set up (early boot or failed init). */
	if (!hv_hypercall_pg)
		return U64_MAX;

	/* Regular path: indirect call through the hypercall page. */
	__asm__ __volatile__("mov %4, %%r8\n"
			     CALL_NOSPEC
			     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
			       "+c" (control), "+d" (input_address)
			     :  "r" (output_address),
				THUNK_TARGET(hv_hypercall_pg)
			     : "cc", "memory", "r8", "r9", "r10", "r11");
#else
	/* 32-bit: 64-bit values are split across register pairs. */
	u32 input_address_hi = upper_32_bits(input_address);
	u32 input_address_lo = lower_32_bits(input_address);
	u32 output_address_hi = upper_32_bits(output_address);
	u32 output_address_lo = lower_32_bits(output_address);

	if (!hv_hypercall_pg)
		return U64_MAX;

	__asm__ __volatile__(CALL_NOSPEC
			     : "=A" (hv_status),
			       "+c" (input_address_lo), ASM_CALL_CONSTRAINT
			     : "A" (control),
			       "b" (input_address_hi),
			       "D"(output_address_hi), "S"(output_address_lo),
			       THUNK_TARGET(hv_hypercall_pg)
			     : "cc", "memory");
#endif /* !x86_64 */
	return hv_status;
}
117 
118 /* Hypercall to the L0 hypervisor */
119 static inline u64 hv_do_nested_hypercall(u64 control, void *input, void *output)
120 {
121 	return hv_do_hypercall(control | HV_HYPERCALL_NESTED, input, output);
122 }
123 
124 /* Fast hypercall with 8 bytes of input and no output */
125 static inline u64 _hv_do_fast_hypercall8(u64 control, u64 input1)
126 {
127 	u64 hv_status;
128 
129 #ifdef CONFIG_X86_64
130 	if (hv_isolation_type_tdx() && !hyperv_paravisor_present)
131 		return hv_tdx_hypercall(control, input1, 0);
132 
133 	if (hv_isolation_type_snp() && !hyperv_paravisor_present) {
134 		__asm__ __volatile__(
135 				"vmmcall"
136 				: "=a" (hv_status), ASM_CALL_CONSTRAINT,
137 				"+c" (control), "+d" (input1)
138 				:: "cc", "r8", "r9", "r10", "r11");
139 	} else {
140 		__asm__ __volatile__(CALL_NOSPEC
141 				     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
142 				       "+c" (control), "+d" (input1)
143 				     : THUNK_TARGET(hv_hypercall_pg)
144 				     : "cc", "r8", "r9", "r10", "r11");
145 	}
146 #else
147 	{
148 		u32 input1_hi = upper_32_bits(input1);
149 		u32 input1_lo = lower_32_bits(input1);
150 
151 		__asm__ __volatile__ (CALL_NOSPEC
152 				      : "=A"(hv_status),
153 					"+c"(input1_lo),
154 					ASM_CALL_CONSTRAINT
155 				      :	"A" (control),
156 					"b" (input1_hi),
157 					THUNK_TARGET(hv_hypercall_pg)
158 				      : "cc", "edi", "esi");
159 	}
160 #endif
161 		return hv_status;
162 }
163 
164 static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
165 {
166 	u64 control = (u64)code | HV_HYPERCALL_FAST_BIT;
167 
168 	return _hv_do_fast_hypercall8(control, input1);
169 }
170 
171 static inline u64 hv_do_fast_nested_hypercall8(u16 code, u64 input1)
172 {
173 	u64 control = (u64)code | HV_HYPERCALL_FAST_BIT | HV_HYPERCALL_NESTED;
174 
175 	return _hv_do_fast_hypercall8(control, input1);
176 }
177 
/*
 * Fast hypercall with 16 bytes of input and no output.
 * Inputs travel in registers: RDX and R8 on x86_64; the EDX:EBX and
 * EDI:ESI pairs on 32-bit (per the asm constraints below).
 */
static inline u64 _hv_do_fast_hypercall16(u64 control, u64 input1, u64 input2)
{
	u64 hv_status;

#ifdef CONFIG_X86_64
	/* Fully enlightened TDX guest: route through the TDX module. */
	if (hv_isolation_type_tdx() && !hyperv_paravisor_present)
		return hv_tdx_hypercall(control, input1, input2);

	if (hv_isolation_type_snp() && !hyperv_paravisor_present) {
		/* Fully enlightened SNP guest: direct VMMCALL, input2 in R8. */
		__asm__ __volatile__("mov %4, %%r8\n"
				     "vmmcall"
				     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
				       "+c" (control), "+d" (input1)
				     : "r" (input2)
				     : "cc", "r8", "r9", "r10", "r11");
	} else {
		/* Regular guest: indirect call through the hypercall page. */
		__asm__ __volatile__("mov %4, %%r8\n"
				     CALL_NOSPEC
				     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
				       "+c" (control), "+d" (input1)
				     : "r" (input2),
				       THUNK_TARGET(hv_hypercall_pg)
				     : "cc", "r8", "r9", "r10", "r11");
	}
#else
	{
		/* 32-bit: split both 64-bit inputs across register pairs. */
		u32 input1_hi = upper_32_bits(input1);
		u32 input1_lo = lower_32_bits(input1);
		u32 input2_hi = upper_32_bits(input2);
		u32 input2_lo = lower_32_bits(input2);

		__asm__ __volatile__ (CALL_NOSPEC
				      : "=A"(hv_status),
					"+c"(input1_lo), ASM_CALL_CONSTRAINT
				      :	"A" (control), "b" (input1_hi),
					"D"(input2_hi), "S"(input2_lo),
					THUNK_TARGET(hv_hypercall_pg)
				      : "cc");
	}
#endif
	return hv_status;
}
221 
222 static inline u64 hv_do_fast_hypercall16(u16 code, u64 input1, u64 input2)
223 {
224 	u64 control = (u64)code | HV_HYPERCALL_FAST_BIT;
225 
226 	return _hv_do_fast_hypercall16(control, input1, input2);
227 }
228 
229 static inline u64 hv_do_fast_nested_hypercall16(u16 code, u64 input1, u64 input2)
230 {
231 	u64 control = (u64)code | HV_HYPERCALL_FAST_BIT | HV_HYPERCALL_NESTED;
232 
233 	return _hv_do_fast_hypercall16(control, input1, input2);
234 }
235 
236 extern struct hv_vp_assist_page **hv_vp_assist_page;
237 
238 static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
239 {
240 	if (!hv_vp_assist_page)
241 		return NULL;
242 
243 	return hv_vp_assist_page[cpu];
244 }
245 
246 void __init hyperv_init(void);
247 void hyperv_setup_mmu_ops(void);
248 void set_hv_tscchange_cb(void (*cb)(void));
249 void clear_hv_tscchange_cb(void);
250 void hyperv_stop_tsc_emulation(void);
251 int hyperv_flush_guest_mapping(u64 as);
252 int hyperv_flush_guest_mapping_range(u64 as,
253 		hyperv_fill_flush_list_func fill_func, void *data);
254 int hyperv_fill_flush_guest_mapping_list(
255 		struct hv_guest_mapping_flush_list *flush,
256 		u64 start_gfn, u64 end_gfn);
257 
258 #ifdef CONFIG_X86_64
259 void hv_apic_init(void);
260 void __init hv_init_spinlocks(void);
261 bool hv_vcpu_is_preempted(int vcpu);
262 #else
263 static inline void hv_apic_init(void) {}
264 #endif
265 
266 struct irq_domain *hv_create_pci_msi_domain(void);
267 
268 int hv_map_ioapic_interrupt(int ioapic_id, bool level, int vcpu, int vector,
269 		struct hv_interrupt_entry *entry);
270 int hv_unmap_ioapic_interrupt(int ioapic_id, struct hv_interrupt_entry *entry);
271 
272 #ifdef CONFIG_AMD_MEM_ENCRYPT
273 bool hv_ghcb_negotiate_protocol(void);
274 void __noreturn hv_ghcb_terminate(unsigned int set, unsigned int reason);
275 int hv_snp_boot_ap(u32 cpu, unsigned long start_ip);
276 #else
/* !CONFIG_AMD_MEM_ENCRYPT stubs: no GHCB protocol, AP boot trivially OK. */
static inline bool hv_ghcb_negotiate_protocol(void) { return false; }
static inline void hv_ghcb_terminate(unsigned int set, unsigned int reason) {}
static inline int hv_snp_boot_ap(u32 cpu, unsigned long start_ip) { return 0; }
280 #endif
281 
282 #if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
283 void hv_vtom_init(void);
284 void hv_ivm_msr_write(u64 msr, u64 value);
285 void hv_ivm_msr_read(u64 msr, u64 *value);
286 #else
/* Stubs when neither SEV nor TDX guest support is configured. */
static inline void hv_vtom_init(void) {}
static inline void hv_ivm_msr_write(u64 msr, u64 value) {}
static inline void hv_ivm_msr_read(u64 msr, u64 *value) {}
290 #endif
291 
292 static inline bool hv_is_synic_msr(unsigned int reg)
293 {
294 	return (reg >= HV_X64_MSR_SCONTROL) &&
295 	       (reg <= HV_X64_MSR_SINT15);
296 }
297 
298 static inline bool hv_is_sint_msr(unsigned int reg)
299 {
300 	return (reg >= HV_X64_MSR_SINT0) &&
301 	       (reg <= HV_X64_MSR_SINT15);
302 }
303 
304 u64 hv_get_msr(unsigned int reg);
305 void hv_set_msr(unsigned int reg, u64 value);
306 u64 hv_get_non_nested_msr(unsigned int reg);
307 void hv_set_non_nested_msr(unsigned int reg, u64 value);
308 
/*
 * Read an MSR directly via __rdmsr(), bypassing the hv_get_msr()
 * wrappers declared above.
 */
static __always_inline u64 hv_raw_get_msr(unsigned int reg)
{
	return __rdmsr(reg);
}
313 
314 #else /* CONFIG_HYPERV */
315 static inline void hyperv_init(void) {}
316 static inline void hyperv_setup_mmu_ops(void) {}
317 static inline void set_hv_tscchange_cb(void (*cb)(void)) {}
318 static inline void clear_hv_tscchange_cb(void) {}
319 static inline void hyperv_stop_tsc_emulation(void) {};
320 static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
321 {
322 	return NULL;
323 }
324 static inline int hyperv_flush_guest_mapping(u64 as) { return -1; }
325 static inline int hyperv_flush_guest_mapping_range(u64 as,
326 		hyperv_fill_flush_list_func fill_func, void *data)
327 {
328 	return -1;
329 }
330 static inline void hv_set_msr(unsigned int reg, u64 value) { }
331 static inline u64 hv_get_msr(unsigned int reg) { return 0; }
332 static inline void hv_set_non_nested_msr(unsigned int reg, u64 value) { }
333 static inline u64 hv_get_non_nested_msr(unsigned int reg) { return 0; }
334 #endif /* CONFIG_HYPERV */
335 
336 
337 #ifdef CONFIG_HYPERV_VTL_MODE
338 void __init hv_vtl_init_platform(void);
339 int __init hv_vtl_early_init(void);
340 #else
/* VTL mode disabled: platform setup and early init are no-ops. */
static inline void __init hv_vtl_init_platform(void) {}
static inline int __init hv_vtl_early_init(void) { return 0; }
343 #endif
344 
345 #include <asm-generic/mshyperv.h>
346 
347 #endif
348