/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MSHYPER_H
#define _ASM_X86_MSHYPER_H

#include <linux/types.h>
#include <linux/nmi.h>
#include <linux/msi.h>
#include <linux/io.h>
#include <asm/hyperv-tlfs.h>
#include <asm/nospec-branch.h>
#include <asm/paravirt.h>
#include <asm/msr.h>

/*
 * Hyper-V always provides a single IO-APIC at this MMIO address.
 * Ideally, the value should be looked up in ACPI tables, but it
 * is needed for mapping the IO-APIC early in boot on Confidential
 * VMs, before ACPI functions can be used.
 */
#define HV_IOAPIC_BASE_ADDRESS 0xfec00000

#define HV_VTL_NORMAL 0x0
#define HV_VTL_SECURE 0x1
#define HV_VTL_MGMT   0x2

union hv_ghcb;

DECLARE_STATIC_KEY_FALSE(isolation_type_snp);
DECLARE_STATIC_KEY_FALSE(isolation_type_tdx);

typedef int (*hyperv_fill_flush_list_func)(
		struct hv_guest_mapping_flush_list *flush,
		void *data);

void hyperv_vector_handler(struct pt_regs *regs);

static inline unsigned char hv_get_nmi_reason(void)
{
	return 0;
}

#if IS_ENABLED(CONFIG_HYPERV)
extern int hyperv_init_cpuhp;
extern bool hyperv_paravisor_present;

extern void *hv_hypercall_pg;

extern u64 hv_current_partition_id;

extern union hv_ghcb * __percpu *hv_ghcb_pg;

bool hv_isolation_type_snp(void);
bool hv_isolation_type_tdx(void);
u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2);

/*
 * Default INIT GPAT and SEGMENT LIMIT values in struct VMSA, used to
 * start an AP in an enlightened SEV guest.
 */
#define HV_AP_INIT_GPAT_DEFAULT		0x0007040600070406ULL
#define HV_AP_SEGMENT_LIMIT		0xffffffff

int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages);
int hv_call_add_logical_proc(int node, u32 lp_index, u32 acpi_id);
int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags);

/*
 * If the hypercall involves no input or output parameters, the hypervisor
 * ignores the corresponding GPA pointer.
 */
static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
{
	u64 input_address = input ? virt_to_phys(input) : 0;
	u64 output_address = output ? virt_to_phys(output) : 0;
	u64 hv_status;

#ifdef CONFIG_X86_64
	if (hv_isolation_type_tdx() && !hyperv_paravisor_present)
		return hv_tdx_hypercall(control, input_address, output_address);

	if (hv_isolation_type_snp() && !hyperv_paravisor_present) {
		__asm__ __volatile__("mov %4, %%r8\n"
				     "vmmcall"
				     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
				       "+c" (control), "+d" (input_address)
				     :  "r" (output_address)
				     : "cc", "memory", "r8", "r9", "r10", "r11");
		return hv_status;
	}

	if (!hv_hypercall_pg)
		return U64_MAX;

	__asm__ __volatile__("mov %4, %%r8\n"
			     CALL_NOSPEC
			     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
			       "+c" (control), "+d" (input_address)
			     :  "r" (output_address),
				THUNK_TARGET(hv_hypercall_pg)
			     : "cc", "memory", "r8", "r9", "r10", "r11");
#else
	u32 input_address_hi = upper_32_bits(input_address);
	u32 input_address_lo = lower_32_bits(input_address);
	u32 output_address_hi = upper_32_bits(output_address);
	u32 output_address_lo = lower_32_bits(output_address);

	if (!hv_hypercall_pg)
		return U64_MAX;

	__asm__ __volatile__(CALL_NOSPEC
			     : "=A" (hv_status),
			       "+c" (input_address_lo), ASM_CALL_CONSTRAINT
			     : "A" (control),
			       "b" (input_address_hi),
			       "D"(output_address_hi), "S"(output_address_lo),
			       THUNK_TARGET(hv_hypercall_pg)
			     : "cc", "memory");
#endif /* !x86_64 */
	return hv_status;
}
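
/*
 * Illustrative caller pattern (a sketch, not part of this interface):
 * callers typically pass the per-CPU hypercall input page declared in
 * <asm-generic/mshyperv.h>, filled under local_irq_save() so the page is
 * not reused concurrently. HVCALL_EXAMPLE and struct hv_example_input are
 * placeholders for a real call code and its TLFS-defined input layout:
 *
 *	unsigned long flags;
 *	struct hv_example_input *input;
 *	u64 status;
 *
 *	local_irq_save(flags);
 *	input = *this_cpu_ptr(hyperv_pcpu_input_arg);
 *	memset(input, 0, sizeof(*input));
 *	// fill in the call-specific fields of *input here
 *	status = hv_do_hypercall(HVCALL_EXAMPLE, input, NULL);
 *	local_irq_restore(flags);
 *
 *	if (!hv_result_success(status))
 *		pr_err("hypercall failed: 0x%llx\n", status);
 */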

/* Hypercall to the L0 hypervisor */
static inline u64 hv_do_nested_hypercall(u64 control, void *input, void *output)
{
	return hv_do_hypercall(control | HV_HYPERCALL_NESTED, input, output);
}

/* Fast hypercall with 8 bytes of input and no output */
static inline u64 _hv_do_fast_hypercall8(u64 control, u64 input1)
{
	u64 hv_status;

#ifdef CONFIG_X86_64
	if (hv_isolation_type_tdx() && !hyperv_paravisor_present)
		return hv_tdx_hypercall(control, input1, 0);

	if (hv_isolation_type_snp() && !hyperv_paravisor_present) {
		__asm__ __volatile__(
				"vmmcall"
				: "=a" (hv_status), ASM_CALL_CONSTRAINT,
				"+c" (control), "+d" (input1)
				:: "cc", "r8", "r9", "r10", "r11");
	} else {
		__asm__ __volatile__(CALL_NOSPEC
				     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
				       "+c" (control), "+d" (input1)
				     : THUNK_TARGET(hv_hypercall_pg)
				     : "cc", "r8", "r9", "r10", "r11");
	}
#else
	{
		u32 input1_hi = upper_32_bits(input1);
		u32 input1_lo = lower_32_bits(input1);

		__asm__ __volatile__ (CALL_NOSPEC
				      : "=A"(hv_status),
					"+c"(input1_lo),
					ASM_CALL_CONSTRAINT
				      :	"A" (control),
					"b" (input1_hi),
					THUNK_TARGET(hv_hypercall_pg)
				      : "cc", "edi", "esi");
	}
#endif
	return hv_status;
}

static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
{
	u64 control = (u64)code | HV_HYPERCALL_FAST_BIT;

	return _hv_do_fast_hypercall8(control, input1);
}

static inline u64 hv_do_fast_nested_hypercall8(u16 code, u64 input1)
{
	u64 control = (u64)code | HV_HYPERCALL_FAST_BIT | HV_HYPERCALL_NESTED;

	return _hv_do_fast_hypercall8(control, input1);
}
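
/*
 * Illustrative use (a sketch, not part of this interface): a fast hypercall
 * carries its input entirely in registers, so no input page needs to be set
 * up. The Hyper-V spinlock enlightenment reports a long spin wait roughly
 * along these lines, where pause_count is a placeholder for whatever spin
 * counter the caller maintains:
 *
 *	hv_do_fast_hypercall8(HVCALL_NOTIFY_LONG_SPIN_WAIT, pause_count);
 */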

/* Fast hypercall with 16 bytes of input */
static inline u64 _hv_do_fast_hypercall16(u64 control, u64 input1, u64 input2)
{
	u64 hv_status;

#ifdef CONFIG_X86_64
	if (hv_isolation_type_tdx() && !hyperv_paravisor_present)
		return hv_tdx_hypercall(control, input1, input2);

	if (hv_isolation_type_snp() && !hyperv_paravisor_present) {
		__asm__ __volatile__("mov %4, %%r8\n"
				     "vmmcall"
				     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
				       "+c" (control), "+d" (input1)
				     : "r" (input2)
				     : "cc", "r8", "r9", "r10", "r11");
	} else {
		__asm__ __volatile__("mov %4, %%r8\n"
				     CALL_NOSPEC
				     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
				       "+c" (control), "+d" (input1)
				     : "r" (input2),
				       THUNK_TARGET(hv_hypercall_pg)
				     : "cc", "r8", "r9", "r10", "r11");
	}
#else
	{
		u32 input1_hi = upper_32_bits(input1);
		u32 input1_lo = lower_32_bits(input1);
		u32 input2_hi = upper_32_bits(input2);
		u32 input2_lo = lower_32_bits(input2);

		__asm__ __volatile__ (CALL_NOSPEC
				      : "=A"(hv_status),
					"+c"(input1_lo), ASM_CALL_CONSTRAINT
				      :	"A" (control), "b" (input1_hi),
					"D"(input2_hi), "S"(input2_lo),
					THUNK_TARGET(hv_hypercall_pg)
				      : "cc");
	}
#endif
	return hv_status;
}

static inline u64 hv_do_fast_hypercall16(u16 code, u64 input1, u64 input2)
{
	u64 control = (u64)code | HV_HYPERCALL_FAST_BIT;

	return _hv_do_fast_hypercall16(control, input1, input2);
}

static inline u64 hv_do_fast_nested_hypercall16(u16 code, u64 input1, u64 input2)
{
	u64 control = (u64)code | HV_HYPERCALL_FAST_BIT | HV_HYPERCALL_NESTED;

	return _hv_do_fast_hypercall16(control, input1, input2);
}
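
/*
 * Illustrative use (a sketch, not part of this interface): the 16-byte
 * variant passes two register-sized inputs. The enlightened IPI path uses
 * it roughly along these lines; ipi_control and vp_mask are placeholder
 * names for the packed IPI parameters and the target VP mask:
 *
 *	status = hv_do_fast_hypercall16(HVCALL_SEND_IPI, ipi_control, vp_mask);
 */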

extern struct hv_vp_assist_page **hv_vp_assist_page;

static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
{
	if (!hv_vp_assist_page)
		return NULL;

	return hv_vp_assist_page[cpu];
}
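
/*
 * Illustrative use (a sketch, not part of this interface): callers such as
 * KVM's enlightened VMCS support fetch the current CPU's assist page and
 * must tolerate a NULL return when the page was not set up:
 *
 *	struct hv_vp_assist_page *vp_ap =
 *		hv_get_vp_assist_page(smp_processor_id());
 *
 *	if (!vp_ap)
 *		return;
 *	vp_ap->enlighten_vmentry = 1;
 */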

void __init hyperv_init(void);
void hyperv_setup_mmu_ops(void);
void set_hv_tscchange_cb(void (*cb)(void));
void clear_hv_tscchange_cb(void);
void hyperv_stop_tsc_emulation(void);
int hyperv_flush_guest_mapping(u64 as);
int hyperv_flush_guest_mapping_range(u64 as,
		hyperv_fill_flush_list_func fill_func, void *data);
int hyperv_fill_flush_guest_mapping_list(
		struct hv_guest_mapping_flush_list *flush,
		u64 start_gfn, u64 end_gfn);

#ifdef CONFIG_X86_64
void hv_apic_init(void);
void __init hv_init_spinlocks(void);
bool hv_vcpu_is_preempted(int vcpu);
#else
static inline void hv_apic_init(void) {}
#endif

struct irq_domain *hv_create_pci_msi_domain(void);

int hv_map_ioapic_interrupt(int ioapic_id, bool level, int vcpu, int vector,
		struct hv_interrupt_entry *entry);
int hv_unmap_ioapic_interrupt(int ioapic_id, struct hv_interrupt_entry *entry);

#ifdef CONFIG_AMD_MEM_ENCRYPT
bool hv_ghcb_negotiate_protocol(void);
void __noreturn hv_ghcb_terminate(unsigned int set, unsigned int reason);
int hv_snp_boot_ap(u32 cpu, unsigned long start_ip);
#else
static inline bool hv_ghcb_negotiate_protocol(void) { return false; }
static inline void hv_ghcb_terminate(unsigned int set, unsigned int reason) {}
static inline int hv_snp_boot_ap(u32 cpu, unsigned long start_ip) { return 0; }
#endif

#if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
void hv_vtom_init(void);
void hv_ivm_msr_write(u64 msr, u64 value);
void hv_ivm_msr_read(u64 msr, u64 *value);
#else
static inline void hv_vtom_init(void) {}
static inline void hv_ivm_msr_write(u64 msr, u64 value) {}
static inline void hv_ivm_msr_read(u64 msr, u64 *value) {}
#endif

static inline bool hv_is_synic_msr(unsigned int reg)
{
	return (reg >= HV_X64_MSR_SCONTROL) &&
	       (reg <= HV_X64_MSR_SINT15);
}

static inline bool hv_is_sint_msr(unsigned int reg)
{
	return (reg >= HV_X64_MSR_SINT0) &&
	       (reg <= HV_X64_MSR_SINT15);
}

u64 hv_get_msr(unsigned int reg);
void hv_set_msr(unsigned int reg, u64 value);
u64 hv_get_non_nested_msr(unsigned int reg);
void hv_set_non_nested_msr(unsigned int reg, u64 value);

static __always_inline u64 hv_raw_get_msr(unsigned int reg)
{
	return __rdmsr(reg);
}

#else /* CONFIG_HYPERV */
static inline void hyperv_init(void) {}
static inline void hyperv_setup_mmu_ops(void) {}
static inline void set_hv_tscchange_cb(void (*cb)(void)) {}
static inline void clear_hv_tscchange_cb(void) {}
static inline void hyperv_stop_tsc_emulation(void) {}
static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
{
	return NULL;
}
static inline int hyperv_flush_guest_mapping(u64 as) { return -1; }
static inline int hyperv_flush_guest_mapping_range(u64 as,
		hyperv_fill_flush_list_func fill_func, void *data)
{
	return -1;
}
static inline void hv_set_msr(unsigned int reg, u64 value) { }
static inline u64 hv_get_msr(unsigned int reg) { return 0; }
static inline void hv_set_non_nested_msr(unsigned int reg, u64 value) { }
static inline u64 hv_get_non_nested_msr(unsigned int reg) { return 0; }
#endif /* CONFIG_HYPERV */

#ifdef CONFIG_HYPERV_VTL_MODE
void __init hv_vtl_init_platform(void);
int __init hv_vtl_early_init(void);
#else
static inline void __init hv_vtl_init_platform(void) {}
static inline int __init hv_vtl_early_init(void) { return 0; }
#endif

#include <asm-generic/mshyperv.h>

#endif /* _ASM_X86_MSHYPER_H */