// SPDX-License-Identifier: GPL-2.0-only
/*
 * X86 specific Hyper-V initialization code.
 *
 * Copyright (C) 2016, Microsoft, Inc.
 *
 * Author : K. Y. Srinivasan <kys@microsoft.com>
 */

#define pr_fmt(fmt)  "Hyper-V: " fmt

#include <linux/efi.h>
#include <linux/types.h>
#include <linux/bitfield.h>
#include <linux/io.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/e820/api.h>
#include <asm/sev.h>
#include <asm/hypervisor.h>
#include <hyperv/hvhdk.h>
#include <asm/mshyperv.h>
#include <asm/msr.h>
#include <asm/idtentry.h>
#include <asm/set_memory.h>
#include <linux/kexec.h>
#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/cpuhotplug.h>
#include <linux/syscore_ops.h>
#include <clocksource/hyperv_timer.h>
#include <linux/highmem.h>
#include <linux/export.h>

void *hv_hypercall_pg;

#ifdef CONFIG_X86_64
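/*
 * Hypercalls are dispatched through a static call so the hot path issues
 * a direct call instead of an indirect branch. Until hv_set_hypercall_pg()
 * installs a real target, the static call points at __hv_hyperfail(), so
 * any early (or post-hibernation) hypercall fails safely with U64_MAX
 * rather than jumping through a NULL or stale pointer.
 */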
static u64 __hv_hyperfail(u64 control, u64 param1, u64 param2)
{
	return U64_MAX;
}

DEFINE_STATIC_CALL(__hv_hypercall, __hv_hyperfail);

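/*
 * Standard hypercall ABI (see the Hyper-V TLFS): the control code is
 * passed in RCX, the two parameters (for memory-based hypercalls, the
 * input and output GPAs) in RDX and R8, and the status is returned in
 * RAX. R9-R11 are clobbered across the call.
 */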
u64 hv_std_hypercall(u64 control, u64 param1, u64 param2)
{
	u64 hv_status;

	register u64 __r8 asm("r8") = param2;
	asm volatile ("call " STATIC_CALL_TRAMP_STR(__hv_hypercall)
		      : "=a" (hv_status), ASM_CALL_CONSTRAINT,
		        "+c" (control), "+d" (param1), "+r" (__r8)
		      : : "cc", "memory", "r9", "r10", "r11");

	return hv_status;
}

typedef u64 (*hv_hypercall_f)(u64 control, u64 param1, u64 param2);

static inline void hv_set_hypercall_pg(void *ptr)
{
	hv_hypercall_pg = ptr;

	if (!ptr)
		ptr = &__hv_hyperfail;
	static_call_update(__hv_hypercall, (hv_hypercall_f)ptr);
}
#else
static inline void hv_set_hypercall_pg(void *ptr)
{
	hv_hypercall_pg = ptr;
}
EXPORT_SYMBOL_GPL(hv_hypercall_pg);
#endif

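/*
 * Per-CPU pointer to the GHCB page used to communicate with the
 * hypervisor in SEV-SNP VMs that run with a paravisor; see
 * hyperv_init_ghcb().
 */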
union hv_ghcb * __percpu *hv_ghcb_pg;

/* Storage to save the hypercall page temporarily for hibernation */
static void *hv_hypercall_pg_saved;

struct hv_vp_assist_page **hv_vp_assist_page;
EXPORT_SYMBOL_GPL(hv_vp_assist_page);

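/*
 * Map this CPU's paravisor-allocated GHCB page. Called from hv_cpu_init()
 * as each CPU comes online (and again on resume via hv_resume()).
 */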
static int hyperv_init_ghcb(void)
{
	u64 ghcb_gpa;
	void *ghcb_va;
	void **ghcb_base;

	if (!ms_hyperv.paravisor_present || !hv_isolation_type_snp())
		return 0;

	if (!hv_ghcb_pg)
		return -EINVAL;

	/*
	 * The GHCB page is allocated by the paravisor. The address
	 * returned by MSR_AMD64_SEV_ES_GHCB is above the shared
	 * memory boundary, so map it here.
	 */
	rdmsrq(MSR_AMD64_SEV_ES_GHCB, ghcb_gpa);

	/* Mask out the vTOM bit. ioremap_cache() maps decrypted. */
	ghcb_gpa &= ~ms_hyperv.shared_gpa_boundary;
	ghcb_va = (void *)ioremap_cache(ghcb_gpa, HV_HYP_PAGE_SIZE);
	if (!ghcb_va)
		return -ENOMEM;

	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
	*ghcb_base = ghcb_va;

	return 0;
}

static int hv_cpu_init(unsigned int cpu)
{
	union hv_vp_assist_msr_contents msr = { 0 };
	struct hv_vp_assist_page **hvp;
	int ret;

	ret = hv_common_cpu_init(cpu);
	if (ret)
		return ret;

	if (!hv_vp_assist_page)
		return 0;

	hvp = &hv_vp_assist_page[cpu];
	if (hv_root_partition()) {
		/*
		 * For the root partition we use the hypervisor-provided VP
		 * assist page instead of allocating a new page.
		 */
		rdmsrq(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
		*hvp = memremap(msr.pfn << HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT,
				PAGE_SIZE, MEMREMAP_WB);
	} else {
		/*
		 * The VP assist page is an "overlay" page (see Hyper-V TLFS's
		 * Section 5.2.1 "GPA Overlay Pages"). Here it must be zeroed
		 * out to make sure we always write the EOI MSR in
		 * hv_apic_eoi_write() *after* the EOI optimization is disabled
		 * in hv_cpu_die(); otherwise a CPU may not be stopped in the
		 * case of CPU offlining and the VM will hang.
		 */
		if (!*hvp) {
			*hvp = __vmalloc(PAGE_SIZE, GFP_KERNEL | __GFP_ZERO);

			/*
			 * Hyper-V should never present a VM that is both a
			 * Confidential VM and the root partition: the root
			 * partition is blocked from running as a Confidential
			 * VM. So the assist page only needs to be decrypted
			 * here, in the non-root case.
			 */
			if (*hvp && !ms_hyperv.paravisor_present && hv_isolation_type_snp()) {
				WARN_ON_ONCE(set_memory_decrypted((unsigned long)(*hvp), 1));
				memset(*hvp, 0, PAGE_SIZE);
			}
		}

		if (*hvp)
			msr.pfn = vmalloc_to_pfn(*hvp);
	}
	if (!WARN_ON(!(*hvp))) {
		msr.enable = 1;
		wrmsrq(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
	}

	return hyperv_init_ghcb();
}

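/*
 * TSC reenlightenment: after a live migration to a host with a different
 * TSC frequency, Hyper-V temporarily emulates TSC accesses at the old
 * frequency and notifies the guest with a "reenlightenment" interrupt.
 * The callback registered via set_hv_tscchange_cb() (used e.g. by KVM when
 * running nested on Hyper-V) can then update its clocks before TSC
 * emulation is switched off with hyperv_stop_tsc_emulation().
 */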
static void (*hv_reenlightenment_cb)(void);

static void hv_reenlightenment_notify(struct work_struct *dummy)
{
	struct hv_tsc_emulation_status emu_status;

	rdmsrq(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);

	/* Don't issue the callback if TSC accesses are not emulated */
	if (hv_reenlightenment_cb && emu_status.inprogress)
		hv_reenlightenment_cb();
}
static DECLARE_DELAYED_WORK(hv_reenlightenment_work, hv_reenlightenment_notify);

void hyperv_stop_tsc_emulation(void)
{
	u64 freq;
	struct hv_tsc_emulation_status emu_status;

	rdmsrq(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);
	emu_status.inprogress = 0;
	wrmsrq(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);

	rdmsrq(HV_X64_MSR_TSC_FREQUENCY, freq);
	tsc_khz = div64_u64(freq, 1000);
}
EXPORT_SYMBOL_GPL(hyperv_stop_tsc_emulation);

static inline bool hv_reenlightenment_available(void)
{
	/*
	 * Check for required features and privileges to make TSC frequency
	 * change notifications work.
	 */
	return ms_hyperv.features & HV_ACCESS_FREQUENCY_MSRS &&
		ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE &&
		ms_hyperv.features & HV_ACCESS_REENLIGHTENMENT;
}

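/*
 * Reenlightenment interrupt handler: acknowledge the interrupt and defer
 * the actual notification, re-reading the TSC emulation status and
 * invoking the callback from workqueue context shortly (HZ/10) later.
 */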
DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_reenlightenment)
{
	apic_eoi();
	inc_irq_stat(irq_hv_reenlightenment_count);
	schedule_delayed_work(&hv_reenlightenment_work, HZ/10);
}

void set_hv_tscchange_cb(void (*cb)(void))
{
	struct hv_reenlightenment_control re_ctrl = {
		.vector = HYPERV_REENLIGHTENMENT_VECTOR,
		.enabled = 1,
	};
	struct hv_tsc_emulation_control emu_ctrl = {.enabled = 1};

	if (!hv_reenlightenment_available()) {
		pr_warn("reenlightenment support is unavailable\n");
		return;
	}

	if (!hv_vp_index)
		return;

	hv_reenlightenment_cb = cb;

	/* Make sure callback is registered before we write to MSRs */
	wmb();

	re_ctrl.target_vp = hv_vp_index[get_cpu()];

	wrmsrq(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
	wrmsrq(HV_X64_MSR_TSC_EMULATION_CONTROL, *((u64 *)&emu_ctrl));

	put_cpu();
}
EXPORT_SYMBOL_GPL(set_hv_tscchange_cb);

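/* Disable reenlightenment notifications and unregister the callback. */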
void clear_hv_tscchange_cb(void)
{
	struct hv_reenlightenment_control re_ctrl;

	if (!hv_reenlightenment_available())
		return;

	rdmsrq(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl);
	re_ctrl.enabled = 0;
	wrmsrq(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl);

	hv_reenlightenment_cb = NULL;
}
EXPORT_SYMBOL_GPL(clear_hv_tscchange_cb);

static int hv_cpu_die(unsigned int cpu)
{
	struct hv_reenlightenment_control re_ctrl;
	unsigned int new_cpu;
	void **ghcb_va;

	if (hv_ghcb_pg) {
		ghcb_va = (void **)this_cpu_ptr(hv_ghcb_pg);
		if (*ghcb_va)
			iounmap(*ghcb_va);
		*ghcb_va = NULL;
	}

	hv_common_cpu_die(cpu);

	if (hv_vp_assist_page && hv_vp_assist_page[cpu]) {
		union hv_vp_assist_msr_contents msr = { 0 };
		if (hv_root_partition()) {
			/*
			 * For the root partition the VP assist page is mapped
			 * to the hypervisor-provided page, so unmap it and
			 * nullify the pointer here so that a future
			 * hv_cpu_init() maps the correct page address again.
			 */
			memunmap(hv_vp_assist_page[cpu]);
			hv_vp_assist_page[cpu] = NULL;
			rdmsrq(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
			msr.enable = 0;
		}
		wrmsrq(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
	}

	if (hv_reenlightenment_cb == NULL)
		return 0;

	rdmsrq(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
	if (re_ctrl.target_vp == hv_vp_index[cpu]) {
		/*
		 * Reassign reenlightenment notifications to some other online
		 * CPU or just disable the feature if there are no online CPUs
		 * left (happens on hibernation).
		 */
		new_cpu = cpumask_any_but(cpu_online_mask, cpu);

		if (new_cpu < nr_cpu_ids)
			re_ctrl.target_vp = hv_vp_index[new_cpu];
		else
			re_ctrl.enabled = 0;

		wrmsrq(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
	}

	return 0;
}

static int __init hv_pci_init(void)
{
	bool gen2vm = efi_enabled(EFI_BOOT);

	/*
	 * A Generation-2 VM doesn't support legacy PCI/PCIe, so both
	 * raw_pci_ops and raw_pci_ext_ops are NULL, and pci_subsys_init() ->
	 * pcibios_init() doesn't call pcibios_resource_survey() ->
	 * e820__reserve_resources_late(); as a result, any emulated persistent
	 * memory of E820_TYPE_PRAM (12) via the kernel parameter
	 * memmap=nn[KMG]!ss is not added into iomem_resource and hence can't be
	 * detected by register_e820_pmem(). Fix this by directly calling
	 * e820__reserve_resources_late() here: e820__reserve_resources_late()
	 * depends on e820__reserve_resources(), which has been called earlier
	 * from setup_arch(). Note: e820__reserve_resources_late() also adds
	 * any memory of E820_TYPE_PMEM (7) into iomem_resource, and
	 * acpi_nfit_register_region() -> acpi_nfit_insert_resource() ->
	 * region_intersects() returns REGION_INTERSECTS, so the memory of
	 * E820_TYPE_PMEM won't get added twice.
	 *
	 * We return 0 here so that pci_arch_init() won't print the warning:
	 * "PCI: Fatal: No config space access function found"
	 */
	if (gen2vm) {
		e820__reserve_resources_late();
		return 0;
	}

	/* For a Generation-1 VM, we'll proceed in pci_arch_init(). */
	return 1;
}

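/*
 * Syscore suspend/resume callbacks, used across hibernation. The root
 * partition does not support hibernation, so hv_suspend() refuses with
 * -EPERM there.
 */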
static int hv_suspend(void)
{
	union hv_x64_msr_hypercall_contents hypercall_msr;
	int ret;

	if (hv_root_partition())
		return -EPERM;

	/*
	 * Reset the hypercall page as it is going to be invalidated
	 * across hibernation. Setting hv_hypercall_pg to NULL ensures
	 * that any subsequent hypercall operation fails safely instead of
	 * crashing due to an access of an invalid page. The hypercall page
	 * pointer is restored on resume.
	 */
	hv_hypercall_pg_saved = hv_hypercall_pg;
	hv_set_hypercall_pg(NULL);

	/* Disable the hypercall page in the hypervisor */
	rdmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
	hypercall_msr.enable = 0;
	wrmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	ret = hv_cpu_die(0);
	return ret;
}

static void hv_resume(void)
{
	union hv_x64_msr_hypercall_contents hypercall_msr;
	int ret;

	ret = hv_cpu_init(0);
	WARN_ON(ret);

	/* Re-enable the hypercall page */
	rdmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
	hypercall_msr.enable = 1;
	hypercall_msr.guest_physical_address =
		vmalloc_to_pfn(hv_hypercall_pg_saved);
	wrmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	hv_set_hypercall_pg(hv_hypercall_pg_saved);
	hv_hypercall_pg_saved = NULL;

	/*
	 * Reenlightenment notifications are disabled by hv_cpu_die(0);
	 * re-enable them here if hv_reenlightenment_cb was previously set.
	 */
	if (hv_reenlightenment_cb)
		set_hv_tscchange_cb(hv_reenlightenment_cb);
}

/* Note: when the ops are called, only CPU0 is online and IRQs are disabled. */
static struct syscore_ops hv_syscore_ops = {
	.suspend	= hv_suspend,
	.resume		= hv_resume,
};

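/* Original x86_init.timers.setup_percpu_clockev, saved so it can be chained */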
static void (* __initdata old_setup_percpu_clockev)(void);

static void __init hv_stimer_setup_percpu_clockev(void)
{
	/*
	 * Ignore any errors in setting up stimer clockevents
	 * as we can run with the LAPIC timer as a fallback.
	 */
	(void)hv_stimer_alloc(false);

	/*
	 * Still register the LAPIC timer, because the direct-mode STIMER is
	 * not supported by old versions of Hyper-V. This also allows users
	 * to switch to LAPIC timer via /sys, if they want to.
	 */
	if (old_setup_percpu_clockev)
		old_setup_percpu_clockev();
}

/*
 * This function is to be invoked early in the boot sequence after the
 * hypervisor has been detected.
 *
 * 1. Setup the hypercall page.
 * 2. Register Hyper-V specific clocksource.
 * 3. Setup Hyper-V specific APIC entry points.
 */
void __init hyperv_init(void)
{
	u64 guest_id;
	union hv_x64_msr_hypercall_contents hypercall_msr;
	int cpuhp;

	if (x86_hyper_type != X86_HYPER_MS_HYPERV)
		return;

	if (hv_common_init())
		return;

	/*
	 * The VP assist page is useless to a TDX guest: the only use we
	 * would have for it is lazy EOI, which cannot be used with TDX.
	 */
	if (hv_isolation_type_tdx())
		hv_vp_assist_page = NULL;
	else
		hv_vp_assist_page = kcalloc(nr_cpu_ids,
					    sizeof(*hv_vp_assist_page),
					    GFP_KERNEL);
	if (!hv_vp_assist_page) {
		ms_hyperv.hints &= ~HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;

		if (!hv_isolation_type_tdx())
			goto common_free;
	}

	if (ms_hyperv.paravisor_present && hv_isolation_type_snp()) {
		/* Negotiate GHCB Version. */
		if (!hv_ghcb_negotiate_protocol())
			hv_ghcb_terminate(SEV_TERM_SET_GEN,
					  GHCB_SEV_ES_PROT_UNSUPPORTED);

		hv_ghcb_pg = alloc_percpu(union hv_ghcb *);
		if (!hv_ghcb_pg)
			goto free_vp_assist_page;
	}

	cpuhp = cpuhp_setup_state(CPUHP_AP_HYPERV_ONLINE, "x86/hyperv_init:online",
				  hv_cpu_init, hv_cpu_die);
	if (cpuhp < 0)
		goto free_ghcb_page;

	/*
	 * Setup the hypercall page and enable hypercalls.
	 * 1. Register the guest ID
	 * 2. Enable the hypercall and register the hypercall page
	 *
	 * A TDX VM with no paravisor only uses TDX GHCI rather than hv_hypercall_pg:
	 * when the hypercall input is a page, such a VM must pass a decrypted
	 * page to Hyper-V, e.g. hv_post_message() uses the per-CPU page
	 * hyperv_pcpu_input_arg, which is decrypted if no paravisor is present.
	 *
	 * A TDX VM with the paravisor uses hv_hypercall_pg for most hypercalls,
	 * which are handled by the paravisor and the VM must use an encrypted
	 * input page: in such a VM, the hyperv_pcpu_input_arg is encrypted and
	 * used in the hypercalls, e.g. see hv_mark_gpa_visibility() and
	 * hv_arch_irq_unmask(). Such a VM uses TDX GHCI for two hypercalls:
	 * 1. HVCALL_SIGNAL_EVENT: see vmbus_set_event() and _hv_do_fast_hypercall8().
	 * 2. HVCALL_POST_MESSAGE: the input page must be a decrypted page, i.e.
	 * hv_post_message() in such a VM can't use the encrypted hyperv_pcpu_input_arg;
	 * instead, hv_post_message() uses the post_msg_page, which is decrypted
	 * in such a VM and is only used in such a VM.
	 */
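	/*
	 * The guest OS ID MSR identifies the running OS to Hyper-V. For
	 * Linux, hv_generate_guest_id() builds it from the Linux vendor ID
	 * in the upper bits and LINUX_VERSION_CODE in the lower bits.
	 */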
	guest_id = hv_generate_guest_id(LINUX_VERSION_CODE);
	wrmsrq(HV_X64_MSR_GUEST_OS_ID, guest_id);

	/* With the paravisor, the VM must also write the ID via GHCB/GHCI */
	hv_ivm_msr_write(HV_X64_MSR_GUEST_OS_ID, guest_id);

	/* A TDX VM with no paravisor only uses TDX GHCI rather than hv_hypercall_pg */
	if (hv_isolation_type_tdx() && !ms_hyperv.paravisor_present)
		goto skip_hypercall_pg_init;

	hv_hypercall_pg = __vmalloc_node_range(PAGE_SIZE, 1, MODULES_VADDR,
			MODULES_END, GFP_KERNEL, PAGE_KERNEL_ROX,
			VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
			__builtin_return_address(0));
	if (hv_hypercall_pg == NULL)
		goto clean_guest_os_id;

	rdmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
	hypercall_msr.enable = 1;

	if (hv_root_partition()) {
		struct page *pg;
		void *src;

		/*
		 * For the root partition, the hypervisor will set up its
		 * hypercall page. The hypervisor guarantees it will not show
		 * up in the root's address space. The root can't change the
		 * location of the hypercall page.
		 *
		 * Order is important here. We must enable the hypercall page
		 * so it is populated with code, then copy the code to an
		 * executable page.
		 */
		wrmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

		pg = vmalloc_to_page(hv_hypercall_pg);
		src = memremap(hypercall_msr.guest_physical_address << PAGE_SHIFT, PAGE_SIZE,
				MEMREMAP_WB);
		BUG_ON(!src);
		memcpy_to_page(pg, 0, src, HV_HYP_PAGE_SIZE);
		memunmap(src);

		hv_remap_tsc_clocksource();
	} else {
		hypercall_msr.guest_physical_address = vmalloc_to_pfn(hv_hypercall_pg);
		wrmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
	}

	hv_set_hypercall_pg(hv_hypercall_pg);

skip_hypercall_pg_init:
	/*
	 * hyperv_init() is called before LAPIC is initialized: see
	 * apic_intr_mode_init() -> x86_platform.apic_post_init() and
	 * apic_bsp_setup() -> setup_local_APIC(). The direct-mode STIMER
	 * depends on LAPIC, so hv_stimer_alloc() should be called from
	 * x86_init.timers.setup_percpu_clockev.
	 */
	old_setup_percpu_clockev = x86_init.timers.setup_percpu_clockev;
	x86_init.timers.setup_percpu_clockev = hv_stimer_setup_percpu_clockev;

	hv_apic_init();

	x86_init.pci.arch_init = hv_pci_init;

	register_syscore_ops(&hv_syscore_ops);

	if (ms_hyperv.priv_high & HV_ACCESS_PARTITION_ID)
		hv_get_partition_id();

#ifdef CONFIG_PCI_MSI
	/*
	 * If we're running as root, we want to create our own PCI MSI domain.
	 * We can't set this in hv_pci_init because that would be too late.
	 */
	if (hv_root_partition())
		x86_init.irqs.create_pci_msi_domain = hv_create_pci_msi_domain;
#endif

	/* Query the VM's extended capabilities once, so that they can be cached. */
	hv_query_ext_cap(0);

	/* Find the Virtual Trust Level (VTL) this kernel runs at */
	ms_hyperv.vtl = get_vtl();

	if (ms_hyperv.vtl > 0) /* non-default VTL */
		hv_vtl_early_init();

	return;

clean_guest_os_id:
	wrmsrq(HV_X64_MSR_GUEST_OS_ID, 0);
	hv_ivm_msr_write(HV_X64_MSR_GUEST_OS_ID, 0);
	cpuhp_remove_state(CPUHP_AP_HYPERV_ONLINE);
free_ghcb_page:
	free_percpu(hv_ghcb_pg);
free_vp_assist_page:
	kfree(hv_vp_assist_page);
	hv_vp_assist_page = NULL;
common_free:
	hv_common_free();
}

/*
 * This routine is called before kexec/kdump; it does the required cleanup.
 */
void hyperv_cleanup(void)
{
	union hv_x64_msr_hypercall_contents hypercall_msr;
	union hv_reference_tsc_msr tsc_msr;

	/* Reset our OS id */
	wrmsrq(HV_X64_MSR_GUEST_OS_ID, 0);
	hv_ivm_msr_write(HV_X64_MSR_GUEST_OS_ID, 0);

	/*
	 * Reset the hypercall page reference before resetting the page
	 * itself, so that hypercall operations fail safely rather than
	 * panicking the kernel by using an invalid hypercall page.
	 */
	hv_hypercall_pg = NULL;

	/* Reset the hypercall page */
	hypercall_msr.as_uint64 = hv_get_msr(HV_X64_MSR_HYPERCALL);
	hypercall_msr.enable = 0;
	hv_set_msr(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	/* Reset the TSC page */
	tsc_msr.as_uint64 = hv_get_msr(HV_X64_MSR_REFERENCE_TSC);
	tsc_msr.enable = 0;
	hv_set_msr(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
}

void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die)
{
	static bool panic_reported;
	u64 guest_id;

	if (in_die && !panic_on_oops)
		return;

	/*
	 * We prefer to report panic on 'die' chain as we have proper
	 * registers to report, but if we miss it (e.g. on BUG()) we need
	 * to report it on 'panic'.
	 */
	if (panic_reported)
		return;
	panic_reported = true;

	rdmsrq(HV_X64_MSR_GUEST_OS_ID, guest_id);

	wrmsrq(HV_X64_MSR_CRASH_P0, err);
	wrmsrq(HV_X64_MSR_CRASH_P1, guest_id);
	wrmsrq(HV_X64_MSR_CRASH_P2, regs->ip);
	wrmsrq(HV_X64_MSR_CRASH_P3, regs->ax);
	wrmsrq(HV_X64_MSR_CRASH_P4, regs->sp);

	/* Let Hyper-V know there is crash data available */
	wrmsrq(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);
}
EXPORT_SYMBOL_GPL(hyperv_report_panic);

bool hv_is_hyperv_initialized(void)
{
	union hv_x64_msr_hypercall_contents hypercall_msr;

	/*
	 * Ensure that we're really on Hyper-V, and not a KVM or Xen
	 * emulation of Hyper-V
	 */
	if (x86_hyper_type != X86_HYPER_MS_HYPERV)
		return false;

	/* A TDX VM with no paravisor uses TDX GHCI call rather than hv_hypercall_pg */
	if (hv_isolation_type_tdx() && !ms_hyperv.paravisor_present)
		return true;

	/*
	 * Verify that earlier initialization succeeded by checking
	 * that the hypercall page is setup
	 */
	hypercall_msr.as_uint64 = 0;
	rdmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	return hypercall_msr.enable;
}
EXPORT_SYMBOL_GPL(hv_is_hyperv_initialized);

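/*
 * Translate an APIC ID to a Hyper-V VP index via the
 * HVCALL_GET_VP_INDEX_FROM_APIC_ID rep hypercall. Interrupts are disabled
 * around the call because the per-CPU hyperv_pcpu_input_arg and
 * hyperv_pcpu_output_arg pages are used for the hypercall input/output.
 */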
int hv_apicid_to_vp_index(u32 apic_id)
{
	u64 control;
	u64 status;
	unsigned long irq_flags;
	struct hv_get_vp_from_apic_id_in *input;
	u32 *output, ret;

	local_irq_save(irq_flags);

	input = *this_cpu_ptr(hyperv_pcpu_input_arg);
	memset(input, 0, sizeof(*input));
	input->partition_id = HV_PARTITION_ID_SELF;
	input->apic_ids[0] = apic_id;

	output = *this_cpu_ptr(hyperv_pcpu_output_arg);

	control = HV_HYPERCALL_REP_COMP_1 | HVCALL_GET_VP_INDEX_FROM_APIC_ID;
	status = hv_do_hypercall(control, input, output);
	ret = output[0];

	local_irq_restore(irq_flags);

	if (!hv_result_success(status)) {
		pr_err("failed to get vp index from apic id %d, status %#llx\n",
		       apic_id, status);
		return -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(hv_apicid_to_vp_index);