xref: /linux/arch/x86/kernel/cpu/mshyperv.c (revision 733f7e9c18c5e377025c1bfdce6bc9a7d55649be)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hyper-V detection code.
 *
 * Copyright (C) 2010, Novell, Inc.
 * Author : K. Y. Srinivasan <ksrinivasan@novell.com>
 */

#include <linux/types.h>
#include <linux/time.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/efi.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kexec.h>
#include <linux/i8253.h>
#include <linux/random.h>
#include <linux/swiotlb.h>
#include <asm/processor.h>
#include <asm/hypervisor.h>
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
#include <asm/desc.h>
#include <asm/idtentry.h>
#include <asm/irq_regs.h>
#include <asm/i8259.h>
#include <asm/apic.h>
#include <asm/timer.h>
#include <asm/reboot.h>
#include <asm/nmi.h>
#include <clocksource/hyperv_timer.h>
#include <asm/numa.h>

/* Is Linux running as the root partition? */
bool hv_root_partition;
/* Is Linux running on a nested Microsoft Hypervisor? */
bool hv_nested;
struct ms_hyperv_info ms_hyperv;

#if IS_ENABLED(CONFIG_HYPERV)
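/*
 * When Linux runs on a nested Microsoft hypervisor, the SynIC and related
 * synthetic registers must be accessed through their NESTED_* aliases.
 * Map a register to its nested equivalent; registers without a nested
 * variant are returned unchanged.
 */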
static inline unsigned int hv_get_nested_reg(unsigned int reg)
{
	if (hv_is_sint_reg(reg))
		return reg - HV_REGISTER_SINT0 + HV_REGISTER_NESTED_SINT0;

	switch (reg) {
	case HV_REGISTER_SIMP:
		return HV_REGISTER_NESTED_SIMP;
	case HV_REGISTER_SIEFP:
		return HV_REGISTER_NESTED_SIEFP;
	case HV_REGISTER_SVERSION:
		return HV_REGISTER_NESTED_SVERSION;
	case HV_REGISTER_SCONTROL:
		return HV_REGISTER_NESTED_SCONTROL;
	case HV_REGISTER_EOM:
		return HV_REGISTER_NESTED_EOM;
	default:
		return reg;
	}
}

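/*
 * Read a synthetic register without any nested translation. SNP-isolated
 * VMs access SynIC registers through the GHCB protocol; everything else
 * uses a plain RDMSR.
 */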
u64 hv_get_non_nested_register(unsigned int reg)
{
	u64 value;

	if (hv_is_synic_reg(reg) && hv_isolation_type_snp())
		hv_ghcb_msr_read(reg, &value);
	else
		rdmsrl(reg, value);
	return value;
}
EXPORT_SYMBOL_GPL(hv_get_non_nested_register);

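/*
 * Write a synthetic register without any nested translation. As with reads,
 * SNP-isolated VMs go through the GHCB protocol; for SINT registers the
 * proxy bit is additionally written with a plain WRMSR.
 */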
void hv_set_non_nested_register(unsigned int reg, u64 value)
{
	if (hv_is_synic_reg(reg) && hv_isolation_type_snp()) {
		hv_ghcb_msr_write(reg, value);

		/* Write proxy bit via wrmsrl instruction */
		if (hv_is_sint_reg(reg))
			wrmsrl(reg, value | 1 << 20);
	} else {
		wrmsrl(reg, value);
	}
}
EXPORT_SYMBOL_GPL(hv_set_non_nested_register);

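/*
 * Generic register accessors: translate to the nested register range when
 * running on a nested hypervisor, then use the non-nested helpers above.
 */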
u64 hv_get_register(unsigned int reg)
{
	if (hv_nested)
		reg = hv_get_nested_reg(reg);

	return hv_get_non_nested_register(reg);
}
EXPORT_SYMBOL_GPL(hv_get_register);

void hv_set_register(unsigned int reg, u64 value)
{
	if (hv_nested)
		reg = hv_get_nested_reg(reg);

	hv_set_non_nested_register(reg, value);
}
EXPORT_SYMBOL_GPL(hv_set_register);

static void (*vmbus_handler)(void);
static void (*hv_stimer0_handler)(void);
static void (*hv_kexec_handler)(void);
static void (*hv_crash_handler)(struct pt_regs *regs);

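/*
 * Top-level handler for the Hyper-V callback (VMBus) interrupt: dispatch to
 * the VMBus driver if one has registered, and ack the APIC explicitly only
 * when Hyper-V recommends against relying on auto-EOI.
 */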
DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_callback)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	inc_irq_stat(irq_hv_callback_count);
	if (vmbus_handler)
		vmbus_handler();

	if (ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED)
		ack_APIC_irq();

	set_irq_regs(old_regs);
}

void hv_setup_vmbus_handler(void (*handler)(void))
{
	vmbus_handler = handler;
}

void hv_remove_vmbus_handler(void)
{
	/* We have no way to deallocate the interrupt gate */
	vmbus_handler = NULL;
}

/*
 * Routines to do per-architecture handling of stimer0
 * interrupts when in Direct Mode
 */
DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_stimer0)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	inc_irq_stat(hyperv_stimer0_count);
	if (hv_stimer0_handler)
		hv_stimer0_handler();
	add_interrupt_randomness(HYPERV_STIMER0_VECTOR);
	ack_APIC_irq();

	set_irq_regs(old_regs);
}

/* For x86/x64, override weak placeholders in hyperv_timer.c */
void hv_setup_stimer0_handler(void (*handler)(void))
{
	hv_stimer0_handler = handler;
}

void hv_remove_stimer0_handler(void)
{
	/* We have no way to deallocate the interrupt gate */
	hv_stimer0_handler = NULL;
}

void hv_setup_kexec_handler(void (*handler)(void))
{
	hv_kexec_handler = handler;
}

void hv_remove_kexec_handler(void)
{
	hv_kexec_handler = NULL;
}

void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs))
{
	hv_crash_handler = handler;
}

void hv_remove_crash_handler(void)
{
	hv_crash_handler = NULL;
}

#ifdef CONFIG_KEXEC_CORE
static void hv_machine_shutdown(void)
{
	if (kexec_in_progress && hv_kexec_handler)
		hv_kexec_handler();

	/*
	 * Call hv_cpu_die() on all the CPUs; otherwise the hypervisor may
	 * later corrupt the old VP Assist Pages and crash the kexec kernel.
	 */
	if (kexec_in_progress && hyperv_init_cpuhp > 0)
		cpuhp_remove_state(hyperv_init_cpuhp);

	/* The function calls stop_other_cpus(). */
	native_machine_shutdown();

	/* Disable the hypercall page when there is only 1 active CPU. */
	if (kexec_in_progress)
		hyperv_cleanup();
}

static void hv_machine_crash_shutdown(struct pt_regs *regs)
{
	if (hv_crash_handler)
		hv_crash_handler(regs);

	/* The function calls crash_smp_send_stop(). */
	native_machine_crash_shutdown(regs);

	/* Disable the hypercall page when there is only 1 active CPU. */
	hyperv_cleanup();
}
#endif /* CONFIG_KEXEC_CORE */
#endif /* CONFIG_HYPERV */

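/*
 * Detect Hyper-V: check for the "Microsoft Hv" signature in the hypervisor
 * CPUID leaves and verify that the mandatory HYPERCALL and VP_INDEX MSRs
 * are advertised. Returns the base CPUID leaf on success, 0 otherwise.
 */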
static uint32_t __init ms_hyperv_platform(void)
{
	u32 eax;
	u32 hyp_signature[3];

	if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return 0;

	cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS,
	      &eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]);

	if (eax < HYPERV_CPUID_MIN || eax > HYPERV_CPUID_MAX ||
	    memcmp("Microsoft Hv", hyp_signature, 12))
		return 0;

	/* HYPERCALL and VP_INDEX MSRs are mandatory for all features. */
	eax = cpuid_eax(HYPERV_CPUID_FEATURES);
	if (!(eax & HV_MSR_HYPERCALL_AVAILABLE)) {
		pr_warn("x86/hyperv: HYPERCALL MSR not available.\n");
		return 0;
	}
	if (!(eax & HV_MSR_VP_INDEX_AVAILABLE)) {
		pr_warn("x86/hyperv: VP_INDEX MSR not available.\n");
		return 0;
	}

	return HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
}

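/*
 * Generation 2 (UEFI) VMs have no port 0x61, so there is never an NMI
 * reason to report; used as x86_platform.get_nmi_reason on such VMs.
 */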
static unsigned char hv_get_nmi_reason(void)
{
	return 0;
}

#ifdef CONFIG_X86_LOCAL_APIC
/*
 * Prior to WS2016, Debug-VM sends NMIs to all CPUs, which makes it
 * difficult to process CHANNELMSG_UNLOAD in case of a crash. Handle the
 * unknown NMI on the first CPU that gets it.
 */
static int hv_nmi_unknown(unsigned int val, struct pt_regs *regs)
{
	static atomic_t nmi_cpu = ATOMIC_INIT(-1);

	if (!unknown_nmi_panic)
		return NMI_DONE;

	if (atomic_cmpxchg(&nmi_cpu, -1, raw_smp_processor_id()) != -1)
		return NMI_HANDLED;

	return NMI_DONE;
}
#endif

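/*
 * Hyper-V reports the TSC frequency in Hz via the HV_X64_MSR_TSC_FREQUENCY
 * synthetic MSR; use it for TSC and CPU frequency calibration.
 */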
static unsigned long hv_get_tsc_khz(void)
{
	unsigned long freq;

	rdmsrl(HV_X64_MSR_TSC_FREQUENCY, freq);

	return freq / 1000;
}

#if defined(CONFIG_SMP) && IS_ENABLED(CONFIG_HYPERV)
static void __init hv_smp_prepare_boot_cpu(void)
{
	native_smp_prepare_boot_cpu();
#if defined(CONFIG_X86_64) && defined(CONFIG_PARAVIRT_SPINLOCKS)
	hv_init_spinlocks();
#endif
}

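/*
 * In the root partition, register each present CPU with the hypervisor as a
 * logical processor and create a virtual processor for it before the
 * secondary CPUs are brought up.
 */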
static void __init hv_smp_prepare_cpus(unsigned int max_cpus)
{
#ifdef CONFIG_X86_64
	int i;
	int ret;
#endif

	native_smp_prepare_cpus(max_cpus);

#ifdef CONFIG_X86_64
	for_each_present_cpu(i) {
		if (i == 0)
			continue;
		ret = hv_call_add_logical_proc(numa_cpu_node(i), i, cpu_physical_id(i));
		BUG_ON(ret);
	}

	for_each_present_cpu(i) {
		if (i == 0)
			continue;
		ret = hv_call_create_vp(numa_cpu_node(i), hv_current_partition_id, i, i);
		BUG_ON(ret);
	}
#endif
}
#endif

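/*
 * Main platform setup, called via the .init.init_platform hook below once
 * the generic hypervisor detection code has identified Hyper-V.
 */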
static void __init ms_hyperv_init_platform(void)
{
	int hv_max_functions_eax;
	int hv_host_info_eax;
	int hv_host_info_ebx;
	int hv_host_info_ecx;
	int hv_host_info_edx;

#ifdef CONFIG_PARAVIRT
	pv_info.name = "Hyper-V";
#endif

	/*
	 * Extract the features and hints
	 */
	ms_hyperv.features = cpuid_eax(HYPERV_CPUID_FEATURES);
	ms_hyperv.priv_high = cpuid_ebx(HYPERV_CPUID_FEATURES);
	ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES);
	ms_hyperv.hints    = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO);

	hv_max_functions_eax = cpuid_eax(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS);

	pr_info("Hyper-V: privilege flags low 0x%x, high 0x%x, hints 0x%x, misc 0x%x\n",
		ms_hyperv.features, ms_hyperv.priv_high, ms_hyperv.hints,
		ms_hyperv.misc_features);

	ms_hyperv.max_vp_index = cpuid_eax(HYPERV_CPUID_IMPLEMENT_LIMITS);
	ms_hyperv.max_lp_index = cpuid_ebx(HYPERV_CPUID_IMPLEMENT_LIMITS);

	pr_debug("Hyper-V: max %u virtual processors, %u logical processors\n",
		 ms_hyperv.max_vp_index, ms_hyperv.max_lp_index);

	/*
	 * Check CPU management privilege.
	 *
	 * To mirror what Windows does, we should extract the CPU management
	 * features and use the ReservedIdentityBit to detect whether Linux is
	 * the root partition. But that requires negotiating the CPU management
	 * interface (a process yet to be finalized). For now, use the
	 * privilege flag as the indicator of running as root.
	 *
	 * Hyper-V should never specify running as root and as a Confidential
	 * VM at the same time. But to protect against a compromised or
	 * malicious Hyper-V trying to exploit root behavior to expose
	 * Confidential VM memory, ignore the root partition setting if this
	 * is also a Confidential VM.
	 */
	if ((ms_hyperv.priv_high & HV_CPU_MANAGEMENT) &&
	    !(ms_hyperv.priv_high & HV_ISOLATION)) {
		hv_root_partition = true;
		pr_info("Hyper-V: running as root partition\n");
	}

	if (ms_hyperv.hints & HV_X64_HYPERV_NESTED) {
		hv_nested = true;
		pr_info("Hyper-V: running on a nested hypervisor\n");
	}

	/*
	 * Extract host information.
	 */
	if (hv_max_functions_eax >= HYPERV_CPUID_VERSION) {
		hv_host_info_eax = cpuid_eax(HYPERV_CPUID_VERSION);
		hv_host_info_ebx = cpuid_ebx(HYPERV_CPUID_VERSION);
		hv_host_info_ecx = cpuid_ecx(HYPERV_CPUID_VERSION);
		hv_host_info_edx = cpuid_edx(HYPERV_CPUID_VERSION);

		pr_info("Hyper-V: Host Build %d.%d.%d.%d-%d-%d\n",
			hv_host_info_ebx >> 16, hv_host_info_ebx & 0xFFFF,
			hv_host_info_eax, hv_host_info_edx & 0xFFFFFF,
			hv_host_info_ecx, hv_host_info_edx >> 24);
	}

	if (ms_hyperv.features & HV_ACCESS_FREQUENCY_MSRS &&
	    ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE) {
		x86_platform.calibrate_tsc = hv_get_tsc_khz;
		x86_platform.calibrate_cpu = hv_get_tsc_khz;
	}

	if (ms_hyperv.priv_high & HV_ISOLATION) {
		ms_hyperv.isolation_config_a = cpuid_eax(HYPERV_CPUID_ISOLATION_CONFIG);
		ms_hyperv.isolation_config_b = cpuid_ebx(HYPERV_CPUID_ISOLATION_CONFIG);

		if (ms_hyperv.shared_gpa_boundary_active)
			ms_hyperv.shared_gpa_boundary =
				BIT_ULL(ms_hyperv.shared_gpa_boundary_bits);

		pr_info("Hyper-V: Isolation Config: Group A 0x%x, Group B 0x%x\n",
			ms_hyperv.isolation_config_a, ms_hyperv.isolation_config_b);

		if (hv_get_isolation_type() == HV_ISOLATION_TYPE_SNP) {
			static_branch_enable(&isolation_type_snp);
#ifdef CONFIG_SWIOTLB
			swiotlb_unencrypted_base = ms_hyperv.shared_gpa_boundary;
#endif
		}
	}

	if (hv_max_functions_eax >= HYPERV_CPUID_NESTED_FEATURES) {
		ms_hyperv.nested_features =
			cpuid_eax(HYPERV_CPUID_NESTED_FEATURES);
		pr_info("Hyper-V: Nested features: 0x%x\n",
			ms_hyperv.nested_features);
	}

#ifdef CONFIG_X86_LOCAL_APIC
	if (ms_hyperv.features & HV_ACCESS_FREQUENCY_MSRS &&
	    ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE) {
		/*
		 * Get the APIC frequency.
		 */
		u64	hv_lapic_frequency;

		rdmsrl(HV_X64_MSR_APIC_FREQUENCY, hv_lapic_frequency);
		hv_lapic_frequency = div_u64(hv_lapic_frequency, HZ);
		lapic_timer_period = hv_lapic_frequency;
		pr_info("Hyper-V: LAPIC Timer Frequency: %#x\n",
			lapic_timer_period);
	}

	register_nmi_handler(NMI_UNKNOWN, hv_nmi_unknown, NMI_FLAG_FIRST,
			     "hv_nmi_unknown");
#endif

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif

#if IS_ENABLED(CONFIG_HYPERV) && defined(CONFIG_KEXEC_CORE)
	machine_ops.shutdown = hv_machine_shutdown;
	machine_ops.crash_shutdown = hv_machine_crash_shutdown;
#endif
	if (ms_hyperv.features & HV_ACCESS_TSC_INVARIANT) {
		/*
		 * Writing to synthetic MSR 0x40000118 updates/changes the
		 * guest visible CPUIDs. Setting bit 0 of this MSR enables
		 * guests to report the invariant TSC feature through the
		 * CPUID instruction, CPUID 0x80000007/EDX, bit 8. See code
		 * in early_init_intel() where this bit is examined. The
		 * setting of this MSR bit should happen before
		 * early_init_intel() is called.
		 */
		wrmsrl(HV_X64_MSR_TSC_INVARIANT_CONTROL, HV_EXPOSE_INVARIANT_TSC);
		setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
	}

	/*
	 * Generation 2 instances don't support reading the NMI status from
	 * port 0x61.
	 */
	if (efi_enabled(EFI_BOOT))
		x86_platform.get_nmi_reason = hv_get_nmi_reason;

	/*
	 * Hyper-V VMs have a PIT emulation quirk such that zeroing the
	 * counter register during PIT shutdown restarts the PIT. So it
	 * continues to interrupt at 18.2 HZ. Setting i8253_clear_counter
	 * to false tells pit_shutdown() not to zero the counter so that
	 * the PIT really is shut down. Generation 2 VMs don't have a PIT,
	 * and setting this value has no effect on them.
	 */
	i8253_clear_counter_on_shutdown = false;

#if IS_ENABLED(CONFIG_HYPERV)
	if ((hv_get_isolation_type() == HV_ISOLATION_TYPE_VBS) ||
	    (hv_get_isolation_type() == HV_ISOLATION_TYPE_SNP))
		hv_vtom_init();
	/*
	 * Setup the hook to get control post apic initialization.
	 */
	x86_platform.apic_post_init = hyperv_init;
	hyperv_setup_mmu_ops();
	/* Setup the IDT for hypervisor callback */
	alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_hyperv_callback);

	/* Setup the IDT for reenlightenment notifications */
	if (ms_hyperv.features & HV_ACCESS_REENLIGHTENMENT) {
		alloc_intr_gate(HYPERV_REENLIGHTENMENT_VECTOR,
				asm_sysvec_hyperv_reenlightenment);
	}

	/* Setup the IDT for stimer0 */
	if (ms_hyperv.misc_features & HV_STIMER_DIRECT_MODE_AVAILABLE) {
		alloc_intr_gate(HYPERV_STIMER0_VECTOR,
				asm_sysvec_hyperv_stimer0);
	}

# ifdef CONFIG_SMP
	smp_ops.smp_prepare_boot_cpu = hv_smp_prepare_boot_cpu;
	if (hv_root_partition)
		smp_ops.smp_prepare_cpus = hv_smp_prepare_cpus;
# endif

	/*
	 * Hyper-V doesn't provide irq remapping for the IO-APIC. To enable
	 * x2apic, set the x2apic destination mode to physical mode when
	 * x2apic is available; the Hyper-V IOMMU driver makes sure that CPUs
	 * assigned IO-APIC irqs have an 8-bit APIC id.
	 */
# ifdef CONFIG_X86_X2APIC
	if (x2apic_supported())
		x2apic_phys = 1;
# endif

	/* Register Hyper-V specific clocksource */
	hv_init_clocksource();
#endif
	/*
	 * TSC should be marked as unstable only after Hyper-V
	 * clocksource has been initialized. This ensures that the
	 * stability of the sched_clock is not altered.
	 */
	if (!(ms_hyperv.features & HV_ACCESS_TSC_INVARIANT))
		mark_tsc_unstable("running on Hyper-V");

	hardlockup_detector_disable();
}

static bool __init ms_hyperv_x2apic_available(void)
{
	return x2apic_supported();
}

/*
 * If ms_hyperv_msi_ext_dest_id() returns true, hyperv_prepare_irq_remapping()
 * returns -ENODEV and the Hyper-V IOMMU driver is not used; instead, the
 * generic support of the 15-bit APIC ID is used: see __irq_msi_compose_msg().
 *
 * Note: for a VM on Hyper-V, the I/O-APIC is the only device which
 * (logically) generates MSIs directly to the system APIC irq domain.
 * There is no HPET, and PCI MSI/MSI-X interrupts are remapped by the
 * pci-hyperv host bridge.
 *
 * Note: for a Hyper-V root partition, this will always return false.
 * The hypervisor doesn't expose these HYPERV_CPUID_VIRT_STACK_* cpuids by
 * default; they are implemented as intercepts by the Windows Hyper-V stack.
 * Even a nested root partition (L2 root) will not get them because the
 * nested (L1) hypervisor filters them out.
 */
static bool __init ms_hyperv_msi_ext_dest_id(void)
{
	u32 eax;

	eax = cpuid_eax(HYPERV_CPUID_VIRT_STACK_INTERFACE);
	if (eax != HYPERV_VS_INTERFACE_EAX_SIGNATURE)
		return false;

	eax = cpuid_eax(HYPERV_CPUID_VIRT_STACK_PROPERTIES);
	return eax & HYPERV_VS_PROPERTIES_EAX_EXTENDED_IOAPIC_RTE;
}

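/* Hooks used by the generic x86 hypervisor detection code. */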
const __initconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
	.name			= "Microsoft Hyper-V",
	.detect			= ms_hyperv_platform,
	.type			= X86_HYPER_MS_HYPERV,
	.init.x2apic_available	= ms_hyperv_x2apic_available,
	.init.msi_ext_dest_id	= ms_hyperv_msi_ext_dest_id,
	.init.init_platform	= ms_hyperv_init_platform,
};
580