// SPDX-License-Identifier: GPL-2.0-only
/*
 * HyperV Detection code.
 *
 * Copyright (C) 2010, Novell, Inc.
 * Author : K. Y. Srinivasan <ksrinivasan@novell.com>
 */

#include <linux/types.h>
#include <linux/time.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/efi.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kexec.h>
#include <linux/random.h>
#include <asm/processor.h>
#include <asm/hypervisor.h>
#include <hyperv/hvhdk.h>
#include <asm/mshyperv.h>
#include <asm/desc.h>
#include <asm/idtentry.h>
#include <asm/irq_regs.h>
#include <asm/i8259.h>
#include <asm/apic.h>
#include <asm/timer.h>
#include <asm/reboot.h>
#include <asm/msr.h>
#include <asm/nmi.h>
#include <clocksource/hyperv_timer.h>
#include <asm/numa.h>
#include <asm/svm.h>

/* Is Linux running on nested Microsoft Hypervisor */
bool hv_nested;
struct ms_hyperv_info ms_hyperv;

#if IS_ENABLED(CONFIG_HYPERV)
/*
 * When running with the paravisor, controls proxying the synthetic interrupts
 * from the host.
 */
static bool hv_para_sint_proxy;

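/*
 * Map a SynIC MSR to its nested-hypervisor equivalent so that a guest
 * running on a nested Microsoft Hypervisor accesses the nested variants
 * (SINTs, SIMP, SIEFP, SVERSION, SCONTROL, EOM). Other MSRs are passed
 * through unchanged.
 */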
static inline unsigned int hv_get_nested_msr(unsigned int reg)
{
	if (hv_is_sint_msr(reg))
		return reg - HV_X64_MSR_SINT0 + HV_X64_MSR_NESTED_SINT0;

	switch (reg) {
	case HV_X64_MSR_SIMP:
		return HV_X64_MSR_NESTED_SIMP;
	case HV_X64_MSR_SIEFP:
		return HV_X64_MSR_NESTED_SIEFP;
	case HV_X64_MSR_SVERSION:
		return HV_X64_MSR_NESTED_SVERSION;
	case HV_X64_MSR_SCONTROL:
		return HV_X64_MSR_NESTED_SCONTROL;
	case HV_X64_MSR_EOM:
		return HV_X64_MSR_NESTED_EOM;
	default:
		return reg;
	}
}

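/*
 * Read an MSR using the non-nested register number. When a paravisor is
 * present, SynIC MSR reads are routed to the hypervisor via
 * hv_ivm_msr_read(); otherwise a plain MSR read is used.
 */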
u64 hv_get_non_nested_msr(unsigned int reg)
{
	u64 value;

	if (hv_is_synic_msr(reg) && ms_hyperv.paravisor_present)
		hv_ivm_msr_read(reg, &value);
	else
		rdmsrq(reg, value);
	return value;
}
EXPORT_SYMBOL_GPL(hv_get_non_nested_msr);

void hv_set_non_nested_msr(unsigned int reg, u64 value)
{
	if (hv_is_synic_msr(reg) && ms_hyperv.paravisor_present) {
		/* The hypervisor will get the intercept. */
		hv_ivm_msr_write(reg, value);

		/* Using wrmsrq so the following goes to the paravisor. */
		if (hv_is_sint_msr(reg)) {
			union hv_synic_sint sint = { .as_uint64 = value };

			sint.proxy = hv_para_sint_proxy;
			native_wrmsrq(reg, sint.as_uint64);
		}
	} else {
		native_wrmsrq(reg, value);
	}
}
EXPORT_SYMBOL_GPL(hv_set_non_nested_msr);

/*
 * Enable or disable proxying synthetic interrupts
 * to the paravisor.
 */
void hv_para_set_sint_proxy(bool enable)
{
	hv_para_sint_proxy = enable;
}

/*
 * Get the SynIC register value from the paravisor.
 */
u64 hv_para_get_synic_register(unsigned int reg)
{
	if (WARN_ON(!ms_hyperv.paravisor_present || !hv_is_synic_msr(reg)))
		return ~0ULL;
	return native_read_msr(reg);
}

/*
 * Set the SynIC register value with the paravisor.
 */
void hv_para_set_synic_register(unsigned int reg, u64 val)
{
	if (WARN_ON(!ms_hyperv.paravisor_present || !hv_is_synic_msr(reg)))
		return;
	native_write_msr(reg, val);
}

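/*
 * hv_get_msr()/hv_set_msr() access the MSR that is appropriate for the
 * current environment: when running nested, the register number is first
 * translated to its nested equivalent.
 */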
u64 hv_get_msr(unsigned int reg)
{
	if (hv_nested)
		reg = hv_get_nested_msr(reg);

	return hv_get_non_nested_msr(reg);
}
EXPORT_SYMBOL_GPL(hv_get_msr);

void hv_set_msr(unsigned int reg, u64 value)
{
	if (hv_nested)
		reg = hv_get_nested_msr(reg);

	hv_set_non_nested_msr(reg, value);
}
EXPORT_SYMBOL_GPL(hv_set_msr);

static void (*mshv_handler)(void);
static void (*vmbus_handler)(void);
static void (*hv_stimer0_handler)(void);
static void (*hv_kexec_handler)(void);
static void (*hv_crash_handler)(struct pt_regs *regs);

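/*
 * Top-level handler for the hypervisor callback vector: dispatch to the
 * mshv and VMBus handlers if registered, and EOI the interrupt when
 * auto-EOI is not being used.
 */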
DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_callback)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	inc_irq_stat(irq_hv_callback_count);
	if (mshv_handler)
		mshv_handler();

	if (vmbus_handler)
		vmbus_handler();

	add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR);

	if (ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED)
		apic_eoi();

	set_irq_regs(old_regs);
}

void hv_setup_mshv_handler(void (*handler)(void))
{
	mshv_handler = handler;
}

void hv_setup_vmbus_handler(void (*handler)(void))
{
	vmbus_handler = handler;
}

void hv_remove_vmbus_handler(void)
{
	/* We have no way to deallocate the interrupt gate */
	vmbus_handler = NULL;
}

/*
 * Routines to do per-architecture handling of stimer0
 * interrupts when in Direct Mode
 */
DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_stimer0)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	inc_irq_stat(hyperv_stimer0_count);
	if (hv_stimer0_handler)
		hv_stimer0_handler();
	add_interrupt_randomness(HYPERV_STIMER0_VECTOR);
	apic_eoi();

	set_irq_regs(old_regs);
}

/* For x86/x64, override weak placeholders in hyperv_timer.c */
void hv_setup_stimer0_handler(void (*handler)(void))
{
	hv_stimer0_handler = handler;
}

void hv_remove_stimer0_handler(void)
{
	/* We have no way to deallocate the interrupt gate */
	hv_stimer0_handler = NULL;
}

void hv_setup_kexec_handler(void (*handler)(void))
{
	hv_kexec_handler = handler;
}

void hv_remove_kexec_handler(void)
{
	hv_kexec_handler = NULL;
}

void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs))
{
	hv_crash_handler = handler;
}

void hv_remove_crash_handler(void)
{
	hv_crash_handler = NULL;
}

#ifdef CONFIG_KEXEC_CORE
static void hv_machine_shutdown(void)
{
	if (kexec_in_progress) {
		hv_stimer_global_cleanup();

		if (hv_kexec_handler)
			hv_kexec_handler();
	}

	/*
	 * Call hv_cpu_die() on all the CPUs, otherwise later the hypervisor
	 * corrupts the old VP Assist Pages and can crash the kexec kernel.
	 */
	if (kexec_in_progress)
		cpuhp_remove_state(CPUHP_AP_HYPERV_ONLINE);

	/* The function calls stop_other_cpus(). */
	native_machine_shutdown();

	/* Disable the hypercall page when there is only 1 active CPU. */
	if (kexec_in_progress)
		hyperv_cleanup();
}
#endif /* CONFIG_KEXEC_CORE */

#ifdef CONFIG_CRASH_DUMP
static void hv_guest_crash_shutdown(struct pt_regs *regs)
{
	if (hv_crash_handler)
		hv_crash_handler(regs);

	/* The function calls crash_smp_send_stop(). */
	native_machine_crash_shutdown(regs);

	/* Disable the hypercall page when there is only 1 active CPU. */
	hyperv_cleanup();
}
#endif /* CONFIG_CRASH_DUMP */

static u64 hv_ref_counter_at_suspend;
static void (*old_save_sched_clock_state)(void);
static void (*old_restore_sched_clock_state)(void);

/*
 * The Hyper-V clock counter resets during hibernation. Save and restore the
 * clock offset during suspend/resume, while also accounting for the time that
 * passed before suspend. This makes sure that sched_clock, when using the
 * hv tsc page based clocksource, proceeds from where it left off during
 * suspend, and that timestamps of kernel messages are correct after resume.
 */
static void save_hv_clock_tsc_state(void)
{
	hv_ref_counter_at_suspend = hv_read_reference_counter();
}

static void restore_hv_clock_tsc_state(void)
{
	/*
	 * Adjust the offsets used by hv tsc clocksource to
	 * account for the time spent before hibernation.
	 * adjusted value = reference counter (time) at suspend
	 *                - reference counter (time) now.
	 */
	hv_adj_sched_clock_offset(hv_ref_counter_at_suspend - hv_read_reference_counter());
}

/*
 * Functions to override the save_sched_clock_state and
 * restore_sched_clock_state callbacks of x86_platform. The Hyper-V clock
 * counter is reset during suspend-resume, so the offset used to measure
 * time needs to be corrected after resume.
 */
static void hv_save_sched_clock_state(void)
{
	old_save_sched_clock_state();
	save_hv_clock_tsc_state();
}

static void hv_restore_sched_clock_state(void)
{
	restore_hv_clock_tsc_state();
	old_restore_sched_clock_state();
}

static void __init x86_setup_ops_for_tsc_pg_clock(void)
{
	if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE))
		return;

	old_save_sched_clock_state = x86_platform.save_sched_clock_state;
	x86_platform.save_sched_clock_state = hv_save_sched_clock_state;

	old_restore_sched_clock_state = x86_platform.restore_sched_clock_state;
	x86_platform.restore_sched_clock_state = hv_restore_sched_clock_state;
}

#ifdef CONFIG_X86_64
DEFINE_STATIC_CALL(hv_hypercall, hv_std_hypercall);
EXPORT_STATIC_CALL_TRAMP_GPL(hv_hypercall);
#define hypercall_update(hc) static_call_update(hv_hypercall, hc)
#endif
#endif /* CONFIG_HYPERV */

#ifndef hypercall_update
#define hypercall_update(hc) (void)hc
#endif

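/*
 * Detect Hyper-V via the CPUID vendor signature and verify that the
 * mandatory HYPERCALL and VP_INDEX MSRs are available. Returns the vendor
 * leaf on success, 0 otherwise.
 */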
static uint32_t __init ms_hyperv_platform(void)
{
	u32 eax;
	u32 hyp_signature[3];

	if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return 0;

	cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS,
	      &eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]);

	if (eax < HYPERV_CPUID_MIN || eax > HYPERV_CPUID_MAX ||
	    memcmp("Microsoft Hv", hyp_signature, 12))
		return 0;

	/* HYPERCALL and VP_INDEX MSRs are mandatory for all features. */
	eax = cpuid_eax(HYPERV_CPUID_FEATURES);
	if (!(eax & HV_MSR_HYPERCALL_AVAILABLE)) {
		pr_warn("x86/hyperv: HYPERCALL MSR not available.\n");
		return 0;
	}
	if (!(eax & HV_MSR_VP_INDEX_AVAILABLE)) {
		pr_warn("x86/hyperv: VP_INDEX MSR not available.\n");
		return 0;
	}

	return HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
}

#ifdef CONFIG_X86_LOCAL_APIC
/*
 * Prior to WS2016 Debug-VM sends NMIs to all CPUs which makes
 * it difficult to process CHANNELMSG_UNLOAD in case of crash. Handle
 * unknown NMI on the first CPU which gets it.
 */
static int hv_nmi_unknown(unsigned int val, struct pt_regs *regs)
{
	static atomic_t nmi_cpu = ATOMIC_INIT(-1);
	unsigned int old_cpu, this_cpu;

	if (!unknown_nmi_panic)
		return NMI_DONE;

	old_cpu = -1;
	this_cpu = raw_smp_processor_id();
	if (!atomic_try_cmpxchg(&nmi_cpu, &old_cpu, this_cpu))
		return NMI_HANDLED;

	return NMI_DONE;
}
#endif

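/* Read the TSC frequency (in Hz) from the synthetic MSR and convert to kHz. */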
static unsigned long hv_get_tsc_khz(void)
{
	unsigned long freq;

	rdmsrq(HV_X64_MSR_TSC_FREQUENCY, freq);

	return freq / 1000;
}

#if defined(CONFIG_SMP) && IS_ENABLED(CONFIG_HYPERV)
static void __init hv_smp_prepare_boot_cpu(void)
{
	native_smp_prepare_boot_cpu();
#if defined(CONFIG_X86_64) && defined(CONFIG_PARAVIRT_SPINLOCKS)
	hv_init_spinlocks();
#endif
}

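/*
 * For the root partition, tell the hypervisor about all present logical
 * processors and create a VP for each AP before it is brought up. SEV-SNP
 * enlightened guests instead use hv_snp_boot_ap() as the secondary-CPU
 * wakeup method.
 */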
static void __init hv_smp_prepare_cpus(unsigned int max_cpus)
{
#ifdef CONFIG_X86_64
	int i;
	int ret;
#endif

	native_smp_prepare_cpus(max_cpus);

	/*
	 * Override wakeup_secondary_cpu_64 callback for SEV-SNP
	 * enlightened guest.
	 */
	if (!ms_hyperv.paravisor_present && hv_isolation_type_snp()) {
		apic->wakeup_secondary_cpu_64 = hv_snp_boot_ap;
		return;
	}

#ifdef CONFIG_X86_64
	/* If AP LPs exist, we are in a kexec'd kernel and VPs already exist */
	if (num_present_cpus() == 1 || hv_lp_exists(1))
		return;

	for_each_present_cpu(i) {
		if (i == 0)
			continue;
		ret = hv_call_add_logical_proc(numa_cpu_node(i), i, cpu_physical_id(i));
		BUG_ON(ret);
	}

	ret = hv_call_notify_all_processors_started();
	WARN_ON(ret);

	for_each_present_cpu(i) {
		if (i == 0)
			continue;
		ret = hv_call_create_vp(numa_cpu_node(i), hv_current_partition_id, i, i);
		BUG_ON(ret);
	}
#endif
}
#endif

/*
 * When a fully enlightened TDX VM runs on Hyper-V, the firmware sets the
 * HW_REDUCED flag: refer to acpi_tb_create_local_fadt(). Consequently ttyS0
 * interrupts can't work because request_irq() -> ... -> irq_to_desc() returns
 * NULL for ttyS0. This happens because mp_config_acpi_legacy_irqs() sees a
 * nr_legacy_irqs() of 0, so it doesn't initialize the array 'mp_irqs[]', and
 * later setup_IO_APIC_irqs() -> find_irq_entry() fails to find the legacy irqs
 * from the array and hence doesn't create the necessary irq description info.
 *
 * Clone arch/x86/kernel/acpi/boot.c: acpi_generic_reduced_hw_init() here,
 * except don't change 'legacy_pic', which keeps its default value
 * 'default_legacy_pic'. This way, mp_config_acpi_legacy_irqs() sees a non-zero
 * nr_legacy_irqs() and eventually the serial console interrupts work properly.
 */
static void __init reduced_hw_init(void)
{
	x86_init.timers.timer_init = x86_init_noop;
	x86_init.irqs.pre_vector_init = x86_init_noop;
}

int hv_get_hypervisor_version(union hv_hypervisor_version_info *info)
{
	unsigned int hv_max_functions;

	hv_max_functions = cpuid_eax(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS);
	if (hv_max_functions < HYPERV_CPUID_VERSION) {
		pr_err("%s: Could not detect Hyper-V version\n", __func__);
		return -ENODEV;
	}

	cpuid(HYPERV_CPUID_VERSION, &info->eax, &info->ebx, &info->ecx, &info->edx);

	return 0;
}
EXPORT_SYMBOL_GPL(hv_get_hypervisor_version);

/*
 * Reserved vectors hard coded in the hypervisor. If used outside, the
 * hypervisor will either crash or hang, or attempt to break into the debugger.
 */
static void hv_reserve_irq_vectors(void)
{
#define HYPERV_DBG_FASTFAIL_VECTOR 0x29
#define HYPERV_DBG_ASSERT_VECTOR 0x2C
#define HYPERV_DBG_SERVICE_VECTOR 0x2D

	if (cpu_feature_enabled(X86_FEATURE_FRED))
		return;

	if (test_and_set_bit(HYPERV_DBG_ASSERT_VECTOR, system_vectors) ||
	    test_and_set_bit(HYPERV_DBG_SERVICE_VECTOR, system_vectors) ||
	    test_and_set_bit(HYPERV_DBG_FASTFAIL_VECTOR, system_vectors))
		BUG();

	pr_info("Hyper-V: reserve vectors: 0x%x 0x%x 0x%x\n",
		HYPERV_DBG_ASSERT_VECTOR, HYPERV_DBG_SERVICE_VECTOR,
		HYPERV_DBG_FASTFAIL_VECTOR);
}

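/*
 * Read the Hyper-V CPUID leaves, record the features and hints in
 * ms_hyperv, and hook the x86 platform, timer, interrupt and SMP
 * callbacks accordingly.
 */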
static void __init ms_hyperv_init_platform(void)
{
	int hv_max_functions_eax, eax;

#ifdef CONFIG_PARAVIRT
	pv_info.name = "Hyper-V";
#endif

	/*
	 * Extract the features and hints
	 */
	ms_hyperv.features = cpuid_eax(HYPERV_CPUID_FEATURES);
	ms_hyperv.priv_high = cpuid_ebx(HYPERV_CPUID_FEATURES);
	ms_hyperv.ext_features = cpuid_ecx(HYPERV_CPUID_FEATURES);
	ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES);
	ms_hyperv.hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO);

	hv_max_functions_eax = cpuid_eax(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS);

	pr_info("Hyper-V: privilege flags low %#x, high %#x, ext %#x, hints %#x, misc %#x\n",
		ms_hyperv.features, ms_hyperv.priv_high,
		ms_hyperv.ext_features, ms_hyperv.hints,
		ms_hyperv.misc_features);

	ms_hyperv.max_vp_index = cpuid_eax(HYPERV_CPUID_IMPLEMENT_LIMITS);
	ms_hyperv.max_lp_index = cpuid_ebx(HYPERV_CPUID_IMPLEMENT_LIMITS);

	pr_debug("Hyper-V: max %u virtual processors, %u logical processors\n",
		 ms_hyperv.max_vp_index, ms_hyperv.max_lp_index);

	hv_identify_partition_type();

	if (hv_root_partition())
		hv_reserve_irq_vectors();

	if (cc_platform_has(CC_ATTR_SNP_SECURE_AVIC))
		ms_hyperv.hints |= HV_DEPRECATING_AEOI_RECOMMENDED;

	if (ms_hyperv.hints & HV_X64_HYPERV_NESTED) {
		hv_nested = true;
		pr_info("Hyper-V: running on a nested hypervisor\n");
	}

	/*
	 * There is no check against the max function for HYPERV_CPUID_VIRT_STACK_* CPUID
	 * leaves as the hypervisor doesn't handle them. Even a nested root partition (L2
	 * root) will not get them because the nested (L1) hypervisor filters them out.
	 * These are handled through intercept processing by the Windows Hyper-V stack
	 * or the paravisor.
	 */
	eax = cpuid_eax(HYPERV_CPUID_VIRT_STACK_PROPERTIES);
	ms_hyperv.confidential_vmbus_available =
		eax & HYPERV_VS_PROPERTIES_EAX_CONFIDENTIAL_VMBUS_AVAILABLE;
	ms_hyperv.msi_ext_dest_id =
		eax & HYPERV_VS_PROPERTIES_EAX_EXTENDED_IOAPIC_RTE;

	if (ms_hyperv.features & HV_ACCESS_FREQUENCY_MSRS &&
	    ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE) {
		x86_platform.calibrate_tsc = hv_get_tsc_khz;
		x86_platform.calibrate_cpu = hv_get_tsc_khz;
		setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
	}

	if (ms_hyperv.priv_high & HV_ISOLATION) {
		ms_hyperv.isolation_config_a = cpuid_eax(HYPERV_CPUID_ISOLATION_CONFIG);
		ms_hyperv.isolation_config_b = cpuid_ebx(HYPERV_CPUID_ISOLATION_CONFIG);

		if (ms_hyperv.shared_gpa_boundary_active)
			ms_hyperv.shared_gpa_boundary =
				BIT_ULL(ms_hyperv.shared_gpa_boundary_bits);

		pr_info("Hyper-V: Isolation Config: Group A 0x%x, Group B 0x%x\n",
			ms_hyperv.isolation_config_a, ms_hyperv.isolation_config_b);

		if (hv_get_isolation_type() == HV_ISOLATION_TYPE_SNP) {
			static_branch_enable(&isolation_type_snp);
			if (!ms_hyperv.paravisor_present)
				hypercall_update(hv_snp_hypercall);
		} else if (hv_get_isolation_type() == HV_ISOLATION_TYPE_TDX) {
			static_branch_enable(&isolation_type_tdx);

			/* A TDX VM must use x2APIC and doesn't use lazy EOI. */
			ms_hyperv.hints &= ~HV_X64_APIC_ACCESS_RECOMMENDED;

			if (!ms_hyperv.paravisor_present) {
				hypercall_update(hv_tdx_hypercall);
				/*
				 * Mark the Hyper-V TSC page feature as disabled
				 * in a TDX VM without paravisor so that the
				 * Invariant TSC, which is a better clocksource
				 * anyway, is used instead.
				 */
				ms_hyperv.features &= ~HV_MSR_REFERENCE_TSC_AVAILABLE;

				/*
				 * The Invariant TSC is expected to be available
				 * in a TDX VM without paravisor, but if not,
				 * print a warning message. The slower Hyper-V MSR-based
				 * Ref Counter should end up being the clocksource.
				 */
				if (!(ms_hyperv.features & HV_ACCESS_TSC_INVARIANT))
					pr_warn("Hyper-V: Invariant TSC is unavailable\n");

				/* HV_MSR_CRASH_CTL is unsupported. */
				ms_hyperv.misc_features &= ~HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;

				/* Don't trust Hyper-V's TLB-flushing hypercalls. */
				ms_hyperv.hints &= ~HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;

				x86_init.acpi.reduced_hw_early_init = reduced_hw_init;
			}
		}
	}

	if (hv_max_functions_eax >= HYPERV_CPUID_NESTED_FEATURES) {
		ms_hyperv.nested_features =
			cpuid_eax(HYPERV_CPUID_NESTED_FEATURES);
		pr_info("Hyper-V: Nested features: 0x%x\n",
			ms_hyperv.nested_features);
	}

#ifdef CONFIG_X86_LOCAL_APIC
	if (ms_hyperv.features & HV_ACCESS_FREQUENCY_MSRS &&
	    ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE) {
		/*
		 * Get the APIC frequency.
		 */
		u64 hv_lapic_frequency;

		rdmsrq(HV_X64_MSR_APIC_FREQUENCY, hv_lapic_frequency);
		hv_lapic_frequency = div_u64(hv_lapic_frequency, HZ);
		lapic_timer_period = hv_lapic_frequency;
		pr_info("Hyper-V: LAPIC Timer Frequency: %#x\n",
			lapic_timer_period);
	}

	register_nmi_handler(NMI_UNKNOWN, hv_nmi_unknown, NMI_FLAG_FIRST,
			     "hv_nmi_unknown");
#endif

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif

#if IS_ENABLED(CONFIG_HYPERV)
	if (hv_root_partition())
		machine_ops.power_off = hv_machine_power_off;
#if defined(CONFIG_KEXEC_CORE)
	machine_ops.shutdown = hv_machine_shutdown;
#endif
#if defined(CONFIG_CRASH_DUMP)
	if (!hv_root_partition())
		machine_ops.crash_shutdown = hv_guest_crash_shutdown;
#endif
#endif
	/*
	 * HV_ACCESS_TSC_INVARIANT is always zero for the root partition. The
	 * root partition doesn't need to write to the synthetic MSR to enable
	 * the invariant TSC feature; it sees what the hardware provides.
	 */
	if (ms_hyperv.features & HV_ACCESS_TSC_INVARIANT) {
		/*
		 * Writing to synthetic MSR 0x40000118 updates/changes the
		 * guest visible CPUIDs. Setting bit 0 of this MSR enables
		 * guests to report the invariant TSC feature through the CPUID
		 * instruction, CPUID 0x80000007/EDX, bit 8. See code in
		 * early_init_intel() where this bit is examined. The
		 * setting of this MSR bit should happen before init_intel()
		 * is called.
		 */
		wrmsrq(HV_X64_MSR_TSC_INVARIANT_CONTROL, HV_EXPOSE_INVARIANT_TSC);
		setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
	}

	/*
	 * Generation 2 instances don't support reading the NMI status from
	 * 0x61 port.
	 */
	if (efi_enabled(EFI_BOOT))
		x86_platform.get_nmi_reason = hv_get_nmi_reason;

#if IS_ENABLED(CONFIG_HYPERV)
	if ((hv_get_isolation_type() == HV_ISOLATION_TYPE_VBS) ||
	    ms_hyperv.paravisor_present)
		hv_vtom_init();
	/*
	 * Setup the hook to get control post apic initialization.
	 */
	x86_platform.apic_post_init = hyperv_init;
	hyperv_setup_mmu_ops();

	/* Install system interrupt handler for hypervisor callback */
	sysvec_install(HYPERVISOR_CALLBACK_VECTOR, sysvec_hyperv_callback);

	/* Install system interrupt handler for reenlightenment notifications */
	if (ms_hyperv.features & HV_ACCESS_REENLIGHTENMENT) {
		sysvec_install(HYPERV_REENLIGHTENMENT_VECTOR, sysvec_hyperv_reenlightenment);
	}

	/* Install system interrupt handler for stimer0 */
	if (ms_hyperv.misc_features & HV_STIMER_DIRECT_MODE_AVAILABLE) {
		sysvec_install(HYPERV_STIMER0_VECTOR, sysvec_hyperv_stimer0);
	}

# ifdef CONFIG_SMP
	smp_ops.smp_prepare_boot_cpu = hv_smp_prepare_boot_cpu;
	if (hv_root_partition() ||
	    (!ms_hyperv.paravisor_present && hv_isolation_type_snp()))
		smp_ops.smp_prepare_cpus = hv_smp_prepare_cpus;
# endif

	/*
	 * Hyper-V doesn't provide irq remapping for IO-APIC. To enable x2apic,
	 * set x2apic destination mode to physical mode when x2apic is available,
	 * and the Hyper-V IOMMU driver makes sure cpus assigned with IO-APIC
	 * irqs have an 8-bit APIC id.
	 */
# ifdef CONFIG_X86_X2APIC
	if (x2apic_supported())
		x2apic_phys = 1;
# endif

	/* Register Hyper-V specific clocksource */
	hv_init_clocksource();
	x86_setup_ops_for_tsc_pg_clock();
	hv_vtl_init_platform();
#endif
	/*
	 * TSC should be marked as unstable only after Hyper-V
	 * clocksource has been initialized. This ensures that the
	 * stability of the sched_clock is not altered.
	 *
	 * HV_ACCESS_TSC_INVARIANT is always zero for the root partition. No
	 * need to check for it.
	 */
	if (!hv_root_partition() &&
	    !(ms_hyperv.features & HV_ACCESS_TSC_INVARIANT))
		mark_tsc_unstable("running on Hyper-V");

	hardlockup_detector_disable();
}

static bool __init ms_hyperv_x2apic_available(void)
{
	return x2apic_supported();
}

/*
 * If ms_hyperv_msi_ext_dest_id() returns true, hyperv_prepare_irq_remapping()
 * returns -ENODEV and the Hyper-V IOMMU driver is not used; instead, the
 * generic support of the 15-bit APIC ID is used: see __irq_msi_compose_msg().
 *
 * Note: for a VM on Hyper-V, the I/O-APIC is the only device which
 * (logically) generates MSIs directly to the system APIC irq domain.
 * There is no HPET, and PCI MSI/MSI-X interrupts are remapped by the
 * pci-hyperv host bridge.
 *
 * Note: for a Hyper-V root partition, this will always return false.
 */
static bool __init ms_hyperv_msi_ext_dest_id(void)
{
	return ms_hyperv.msi_ext_dest_id;
}

#ifdef CONFIG_AMD_MEM_ENCRYPT
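/*
 * For SEV-ES guests, copy the additional hypercall input registers
 * (RCX, RDX, R8) into the GHCB before the VMMCALL exit is forwarded
 * to the hypervisor.
 */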
static void hv_sev_es_hcall_prepare(struct ghcb *ghcb, struct pt_regs *regs)
{
	/* RAX and CPL are already in the GHCB */
	ghcb_set_rcx(ghcb, regs->cx);
	ghcb_set_rdx(ghcb, regs->dx);
	ghcb_set_r8(ghcb, regs->r8);
}

static bool hv_sev_es_hcall_finish(struct ghcb *ghcb, struct pt_regs *regs)
{
	/* No checking of the return state needed */
	return true;
}
#endif

const __initconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
	.name			= "Microsoft Hyper-V",
	.detect			= ms_hyperv_platform,
	.type			= X86_HYPER_MS_HYPERV,
	.init.x2apic_available	= ms_hyperv_x2apic_available,
	.init.msi_ext_dest_id	= ms_hyperv_msi_ext_dest_id,
	.init.init_platform	= ms_hyperv_init_platform,
	.init.guest_late_init	= ms_hyperv_late_init,
#ifdef CONFIG_AMD_MEM_ENCRYPT
	.runtime.sev_es_hcall_prepare = hv_sev_es_hcall_prepare,
	.runtime.sev_es_hcall_finish = hv_sev_es_hcall_finish,
#endif
};