// SPDX-License-Identifier: GPL-2.0

/*
 * Architecture neutral utility routines for interacting with
 * Hyper-V. This file is specifically for code that must be
 * built-in to the kernel image when CONFIG_HYPERV is set
 * (vs. being in a module) because it is called from architecture
 * specific code under arch/.
 *
 * Copyright (C) 2021, Microsoft, Inc.
 *
 * Author : Michael Kelley <mikelley@microsoft.com>
 */

#include <linux/types.h>
#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/bitfield.h>
#include <linux/cpumask.h>
#include <linux/sched/task_stack.h>
#include <linux/panic_notifier.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/efi.h>
#include <linux/kdebug.h>
#include <linux/kmsg_dump.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/dma-map-ops.h>
#include <linux/set_memory.h>
#include <hyperv/hvhdk.h>
#include <asm/mshyperv.h>

u64 hv_current_partition_id = HV_PARTITION_ID_SELF;
EXPORT_SYMBOL_GPL(hv_current_partition_id);

enum hv_partition_type hv_curr_partition_type;
EXPORT_SYMBOL_GPL(hv_curr_partition_type);

/*
 * ms_hyperv and hv_nested are defined here with other
 * Hyper-V specific globals so they are shared across all architectures and are
 * built only when CONFIG_HYPERV is defined. But on x86,
 * ms_hyperv_init_platform() is built even when CONFIG_HYPERV is not
 * defined, and it uses these two variables. So mark them as __weak
 * here, allowing for an overriding definition in the module containing
 * ms_hyperv_init_platform().
 */
bool __weak hv_nested;
EXPORT_SYMBOL_GPL(hv_nested);

struct ms_hyperv_info __weak ms_hyperv;
EXPORT_SYMBOL_GPL(ms_hyperv);

u32 *hv_vp_index;
EXPORT_SYMBOL_GPL(hv_vp_index);

u32 hv_max_vp_index;
EXPORT_SYMBOL_GPL(hv_max_vp_index);

void * __percpu *hyperv_pcpu_input_arg;
EXPORT_SYMBOL_GPL(hyperv_pcpu_input_arg);

void * __percpu *hyperv_pcpu_output_arg;
EXPORT_SYMBOL_GPL(hyperv_pcpu_output_arg);

static void hv_kmsg_dump_unregister(void);

static struct ctl_table_header *hv_ctl_table_hdr;

/*
 * Per-cpu array holding the tail pointer for the SynIC event ring buffer
 * for each SINT.
 *
 * We cannot maintain this in mshv driver because the tail pointer should
 * persist even if the mshv driver is unloaded.
 */
u8 * __percpu *hv_synic_eventring_tail;
EXPORT_SYMBOL_GPL(hv_synic_eventring_tail);

/*
 * Hyper-V specific initialization and shutdown code that is
 * common across all architectures. Called from architecture
 * specific initialization functions.
 */

void __init hv_common_free(void)
{
	unregister_sysctl_table(hv_ctl_table_hdr);
	hv_ctl_table_hdr = NULL;

	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE)
		hv_kmsg_dump_unregister();

	kfree(hv_vp_index);
	hv_vp_index = NULL;

	free_percpu(hyperv_pcpu_output_arg);
	hyperv_pcpu_output_arg = NULL;

	free_percpu(hyperv_pcpu_input_arg);
	hyperv_pcpu_input_arg = NULL;

	free_percpu(hv_synic_eventring_tail);
	hv_synic_eventring_tail = NULL;
}
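
/*
 * One page of memory used to pass the tail of the kernel log to Hyper-V
 * on panic. Allocated by hv_kmsg_dump_register() and filled in by
 * hv_kmsg_dump().
 */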
static void *hv_panic_page;

/*
 * Boolean to control whether to report panic messages over Hyper-V.
 *
 * It can be set via /proc/sys/kernel/hyperv_record_panic_msg
 */
static int sysctl_record_panic_msg = 1;

/*
 * sysctl option to allow the user to control whether kmsg data should be
 * reported to Hyper-V on panic.
 */
static const struct ctl_table hv_ctl_table[] = {
	{
		.procname = "hyperv_record_panic_msg",
		.data = &sysctl_record_panic_msg,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_ONE
	},
};

static int hv_die_panic_notify_crash(struct notifier_block *self,
				     unsigned long val, void *args);

static struct notifier_block hyperv_die_report_block = {
	.notifier_call = hv_die_panic_notify_crash,
};

static struct notifier_block hyperv_panic_report_block = {
	.notifier_call = hv_die_panic_notify_crash,
};

/*
 * The following callback works both as die and panic notifier; its
 * goal is to provide panic information to the hypervisor unless the
 * kmsg dumper is used [see hv_kmsg_dump()], which provides more
 * information but isn't always available.
 *
 * Notice that both the panic/die report notifiers are registered only
 * if we have the capability HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE set.
 */
static int hv_die_panic_notify_crash(struct notifier_block *self,
				     unsigned long val, void *args)
{
	struct pt_regs *regs;
	bool is_die;

	/* Don't notify Hyper-V unless we have a die oops event or panic. */
	if (self == &hyperv_panic_report_block) {
		is_die = false;
		regs = current_pt_regs();
	} else { /* die event */
		if (val != DIE_OOPS)
			return NOTIFY_DONE;

		is_die = true;
		regs = ((struct die_args *)args)->regs;
	}

	/*
	 * Hyper-V should be notified only once about a panic/die. If we will
	 * be calling hv_kmsg_dump() later with kmsg data, don't do the
	 * notification here.
	 */
	if (!sysctl_record_panic_msg || !hv_panic_page)
		hyperv_report_panic(regs, val, is_die);

	return NOTIFY_DONE;
}

/*
 * Callback from kmsg_dump. Grab as much as possible from the end of the kmsg
 * buffer and call into Hyper-V to transfer the data.
 */
static void hv_kmsg_dump(struct kmsg_dumper *dumper,
			 struct kmsg_dump_detail *detail)
{
	struct kmsg_dump_iter iter;
	size_t bytes_written;

	/* We are only interested in panics. */
	if (detail->reason != KMSG_DUMP_PANIC || !sysctl_record_panic_msg)
		return;

	/*
	 * Write dump contents to the page. No need to synchronize; panic should
	 * be single-threaded. Ignore failures from kmsg_dump_get_buffer() since
	 * panic notification should be done even if there is no message data.
	 * Don't assume bytes_written is set in case of failure, so initialize it.
	 */
	kmsg_dump_rewind(&iter);
	bytes_written = 0;
	(void)kmsg_dump_get_buffer(&iter, false, hv_panic_page, HV_HYP_PAGE_SIZE,
				   &bytes_written);

	/*
	 * P3 contains the physical address of the panic page and P4 contains
	 * the size of the panic data in that page. The rest of the registers
	 * are no-ops when the NOTIFY_MSG flag is set.
	 */
	hv_set_msr(HV_MSR_CRASH_P0, 0);
	hv_set_msr(HV_MSR_CRASH_P1, 0);
	hv_set_msr(HV_MSR_CRASH_P2, 0);
	hv_set_msr(HV_MSR_CRASH_P3, bytes_written ? virt_to_phys(hv_panic_page) : 0);
	hv_set_msr(HV_MSR_CRASH_P4, bytes_written);

	/*
	 * Let Hyper-V know there is crash data available along with
	 * the panic message.
	 */
	hv_set_msr(HV_MSR_CRASH_CTL,
		   (HV_CRASH_CTL_CRASH_NOTIFY |
		    HV_CRASH_CTL_CRASH_NOTIFY_MSG));
}

static struct kmsg_dumper hv_kmsg_dumper = {
	.dump = hv_kmsg_dump,
};

static void hv_kmsg_dump_unregister(void)
{
	kmsg_dump_unregister(&hv_kmsg_dumper);
	unregister_die_notifier(&hyperv_die_report_block);
	atomic_notifier_chain_unregister(&panic_notifier_list,
					 &hyperv_panic_report_block);

	kfree(hv_panic_page);
	hv_panic_page = NULL;
}

static void hv_kmsg_dump_register(void)
{
	int ret;

	hv_panic_page = kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
	if (!hv_panic_page) {
		pr_err("Hyper-V: panic message page memory allocation failed\n");
		return;
	}

	ret = kmsg_dump_register(&hv_kmsg_dumper);
	if (ret) {
		pr_err("Hyper-V: kmsg dump register error 0x%x\n", ret);
		kfree(hv_panic_page);
		hv_panic_page = NULL;
	}
}

static inline bool hv_output_page_exists(void)
{
	return hv_parent_partition() || IS_ENABLED(CONFIG_HYPERV_VTL_MODE);
}
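
/*
 * Ask the hypervisor for the ID of the partition we are running in, using
 * HVCALL_GET_PARTITION_ID. The hypercall takes no input, so the per-CPU
 * hypercall input page is reused as the output buffer; interrupts are
 * disabled while that page is in use.
 */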
void __init hv_get_partition_id(void)
{
	struct hv_output_get_partition_id *output;
	unsigned long flags;
	u64 status, pt_id;

	local_irq_save(flags);
	output = *this_cpu_ptr(hyperv_pcpu_input_arg);
	status = hv_do_hypercall(HVCALL_GET_PARTITION_ID, NULL, output);
	pt_id = output->partition_id;
	local_irq_restore(flags);

	if (hv_result_success(status))
		hv_current_partition_id = pt_id;
	else
		pr_err("Hyper-V: failed to get partition ID: %#x\n",
		       hv_result(status));
}

#if IS_ENABLED(CONFIG_HYPERV_VTL_MODE)
u8 __init get_vtl(void)
{
	u64 control = HV_HYPERCALL_REP_COMP_1 | HVCALL_GET_VP_REGISTERS;
	struct hv_input_get_vp_registers *input;
	struct hv_output_get_vp_registers *output;
	unsigned long flags;
	u64 ret;

	local_irq_save(flags);
	input = *this_cpu_ptr(hyperv_pcpu_input_arg);
	output = *this_cpu_ptr(hyperv_pcpu_output_arg);

	memset(input, 0, struct_size(input, names, 1));
	input->partition_id = HV_PARTITION_ID_SELF;
	input->vp_index = HV_VP_INDEX_SELF;
	input->input_vtl.as_uint8 = 0;
	input->names[0] = HV_REGISTER_VSM_VP_STATUS;

	ret = hv_do_hypercall(control, input, output);
	if (hv_result_success(ret)) {
		ret = output->values[0].reg8 & HV_VTL_MASK;
	} else {
		pr_err("Failed to get VTL (error: %lld), exiting...\n", ret);
		BUG();
	}

	local_irq_restore(flags);
	return ret;
}
#endif
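
/*
 * Architecture-independent initialization: log the hypervisor version,
 * set up crash/panic reporting when the crash MSRs are available, and
 * allocate the per-CPU hypercall argument pointers and the VP index array.
 */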
int __init hv_common_init(void)
{
	int i;
	union hv_hypervisor_version_info version;

	/* Get information about the Microsoft Hypervisor version */
	if (!hv_get_hypervisor_version(&version))
		pr_info("Hyper-V: Hypervisor Build %d.%d.%d.%d-%d-%d\n",
			version.major_version, version.minor_version,
			version.build_number, version.service_number,
			version.service_pack, version.service_branch);

	if (hv_is_isolation_supported())
		sysctl_record_panic_msg = 0;

	/*
	 * Hyper-V expects to get crash register data or kmsg when the crash
	 * enlightenment is available and the system crashes. Set
	 * crash_kexec_post_notifiers to true to make sure the crash
	 * enlightenment interface is called before the kdump kernel runs.
	 */
	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		u64 hyperv_crash_ctl;

		crash_kexec_post_notifiers = true;
		pr_info("Hyper-V: enabling crash_kexec_post_notifiers\n");

		/*
		 * Panic message recording (sysctl_record_panic_msg)
		 * is enabled by default in non-isolated guests and
		 * disabled by default in isolated guests; the panic
		 * message recording won't be available in isolated
		 * guests should the following registration fail.
		 */
		hv_ctl_table_hdr = register_sysctl("kernel", hv_ctl_table);
		if (!hv_ctl_table_hdr)
			pr_err("Hyper-V: sysctl table register error");

		/*
		 * Register for panic kmsg callback only if the right
		 * capability is supported by the hypervisor.
		 */
		hyperv_crash_ctl = hv_get_msr(HV_MSR_CRASH_CTL);
		if (hyperv_crash_ctl & HV_CRASH_CTL_CRASH_NOTIFY_MSG)
			hv_kmsg_dump_register();

		register_die_notifier(&hyperv_die_report_block);
		atomic_notifier_chain_register(&panic_notifier_list,
					       &hyperv_panic_report_block);
	}

	/*
	 * Allocate the per-CPU state for the hypercall input arg.
	 * If this allocation fails, we will not be able to setup
	 * (per-CPU) hypercall input page and thus this failure is
	 * fatal on Hyper-V.
	 */
	hyperv_pcpu_input_arg = alloc_percpu(void *);
	BUG_ON(!hyperv_pcpu_input_arg);

	/* Allocate the per-CPU state for output arg for root */
	if (hv_output_page_exists()) {
		hyperv_pcpu_output_arg = alloc_percpu(void *);
		BUG_ON(!hyperv_pcpu_output_arg);
	}

	if (hv_parent_partition()) {
		hv_synic_eventring_tail = alloc_percpu(u8 *);
		BUG_ON(!hv_synic_eventring_tail);
	}

	hv_vp_index = kmalloc_array(nr_cpu_ids, sizeof(*hv_vp_index),
				    GFP_KERNEL);
	if (!hv_vp_index) {
		hv_common_free();
		return -ENOMEM;
	}

	for (i = 0; i < nr_cpu_ids; i++)
		hv_vp_index[i] = VP_INVAL;

	return 0;
}

void __init ms_hyperv_late_init(void)
{
	struct acpi_table_header *header;
	acpi_status status;
	u8 *randomdata;
	u32 length, i;

	/*
	 * Seed the Linux random number generator with entropy provided by
	 * the Hyper-V host in ACPI table OEM0.
	 */
	if (!IS_ENABLED(CONFIG_ACPI))
		return;

	status = acpi_get_table("OEM0", 0, &header);
	if (ACPI_FAILURE(status) || !header)
		return;

	/*
	 * Since the "OEM0" table name is for OEM specific usage, verify
	 * that what we're seeing purports to be from Microsoft.
	 */
	if (strncmp(header->oem_table_id, "MICROSFT", 8))
		goto error;

	/*
	 * Ensure the length is reasonable. Requiring at least 8 bytes and
	 * no more than 4K bytes is somewhat arbitrary and just protects
	 * against a malformed table. Hyper-V currently provides 64 bytes,
	 * but allow for a change in a later version.
	 */
	if (header->length < sizeof(*header) + 8 ||
	    header->length > sizeof(*header) + SZ_4K)
		goto error;

	length = header->length - sizeof(*header);
	randomdata = (u8 *)(header + 1);

	pr_debug("Hyper-V: Seeding rng with %d random bytes from ACPI table OEM0\n",
		 length);

	add_bootloader_randomness(randomdata, length);

	/*
	 * To prevent the seed data from being visible in /sys/firmware/acpi,
	 * zero out the random data in the ACPI table and fix up the checksum.
	 * The zeroing is done out of an abundance of caution in avoiding
	 * potential security risks to the rng. Similarly, reset the table
	 * length to just the header size so that a subsequent kexec doesn't
	 * try to use the zeroed-out random data.
	 */
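	/*
	 * The ACPI table checksum covers every byte of the table and must
	 * sum to zero. Compensate in header->checksum for each data byte
	 * that is zeroed, and for the change to the length field below, so
	 * that the table still checksums correctly.
	 */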
	for (i = 0; i < length; i++) {
		header->checksum += randomdata[i];
		randomdata[i] = 0;
	}

	for (i = 0; i < sizeof(header->length); i++)
		header->checksum += ((u8 *)&header->length)[i];
	header->length = sizeof(*header);
	for (i = 0; i < sizeof(header->length); i++)
		header->checksum -= ((u8 *)&header->length)[i];

error:
	acpi_put_table(header);
}

/*
 * Hyper-V specific initialization and die code for
 * individual CPUs that is common across all architectures.
 * Called by the CPU hotplug mechanism.
 */

int hv_common_cpu_init(unsigned int cpu)
{
	void **inputarg, **outputarg;
	u8 **synic_eventring_tail;
	u64 msr_vp_index;
	gfp_t flags;
	const int pgcount = hv_output_page_exists() ? 2 : 1;
	void *mem;
	int ret = 0;

	/* hv_cpu_init() can be called with IRQs disabled from hv_resume() */
	flags = irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL;

	inputarg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);

	/*
	 * The per-cpu memory is already allocated if this CPU was previously
	 * online and then taken offline
	 */
	if (!*inputarg) {
		mem = kmalloc_array(pgcount, HV_HYP_PAGE_SIZE, flags);
		if (!mem)
			return -ENOMEM;

		if (hv_output_page_exists()) {
			outputarg = (void **)this_cpu_ptr(hyperv_pcpu_output_arg);
			*outputarg = (char *)mem + HV_HYP_PAGE_SIZE;
		}

		if (!ms_hyperv.paravisor_present &&
		    (hv_isolation_type_snp() || hv_isolation_type_tdx())) {
			ret = set_memory_decrypted((unsigned long)mem, pgcount);
			if (ret) {
				/* It may be unsafe to free 'mem' */
				return ret;
			}

			memset(mem, 0x00, pgcount * HV_HYP_PAGE_SIZE);
		}

		/*
		 * In a fully enlightened TDX/SNP VM with more than 64 VPs, if
		 * hyperv_pcpu_input_arg is not NULL, set_memory_decrypted() ->
		 * ... -> cpa_flush()-> ... -> __send_ipi_mask_ex() tries to
		 * use hyperv_pcpu_input_arg as the hypercall input page, which
		 * must be a decrypted page in such a VM, but the page is still
		 * encrypted before set_memory_decrypted() returns. Fix this by
		 * setting *inputarg after the above set_memory_decrypted(): if
		 * hyperv_pcpu_input_arg is NULL, __send_ipi_mask_ex() returns
		 * HV_STATUS_INVALID_PARAMETER immediately, and the function
		 * hv_send_ipi_mask() falls back to orig_apic.send_IPI_mask(),
		 * which may be slightly slower than the hypercall, but still
		 * works correctly in such a VM.
		 */
		*inputarg = mem;
	}

	msr_vp_index = hv_get_msr(HV_MSR_VP_INDEX);

	hv_vp_index[cpu] = msr_vp_index;

	if (msr_vp_index > hv_max_vp_index)
		hv_max_vp_index = msr_vp_index;

	if (hv_parent_partition()) {
		synic_eventring_tail = (u8 **)this_cpu_ptr(hv_synic_eventring_tail);
		*synic_eventring_tail = kcalloc(HV_SYNIC_SINT_COUNT,
						sizeof(u8), flags);
		/* No need to unwind any of the above on failure here */
		if (unlikely(!*synic_eventring_tail))
			ret = -ENOMEM;
	}

	return ret;
}

int hv_common_cpu_die(unsigned int cpu)
{
	u8 **synic_eventring_tail;
	/*
	 * The hyperv_pcpu_input_arg and hyperv_pcpu_output_arg memory
	 * is not freed when the CPU goes offline as the hyperv_pcpu_input_arg
	 * may be used by the Hyper-V vPCI driver in reassigning interrupts
	 * as part of the offlining process. The interrupt reassignment
	 * happens *after* the CPUHP_AP_HYPERV_ONLINE state has run and
	 * called this function.
	 *
	 * If a previously offlined CPU is brought back online again, the
	 * originally allocated memory is reused in hv_common_cpu_init().
	 */

	if (hv_parent_partition()) {
		synic_eventring_tail = this_cpu_ptr(hv_synic_eventring_tail);
		kfree(*synic_eventring_tail);
		*synic_eventring_tail = NULL;
	}

	return 0;
}
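
/*
 * Query whether the hypervisor advertises a given extended capability.
 * Typical use is to gate an optional feature, along the lines of the
 * following (illustrative only; the capability name is just an example):
 *
 *	if (hv_query_ext_cap(HV_EXT_CAPABILITY_MEMORY_COLD_DISCARD_HINT))
 *		... enable the optional behavior ...
 */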
/* Bit mask of the extended capability to query: see HV_EXT_CAPABILITY_xxx */
bool hv_query_ext_cap(u64 cap_query)
{
	/*
	 * The address of the 'hv_extended_cap' variable will be used as an
	 * output parameter to the hypercall below and so it should be
	 * compatible with 'virt_to_phys'. This means its address must be
	 * directly mapped. Use 'static' to keep it compatible; stack variables
	 * can be virtually mapped, making them incompatible with
	 * 'virt_to_phys'.
	 * Hypercall input/output addresses should also be 8-byte aligned.
	 */
	static u64 hv_extended_cap __aligned(8);
	static bool hv_extended_cap_queried;
	u64 status;

	/*
	 * Querying extended capabilities is an extended hypercall. Check if the
	 * partition supports extended hypercall, first.
	 */
	if (!(ms_hyperv.priv_high & HV_ENABLE_EXTENDED_HYPERCALLS))
		return false;

	/* Extended capabilities do not change at runtime. */
	if (hv_extended_cap_queried)
		return hv_extended_cap & cap_query;

	status = hv_do_hypercall(HV_EXT_CALL_QUERY_CAPABILITIES, NULL,
				 &hv_extended_cap);

	/*
	 * The query extended capabilities hypercall should not fail under
	 * any normal circumstances. Avoid repeatedly making the hypercall, on
	 * error.
	 */
	hv_extended_cap_queried = true;
	if (!hv_result_success(status)) {
		pr_err("Hyper-V: Extended query capabilities hypercall failed 0x%llx\n",
		       status);
		return false;
	}

	return hv_extended_cap & cap_query;
}
EXPORT_SYMBOL_GPL(hv_query_ext_cap);

void hv_setup_dma_ops(struct device *dev, bool coherent)
{
	arch_setup_dma_ops(dev, coherent);
}
EXPORT_SYMBOL_GPL(hv_setup_dma_ops);

bool hv_is_hibernation_supported(void)
{
	return !hv_root_partition() && acpi_sleep_state_supported(ACPI_STATE_S4);
}
EXPORT_SYMBOL_GPL(hv_is_hibernation_supported);

/*
 * Default function to read the Hyper-V reference counter, independent
 * of whether Hyper-V enlightened clocks/timers are being used. But on
 * architectures where it is used, Hyper-V enlightenment code in
 * hyperv_timer.c may override this function.
 */
static u64 __hv_read_ref_counter(void)
{
	return hv_get_msr(HV_MSR_TIME_REF_COUNT);
}

u64 (*hv_read_reference_counter)(void) = __hv_read_ref_counter;
EXPORT_SYMBOL_GPL(hv_read_reference_counter);

/*
 * These __weak functions provide default "no-op" behavior and
 * may be overridden by architecture specific versions. Architectures
 * for which the default "no-op" behavior is sufficient can leave
 * them unimplemented and not be cluttered with a bunch of stub
 * functions in arch-specific code.
 */
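
/*
 * An architecture overrides one of these simply by providing a non-weak
 * definition of the same symbol; the linker then picks that definition
 * over the stub here.
 */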

bool __weak hv_is_isolation_supported(void)
{
	return false;
}
EXPORT_SYMBOL_GPL(hv_is_isolation_supported);

bool __weak hv_isolation_type_snp(void)
{
	return false;
}
EXPORT_SYMBOL_GPL(hv_isolation_type_snp);

bool __weak hv_isolation_type_tdx(void)
{
	return false;
}
EXPORT_SYMBOL_GPL(hv_isolation_type_tdx);

void __weak hv_setup_vmbus_handler(void (*handler)(void))
{
}
EXPORT_SYMBOL_GPL(hv_setup_vmbus_handler);

void __weak hv_remove_vmbus_handler(void)
{
}
EXPORT_SYMBOL_GPL(hv_remove_vmbus_handler);

void __weak hv_setup_mshv_handler(void (*handler)(void))
{
}
EXPORT_SYMBOL_GPL(hv_setup_mshv_handler);

void __weak hv_setup_kexec_handler(void (*handler)(void))
{
}
EXPORT_SYMBOL_GPL(hv_setup_kexec_handler);

void __weak hv_remove_kexec_handler(void)
{
}
EXPORT_SYMBOL_GPL(hv_remove_kexec_handler);

void __weak hv_setup_crash_handler(void (*handler)(struct pt_regs *regs))
{
}
EXPORT_SYMBOL_GPL(hv_setup_crash_handler);

void __weak hv_remove_crash_handler(void)
{
}
EXPORT_SYMBOL_GPL(hv_remove_crash_handler);

void __weak hyperv_cleanup(void)
{
}
EXPORT_SYMBOL_GPL(hyperv_cleanup);

u64 __weak hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
{
	return HV_STATUS_INVALID_PARAMETER;
}
EXPORT_SYMBOL_GPL(hv_ghcb_hypercall);

u64 __weak hv_tdx_hypercall(u64 control, u64 param1, u64 param2)
{
	return HV_STATUS_INVALID_PARAMETER;
}
EXPORT_SYMBOL_GPL(hv_tdx_hypercall);

void __weak hv_enable_coco_interrupt(unsigned int cpu, unsigned int vector, bool set)
{
}
EXPORT_SYMBOL_GPL(hv_enable_coco_interrupt);

void __weak hv_para_set_sint_proxy(bool enable)
{
}
EXPORT_SYMBOL_GPL(hv_para_set_sint_proxy);

u64 __weak hv_para_get_synic_register(unsigned int reg)
{
	return ~0ULL;
}
EXPORT_SYMBOL_GPL(hv_para_get_synic_register);

void __weak hv_para_set_synic_register(unsigned int reg, u64 val)
{
}
EXPORT_SYMBOL_GPL(hv_para_set_synic_register);

void hv_identify_partition_type(void)
{
	/* Assume guest role */
	hv_curr_partition_type = HV_PARTITION_TYPE_GUEST;
	/*
	 * Check partition creation and cpu management privileges
	 *
	 * Hyper-V should never specify running as root and as a Confidential
	 * VM. But to protect against a compromised/malicious Hyper-V trying
	 * to exploit root behavior to expose Confidential VM memory, ignore
	 * the root partition setting if also a Confidential VM.
	 */
	if ((ms_hyperv.priv_high & HV_CREATE_PARTITIONS) &&
	    !(ms_hyperv.priv_high & HV_ISOLATION)) {

		if (!IS_ENABLED(CONFIG_MSHV_ROOT)) {
			pr_crit("Hyper-V: CONFIG_MSHV_ROOT not enabled!\n");
		} else if (ms_hyperv.priv_high & HV_CPU_MANAGEMENT) {
			pr_info("Hyper-V: running as root partition\n");
			hv_curr_partition_type = HV_PARTITION_TYPE_ROOT;
		} else {
			pr_info("Hyper-V: running as L1VH partition\n");
			hv_curr_partition_type = HV_PARTITION_TYPE_L1VH;
		}
	}
}
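
/*
 * The table and helpers below map Hyper-V hypercall status codes to
 * printable names and Linux errno values. A typical caller pattern
 * (illustrative only):
 *
 *	status = hv_do_hypercall(control, input, output);
 *	if (!hv_result_success(status)) {
 *		pr_err("hypercall failed: %s\n", hv_result_to_string(status));
 *		return hv_result_to_errno(status);
 *	}
 */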
struct hv_status_info {
	char *string;
	int errno;
	u16 code;
};

/*
 * Note on the errno mappings:
 * A failed hypercall is usually only recoverable (or loggable) near
 * the call site where the HV_STATUS_* code is known. So the errno
 * it gets converted to is not too useful further up the stack.
 * Provide a few mappings that could be useful, and revert to -EIO
 * as a fallback.
 */
static const struct hv_status_info hv_status_infos[] = {
#define _STATUS_INFO(status, errno) { #status, (errno), (status) }
	_STATUS_INFO(HV_STATUS_SUCCESS, 0),
	_STATUS_INFO(HV_STATUS_INVALID_HYPERCALL_CODE, -EINVAL),
	_STATUS_INFO(HV_STATUS_INVALID_HYPERCALL_INPUT, -EINVAL),
	_STATUS_INFO(HV_STATUS_INVALID_ALIGNMENT, -EIO),
	_STATUS_INFO(HV_STATUS_INVALID_PARAMETER, -EINVAL),
	_STATUS_INFO(HV_STATUS_ACCESS_DENIED, -EIO),
	_STATUS_INFO(HV_STATUS_INVALID_PARTITION_STATE, -EIO),
	_STATUS_INFO(HV_STATUS_OPERATION_DENIED, -EIO),
	_STATUS_INFO(HV_STATUS_UNKNOWN_PROPERTY, -EIO),
	_STATUS_INFO(HV_STATUS_PROPERTY_VALUE_OUT_OF_RANGE, -EIO),
	_STATUS_INFO(HV_STATUS_INSUFFICIENT_MEMORY, -ENOMEM),
	_STATUS_INFO(HV_STATUS_INVALID_PARTITION_ID, -EINVAL),
	_STATUS_INFO(HV_STATUS_INVALID_VP_INDEX, -EINVAL),
	_STATUS_INFO(HV_STATUS_NOT_FOUND, -EIO),
	_STATUS_INFO(HV_STATUS_INVALID_PORT_ID, -EINVAL),
	_STATUS_INFO(HV_STATUS_INVALID_CONNECTION_ID, -EINVAL),
	_STATUS_INFO(HV_STATUS_INSUFFICIENT_BUFFERS, -EIO),
	_STATUS_INFO(HV_STATUS_NOT_ACKNOWLEDGED, -EIO),
	_STATUS_INFO(HV_STATUS_INVALID_VP_STATE, -EIO),
	_STATUS_INFO(HV_STATUS_NO_RESOURCES, -EIO),
	_STATUS_INFO(HV_STATUS_PROCESSOR_FEATURE_NOT_SUPPORTED, -EIO),
	_STATUS_INFO(HV_STATUS_INVALID_LP_INDEX, -EINVAL),
	_STATUS_INFO(HV_STATUS_INVALID_REGISTER_VALUE, -EINVAL),
	_STATUS_INFO(HV_STATUS_OPERATION_FAILED, -EIO),
	_STATUS_INFO(HV_STATUS_TIME_OUT, -EIO),
	_STATUS_INFO(HV_STATUS_CALL_PENDING, -EIO),
	_STATUS_INFO(HV_STATUS_VTL_ALREADY_ENABLED, -EIO),
#undef _STATUS_INFO
};

static inline const struct hv_status_info *find_hv_status_info(u64 hv_status)
{
	int i;
	u16 code = hv_result(hv_status);

	for (i = 0; i < ARRAY_SIZE(hv_status_infos); ++i) {
		const struct hv_status_info *info = &hv_status_infos[i];

		if (info->code == code)
			return info;
	}

	return NULL;
}

/* Convert a hypercall result into a Linux-friendly error code. */
int hv_result_to_errno(u64 status)
{
	const struct hv_status_info *info;

	/* hv_do_hypercall() may return U64_MAX when hypercalls aren't possible */
	if (unlikely(status == U64_MAX))
		return -EOPNOTSUPP;

	info = find_hv_status_info(status);
	if (info)
		return info->errno;

	return -EIO;
}
EXPORT_SYMBOL_GPL(hv_result_to_errno);

const char *hv_result_to_string(u64 status)
{
	const struct hv_status_info *info;

	if (unlikely(status == U64_MAX))
		return "Hypercall page missing!";

	info = find_hv_status_info(status);
	if (info)
		return info->string;

	return "Unknown";
}
EXPORT_SYMBOL_GPL(hv_result_to_string);