// SPDX-License-Identifier: GPL-2.0

/*
 * Architecture neutral utility routines for interacting with
 * Hyper-V. This file is specifically for code that must be
 * built-in to the kernel image when CONFIG_HYPERV is set
 * (vs. being in a module) because it is called from architecture
 * specific code under arch/.
 *
 * Copyright (C) 2021, Microsoft, Inc.
 *
 * Author : Michael Kelley <mikelley@microsoft.com>
 */

#include <linux/types.h>
#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/bitfield.h>
#include <linux/cpumask.h>
#include <linux/sched/task_stack.h>
#include <linux/panic_notifier.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/efi.h>
#include <linux/kdebug.h>
#include <linux/kmsg_dump.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/dma-map-ops.h>
#include <linux/set_memory.h>
#include <hyperv/hvhdk.h>
#include <asm/mshyperv.h>

u64 hv_current_partition_id = HV_PARTITION_ID_SELF;
EXPORT_SYMBOL_GPL(hv_current_partition_id);

enum hv_partition_type hv_curr_partition_type;
EXPORT_SYMBOL_GPL(hv_curr_partition_type);

/*
 * ms_hyperv and hv_nested are defined here with other
 * Hyper-V specific globals so they are shared across all architectures and are
 * built only when CONFIG_HYPERV is defined. But on x86,
 * ms_hyperv_init_platform() is built even when CONFIG_HYPERV is not
 * defined, and it uses these two variables. So mark them as __weak
 * here, allowing for an overriding definition in the module containing
 * ms_hyperv_init_platform().
 */
bool __weak hv_nested;
EXPORT_SYMBOL_GPL(hv_nested);

struct ms_hyperv_info __weak ms_hyperv;
EXPORT_SYMBOL_GPL(ms_hyperv);

u32 *hv_vp_index;
EXPORT_SYMBOL_GPL(hv_vp_index);

u32 hv_max_vp_index;
EXPORT_SYMBOL_GPL(hv_max_vp_index);

void * __percpu *hyperv_pcpu_input_arg;
EXPORT_SYMBOL_GPL(hyperv_pcpu_input_arg);

void * __percpu *hyperv_pcpu_output_arg;
EXPORT_SYMBOL_GPL(hyperv_pcpu_output_arg);

static void hv_kmsg_dump_unregister(void);

static struct ctl_table_header *hv_ctl_table_hdr;

/*
 * Per-cpu array holding the tail pointer for the SynIC event ring buffer
 * for each SINT.
 *
 * We cannot maintain this in mshv driver because the tail pointer should
 * persist even if the mshv driver is unloaded.
 */
u8 * __percpu *hv_synic_eventring_tail;
EXPORT_SYMBOL_GPL(hv_synic_eventring_tail);

/*
 * Hyper-V specific initialization and shutdown code that is
 * common across all architectures. Called from architecture
 * specific initialization functions.
 */

void __init hv_common_free(void)
{
	unregister_sysctl_table(hv_ctl_table_hdr);
	hv_ctl_table_hdr = NULL;

	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE)
		hv_kmsg_dump_unregister();

	kfree(hv_vp_index);
	hv_vp_index = NULL;

	free_percpu(hyperv_pcpu_output_arg);
	hyperv_pcpu_output_arg = NULL;

	free_percpu(hyperv_pcpu_input_arg);
	hyperv_pcpu_input_arg = NULL;

	free_percpu(hv_synic_eventring_tail);
	hv_synic_eventring_tail = NULL;
}

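/*
 * One Hyper-V page used by hv_kmsg_dump() to stage panic kmsg text that is
 * then reported to the hypervisor via the guest crash MSRs. Allocated by
 * hv_kmsg_dump_register() when the kmsg dump path is set up.
 */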
static void *hv_panic_page;

/*
 * Boolean to control whether to report panic messages over Hyper-V.
 *
 * It can be set via /proc/sys/kernel/hyperv_record_panic_msg
 */
static int sysctl_record_panic_msg = 1;

/*
 * sysctl option to allow the user to control whether kmsg data should be
 * reported to Hyper-V on panic.
 */
static const struct ctl_table hv_ctl_table[] = {
	{
		.procname = "hyperv_record_panic_msg",
		.data = &sysctl_record_panic_msg,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_ONE
	},
};

static int hv_die_panic_notify_crash(struct notifier_block *self,
				     unsigned long val, void *args);

static struct notifier_block hyperv_die_report_block = {
	.notifier_call = hv_die_panic_notify_crash,
};

static struct notifier_block hyperv_panic_report_block = {
	.notifier_call = hv_die_panic_notify_crash,
};

/*
 * The following callback works both as die and panic notifier; its
 * goal is to provide panic information to the hypervisor unless the
 * kmsg dumper is used [see hv_kmsg_dump()], which provides more
 * information but isn't always available.
 *
 * Notice that both the panic/die report notifiers are registered only
 * if we have the capability HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE set.
 */
static int hv_die_panic_notify_crash(struct notifier_block *self,
				     unsigned long val, void *args)
{
	struct pt_regs *regs;
	bool is_die;

	/* Don't notify Hyper-V unless we have a die oops event or panic. */
	if (self == &hyperv_panic_report_block) {
		is_die = false;
		regs = current_pt_regs();
	} else { /* die event */
		if (val != DIE_OOPS)
			return NOTIFY_DONE;

		is_die = true;
		regs = ((struct die_args *)args)->regs;
	}

	/*
	 * Hyper-V should be notified only once about a panic/die. If we will
	 * be calling hv_kmsg_dump() later with kmsg data, don't do the
	 * notification here.
	 */
	if (!sysctl_record_panic_msg || !hv_panic_page)
		hyperv_report_panic(regs, val, is_die);

	return NOTIFY_DONE;
}

/*
 * Callback from kmsg_dump. Grab as much as possible from the end of the kmsg
 * buffer and call into Hyper-V to transfer the data.
 */
static void hv_kmsg_dump(struct kmsg_dumper *dumper,
			 struct kmsg_dump_detail *detail)
{
	struct kmsg_dump_iter iter;
	size_t bytes_written;

	/* We are only interested in panics. */
	if (detail->reason != KMSG_DUMP_PANIC || !sysctl_record_panic_msg)
		return;

	/*
	 * Write dump contents to the page. No need to synchronize; panic should
	 * be single-threaded.
	 */
	kmsg_dump_rewind(&iter);
	kmsg_dump_get_buffer(&iter, false, hv_panic_page, HV_HYP_PAGE_SIZE,
			     &bytes_written);
	if (!bytes_written)
		return;
	/*
	 * P3 to contain the physical address of the panic page & P4 to
	 * contain the size of the panic data in that page. Rest of the
	 * registers are no-op when the NOTIFY_MSG flag is set.
	 */
	hv_set_msr(HV_MSR_CRASH_P0, 0);
	hv_set_msr(HV_MSR_CRASH_P1, 0);
	hv_set_msr(HV_MSR_CRASH_P2, 0);
	hv_set_msr(HV_MSR_CRASH_P3, virt_to_phys(hv_panic_page));
	hv_set_msr(HV_MSR_CRASH_P4, bytes_written);

	/*
	 * Let Hyper-V know there is crash data available along with
	 * the panic message.
	 */
	hv_set_msr(HV_MSR_CRASH_CTL,
		   (HV_CRASH_CTL_CRASH_NOTIFY |
		    HV_CRASH_CTL_CRASH_NOTIFY_MSG));
}

static struct kmsg_dumper hv_kmsg_dumper = {
	.dump = hv_kmsg_dump,
};

static void hv_kmsg_dump_unregister(void)
{
	kmsg_dump_unregister(&hv_kmsg_dumper);
	unregister_die_notifier(&hyperv_die_report_block);
	atomic_notifier_chain_unregister(&panic_notifier_list,
					 &hyperv_panic_report_block);

	kfree(hv_panic_page);
	hv_panic_page = NULL;
}

static void hv_kmsg_dump_register(void)
{
	int ret;

	hv_panic_page = kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
	if (!hv_panic_page) {
		pr_err("Hyper-V: panic message page memory allocation failed\n");
		return;
	}

	ret = kmsg_dump_register(&hv_kmsg_dumper);
	if (ret) {
		pr_err("Hyper-V: kmsg dump register error 0x%x\n", ret);
		kfree(hv_panic_page);
		hv_panic_page = NULL;
	}
}

static inline bool hv_output_page_exists(void)
{
	return hv_root_partition() || IS_ENABLED(CONFIG_HYPERV_VTL_MODE);
}

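/*
 * HVCALL_GET_PARTITION_ID takes no input, so the per-CPU hypercall input
 * page is borrowed here to hold the hypercall output instead.
 */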
void __init hv_get_partition_id(void)
{
	struct hv_output_get_partition_id *output;
	unsigned long flags;
	u64 status, pt_id;

	local_irq_save(flags);
	output = *this_cpu_ptr(hyperv_pcpu_input_arg);
	status = hv_do_hypercall(HVCALL_GET_PARTITION_ID, NULL, output);
	pt_id = output->partition_id;
	local_irq_restore(flags);

	if (hv_result_success(status))
		hv_current_partition_id = pt_id;
	else
		pr_err("Hyper-V: failed to get partition ID: %#x\n",
		       hv_result(status));
}

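/*
 * When built for VTL mode, the kernel needs to know which Virtual Trust
 * Level (VTL) it was booted into. VTLs are part of Hyper-V's Virtual
 * Secure Mode; the active VTL of this VP is read from the
 * HV_REGISTER_VSM_VP_STATUS synthetic register.
 */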
#if IS_ENABLED(CONFIG_HYPERV_VTL_MODE)
u8 __init get_vtl(void)
{
	u64 control = HV_HYPERCALL_REP_COMP_1 | HVCALL_GET_VP_REGISTERS;
	struct hv_input_get_vp_registers *input;
	struct hv_output_get_vp_registers *output;
	unsigned long flags;
	u64 ret;

	local_irq_save(flags);
	input = *this_cpu_ptr(hyperv_pcpu_input_arg);
	output = *this_cpu_ptr(hyperv_pcpu_output_arg);

	memset(input, 0, struct_size(input, names, 1));
	input->partition_id = HV_PARTITION_ID_SELF;
	input->vp_index = HV_VP_INDEX_SELF;
	input->input_vtl.as_uint8 = 0;
	input->names[0] = HV_REGISTER_VSM_VP_STATUS;

	ret = hv_do_hypercall(control, input, output);
	if (hv_result_success(ret)) {
		ret = output->values[0].reg8 & HV_VTL_MASK;
	} else {
		pr_err("Failed to get VTL (error: %lld), exiting...\n", ret);
		BUG();
	}

	local_irq_restore(flags);
	return ret;
}
#endif

int __init hv_common_init(void)
{
	int i;
	union hv_hypervisor_version_info version;

	/* Get information about the Hyper-V host version */
	if (!hv_get_hypervisor_version(&version))
		pr_info("Hyper-V: Host Build %d.%d.%d.%d-%d-%d\n",
			version.major_version, version.minor_version,
			version.build_number, version.service_number,
			version.service_pack, version.service_branch);

	if (hv_is_isolation_supported())
		sysctl_record_panic_msg = 0;

	/*
	 * Hyper-V expects to get crash register data or kmsg when the crash
	 * enlightenment is available and the system crashes. Set
	 * crash_kexec_post_notifiers to true to make sure the crash
	 * enlightenment interface is called before running the kdump kernel.
	 */
	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		u64 hyperv_crash_ctl;

		crash_kexec_post_notifiers = true;
		pr_info("Hyper-V: enabling crash_kexec_post_notifiers\n");

		/*
		 * Panic message recording (sysctl_record_panic_msg)
		 * is enabled by default in non-isolated guests and
		 * disabled by default in isolated guests; the panic
		 * message recording won't be available in isolated
		 * guests should the following registration fail.
		 */
		hv_ctl_table_hdr = register_sysctl("kernel", hv_ctl_table);
		if (!hv_ctl_table_hdr)
			pr_err("Hyper-V: sysctl table register error");

		/*
		 * Register for panic kmsg callback only if the right
		 * capability is supported by the hypervisor.
		 */
		hyperv_crash_ctl = hv_get_msr(HV_MSR_CRASH_CTL);
		if (hyperv_crash_ctl & HV_CRASH_CTL_CRASH_NOTIFY_MSG)
			hv_kmsg_dump_register();

		register_die_notifier(&hyperv_die_report_block);
		atomic_notifier_chain_register(&panic_notifier_list,
					       &hyperv_panic_report_block);
	}

	/*
	 * Allocate the per-CPU state for the hypercall input arg.
	 * If this allocation fails, we will not be able to setup
	 * (per-CPU) hypercall input page and thus this failure is
	 * fatal on Hyper-V.
	 */
	hyperv_pcpu_input_arg = alloc_percpu(void *);
	BUG_ON(!hyperv_pcpu_input_arg);

	/* Allocate the per-CPU state for output arg for root */
	if (hv_output_page_exists()) {
		hyperv_pcpu_output_arg = alloc_percpu(void *);
		BUG_ON(!hyperv_pcpu_output_arg);
	}

	if (hv_root_partition()) {
		hv_synic_eventring_tail = alloc_percpu(u8 *);
		BUG_ON(!hv_synic_eventring_tail);
	}

	hv_vp_index = kmalloc_array(nr_cpu_ids, sizeof(*hv_vp_index),
				    GFP_KERNEL);
	if (!hv_vp_index) {
		hv_common_free();
		return -ENOMEM;
	}

	for (i = 0; i < nr_cpu_ids; i++)
		hv_vp_index[i] = VP_INVAL;

	return 0;
}

void __init ms_hyperv_late_init(void)
{
	struct acpi_table_header *header;
	acpi_status status;
	u8 *randomdata;
	u32 length, i;

	/*
	 * Seed the Linux random number generator with entropy provided by
	 * the Hyper-V host in ACPI table OEM0.
	 */
	if (!IS_ENABLED(CONFIG_ACPI))
		return;

	status = acpi_get_table("OEM0", 0, &header);
	if (ACPI_FAILURE(status) || !header)
		return;

	/*
	 * Since the "OEM0" table name is for OEM specific usage, verify
	 * that what we're seeing purports to be from Microsoft.
	 */
	if (strncmp(header->oem_table_id, "MICROSFT", 8))
		goto error;

	/*
	 * Ensure the length is reasonable. Requiring at least 8 bytes and
	 * no more than 4K bytes is somewhat arbitrary and just protects
	 * against a malformed table. Hyper-V currently provides 64 bytes,
	 * but allow for a change in a later version.
	 */
	if (header->length < sizeof(*header) + 8 ||
	    header->length > sizeof(*header) + SZ_4K)
		goto error;

	length = header->length - sizeof(*header);
	randomdata = (u8 *)(header + 1);

	pr_debug("Hyper-V: Seeding rng with %d random bytes from ACPI table OEM0\n",
		 length);

	add_bootloader_randomness(randomdata, length);

	/*
	 * To prevent the seed data from being visible in /sys/firmware/acpi,
	 * zero out the random data in the ACPI table and fixup the checksum.
	 * The zero'ing is done out of an abundance of caution in avoiding
	 * potential security risks to the rng. Similarly, reset the table
	 * length to just the header size so that a subsequent kexec doesn't
	 * try to use the zero'ed out random data.
	 */
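	/*
	 * The ACPI checksum byte makes all bytes of the table sum to zero
	 * (mod 256). Adding each byte's old value into the checksum before
	 * zeroing it (and likewise compensating for the length change below)
	 * keeps the table checksum valid.
	 */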
	for (i = 0; i < length; i++) {
		header->checksum += randomdata[i];
		randomdata[i] = 0;
	}

	for (i = 0; i < sizeof(header->length); i++)
		header->checksum += ((u8 *)&header->length)[i];
	header->length = sizeof(*header);
	for (i = 0; i < sizeof(header->length); i++)
		header->checksum -= ((u8 *)&header->length)[i];

error:
	acpi_put_table(header);
}

/*
 * Hyper-V specific initialization and die code for
 * individual CPUs that is common across all architectures.
 * Called by the CPU hotplug mechanism.
 */

int hv_common_cpu_init(unsigned int cpu)
{
	void **inputarg, **outputarg;
	u8 **synic_eventring_tail;
	u64 msr_vp_index;
	gfp_t flags;
	const int pgcount = hv_output_page_exists() ? 2 : 1;
	void *mem;
	int ret = 0;

	/* hv_cpu_init() can be called with IRQs disabled from hv_resume() */
	flags = irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL;

	inputarg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);

	/*
	 * The per-cpu memory is already allocated if this CPU was previously
	 * online and then taken offline
	 */
	if (!*inputarg) {
		mem = kmalloc(pgcount * HV_HYP_PAGE_SIZE, flags);
		if (!mem)
			return -ENOMEM;

		if (hv_output_page_exists()) {
			outputarg = (void **)this_cpu_ptr(hyperv_pcpu_output_arg);
			*outputarg = (char *)mem + HV_HYP_PAGE_SIZE;
		}

		if (!ms_hyperv.paravisor_present &&
		    (hv_isolation_type_snp() || hv_isolation_type_tdx())) {
			ret = set_memory_decrypted((unsigned long)mem, pgcount);
			if (ret) {
				/* It may be unsafe to free 'mem' */
				return ret;
			}

			memset(mem, 0x00, pgcount * HV_HYP_PAGE_SIZE);
		}

		/*
		 * In a fully enlightened TDX/SNP VM with more than 64 VPs, if
		 * hyperv_pcpu_input_arg is not NULL, set_memory_decrypted() ->
		 * ... -> cpa_flush()-> ... -> __send_ipi_mask_ex() tries to
		 * use hyperv_pcpu_input_arg as the hypercall input page, which
		 * must be a decrypted page in such a VM, but the page is still
		 * encrypted before set_memory_decrypted() returns. Fix this by
		 * setting *inputarg after the above set_memory_decrypted(): if
		 * hyperv_pcpu_input_arg is NULL, __send_ipi_mask_ex() returns
		 * HV_STATUS_INVALID_PARAMETER immediately, and the function
		 * hv_send_ipi_mask() falls back to orig_apic.send_IPI_mask(),
		 * which may be slightly slower than the hypercall, but still
		 * works correctly in such a VM.
		 */
		*inputarg = mem;
	}

	msr_vp_index = hv_get_msr(HV_MSR_VP_INDEX);

	hv_vp_index[cpu] = msr_vp_index;

	if (msr_vp_index > hv_max_vp_index)
		hv_max_vp_index = msr_vp_index;

	if (hv_root_partition()) {
		synic_eventring_tail = (u8 **)this_cpu_ptr(hv_synic_eventring_tail);
		*synic_eventring_tail = kcalloc(HV_SYNIC_SINT_COUNT,
						sizeof(u8), flags);
		/* No need to unwind any of the above on failure here */
		if (unlikely(!*synic_eventring_tail))
			ret = -ENOMEM;
	}

	return ret;
}

int hv_common_cpu_die(unsigned int cpu)
{
	u8 **synic_eventring_tail;
	/*
	 * The hyperv_pcpu_input_arg and hyperv_pcpu_output_arg memory
	 * is not freed when the CPU goes offline as the hyperv_pcpu_input_arg
	 * may be used by the Hyper-V vPCI driver in reassigning interrupts
	 * as part of the offlining process. The interrupt reassignment
	 * happens *after* the CPUHP_AP_HYPERV_ONLINE state has run and
	 * called this function.
	 *
	 * If a previously offlined CPU is brought back online again, the
	 * originally allocated memory is reused in hv_common_cpu_init().
	 */

	if (hv_root_partition()) {
		synic_eventring_tail = this_cpu_ptr(hv_synic_eventring_tail);
		kfree(*synic_eventring_tail);
		*synic_eventring_tail = NULL;
	}

	return 0;
}

/* Bit mask of the extended capability to query: see HV_EXT_CAPABILITY_xxx */
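/*
 * Example of a (hypothetical) call site checking one capability bit:
 *
 *	if (hv_query_ext_cap(HV_EXT_CAPABILITY_MEMORY_COLD_DISCARD_HINT))
 *		// the optional feature may be used
 */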
bool hv_query_ext_cap(u64 cap_query)
{
	/*
	 * The address of the 'hv_extended_cap' variable will be used as an
	 * output parameter to the hypercall below and so it should be
	 * compatible with 'virt_to_phys', which means its address should be
	 * directly mapped. Use 'static' to keep it compatible; stack variables
	 * can be virtually mapped, making them incompatible with
	 * 'virt_to_phys'.
	 * Hypercall input/output addresses should also be 8-byte aligned.
	 */
	static u64 hv_extended_cap __aligned(8);
	static bool hv_extended_cap_queried;
	u64 status;

	/*
	 * Querying extended capabilities is an extended hypercall. Check if
	 * the partition supports extended hypercalls first.
	 */
	if (!(ms_hyperv.priv_high & HV_ENABLE_EXTENDED_HYPERCALLS))
		return false;

	/* Extended capabilities do not change at runtime. */
	if (hv_extended_cap_queried)
		return hv_extended_cap & cap_query;

	status = hv_do_hypercall(HV_EXT_CALL_QUERY_CAPABILITIES, NULL,
				 &hv_extended_cap);

	/*
	 * The query extended capabilities hypercall should not fail under
	 * any normal circumstances. Avoid repeatedly making the hypercall
	 * on error.
	 */
	hv_extended_cap_queried = true;
	if (!hv_result_success(status)) {
		pr_err("Hyper-V: Extended query capabilities hypercall failed 0x%llx\n",
		       status);
		return false;
	}

	return hv_extended_cap & cap_query;
}
EXPORT_SYMBOL_GPL(hv_query_ext_cap);

void hv_setup_dma_ops(struct device *dev, bool coherent)
{
	arch_setup_dma_ops(dev, coherent);
}
EXPORT_SYMBOL_GPL(hv_setup_dma_ops);

bool hv_is_hibernation_supported(void)
{
	return !hv_root_partition() && acpi_sleep_state_supported(ACPI_STATE_S4);
}
EXPORT_SYMBOL_GPL(hv_is_hibernation_supported);

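/*
 * Note: the Hyper-V reference counter counts in 100 ns units (10 MHz) per
 * the Hyper-V Top Level Functional Specification, independent of the
 * underlying hardware timer frequency.
 */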
/*
 * Default function to read the Hyper-V reference counter, independent
 * of whether Hyper-V enlightened clocks/timers are being used. But on
 * architectures where it is used, Hyper-V enlightenment code in
 * hyperv_timer.c may override this function.
 */
static u64 __hv_read_ref_counter(void)
{
	return hv_get_msr(HV_MSR_TIME_REF_COUNT);
}

u64 (*hv_read_reference_counter)(void) = __hv_read_ref_counter;
EXPORT_SYMBOL_GPL(hv_read_reference_counter);

/* These __weak functions provide default "no-op" behavior and
 * may be overridden by architecture specific versions. Architectures
 * for which the default "no-op" behavior is sufficient can leave
 * them unimplemented and not be cluttered with a bunch of stub
 * functions in arch-specific code.
 */

bool __weak hv_is_isolation_supported(void)
{
	return false;
}
EXPORT_SYMBOL_GPL(hv_is_isolation_supported);

bool __weak hv_isolation_type_snp(void)
{
	return false;
}
EXPORT_SYMBOL_GPL(hv_isolation_type_snp);

bool __weak hv_isolation_type_tdx(void)
{
	return false;
}
EXPORT_SYMBOL_GPL(hv_isolation_type_tdx);

void __weak hv_setup_vmbus_handler(void (*handler)(void))
{
}
EXPORT_SYMBOL_GPL(hv_setup_vmbus_handler);

void __weak hv_remove_vmbus_handler(void)
{
}
EXPORT_SYMBOL_GPL(hv_remove_vmbus_handler);

void __weak hv_setup_mshv_handler(void (*handler)(void))
{
}
EXPORT_SYMBOL_GPL(hv_setup_mshv_handler);

void __weak hv_setup_kexec_handler(void (*handler)(void))
{
}
EXPORT_SYMBOL_GPL(hv_setup_kexec_handler);

void __weak hv_remove_kexec_handler(void)
{
}
EXPORT_SYMBOL_GPL(hv_remove_kexec_handler);

void __weak hv_setup_crash_handler(void (*handler)(struct pt_regs *regs))
{
}
EXPORT_SYMBOL_GPL(hv_setup_crash_handler);

void __weak hv_remove_crash_handler(void)
{
}
EXPORT_SYMBOL_GPL(hv_remove_crash_handler);

void __weak hyperv_cleanup(void)
{
}
EXPORT_SYMBOL_GPL(hyperv_cleanup);

u64 __weak hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
{
	return HV_STATUS_INVALID_PARAMETER;
}
EXPORT_SYMBOL_GPL(hv_ghcb_hypercall);

u64 __weak hv_tdx_hypercall(u64 control, u64 param1, u64 param2)
{
	return HV_STATUS_INVALID_PARAMETER;
}
EXPORT_SYMBOL_GPL(hv_tdx_hypercall);

void hv_identify_partition_type(void)
{
	/* Assume guest role */
	hv_curr_partition_type = HV_PARTITION_TYPE_GUEST;
	/*
	 * Check partition creation and cpu management privileges
	 *
	 * Hyper-V should never specify running as root and as a Confidential
	 * VM. But to protect against a compromised/malicious Hyper-V trying
	 * to exploit root behavior to expose Confidential VM memory, ignore
	 * the root partition setting if also a Confidential VM.
	 */
	if ((ms_hyperv.priv_high & HV_CREATE_PARTITIONS) &&
	    (ms_hyperv.priv_high & HV_CPU_MANAGEMENT) &&
	    !(ms_hyperv.priv_high & HV_ISOLATION)) {
		pr_info("Hyper-V: running as root partition\n");
		if (IS_ENABLED(CONFIG_MSHV_ROOT))
			hv_curr_partition_type = HV_PARTITION_TYPE_ROOT;
		else
			pr_crit("Hyper-V: CONFIG_MSHV_ROOT not enabled!\n");
	}
}

struct hv_status_info {
	char *string;
	int errno;
	u16 code;
};

/*
 * Note on the errno mappings:
 * A failed hypercall is usually only recoverable (or loggable) near
 * the call site where the HV_STATUS_* code is known. So the errno
 * it gets converted to is not too useful further up the stack.
 * Provide a few mappings that could be useful, and revert to -EIO
 * as a fallback.
 */
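/*
 * A typical (hypothetical) call site uses hv_result_to_errno() and
 * hv_result_to_string() roughly as:
 *
 *	status = hv_do_hypercall(HVCALL_..., input, output);
 *	if (!hv_result_success(status)) {
 *		pr_err("... failed: %s\n", hv_result_to_string(status));
 *		return hv_result_to_errno(status);
 *	}
 */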
static const struct hv_status_info hv_status_infos[] = {
#define _STATUS_INFO(status, errno) { #status, (errno), (status) }
	_STATUS_INFO(HV_STATUS_SUCCESS, 0),
	_STATUS_INFO(HV_STATUS_INVALID_HYPERCALL_CODE, -EINVAL),
	_STATUS_INFO(HV_STATUS_INVALID_HYPERCALL_INPUT, -EINVAL),
	_STATUS_INFO(HV_STATUS_INVALID_ALIGNMENT, -EIO),
	_STATUS_INFO(HV_STATUS_INVALID_PARAMETER, -EINVAL),
	_STATUS_INFO(HV_STATUS_ACCESS_DENIED, -EIO),
	_STATUS_INFO(HV_STATUS_INVALID_PARTITION_STATE, -EIO),
	_STATUS_INFO(HV_STATUS_OPERATION_DENIED, -EIO),
	_STATUS_INFO(HV_STATUS_UNKNOWN_PROPERTY, -EIO),
	_STATUS_INFO(HV_STATUS_PROPERTY_VALUE_OUT_OF_RANGE, -EIO),
	_STATUS_INFO(HV_STATUS_INSUFFICIENT_MEMORY, -ENOMEM),
	_STATUS_INFO(HV_STATUS_INVALID_PARTITION_ID, -EINVAL),
	_STATUS_INFO(HV_STATUS_INVALID_VP_INDEX, -EINVAL),
	_STATUS_INFO(HV_STATUS_NOT_FOUND, -EIO),
	_STATUS_INFO(HV_STATUS_INVALID_PORT_ID, -EINVAL),
	_STATUS_INFO(HV_STATUS_INVALID_CONNECTION_ID, -EINVAL),
	_STATUS_INFO(HV_STATUS_INSUFFICIENT_BUFFERS, -EIO),
	_STATUS_INFO(HV_STATUS_NOT_ACKNOWLEDGED, -EIO),
	_STATUS_INFO(HV_STATUS_INVALID_VP_STATE, -EIO),
	_STATUS_INFO(HV_STATUS_NO_RESOURCES, -EIO),
	_STATUS_INFO(HV_STATUS_PROCESSOR_FEATURE_NOT_SUPPORTED, -EIO),
	_STATUS_INFO(HV_STATUS_INVALID_LP_INDEX, -EINVAL),
	_STATUS_INFO(HV_STATUS_INVALID_REGISTER_VALUE, -EINVAL),
	_STATUS_INFO(HV_STATUS_OPERATION_FAILED, -EIO),
	_STATUS_INFO(HV_STATUS_TIME_OUT, -EIO),
	_STATUS_INFO(HV_STATUS_CALL_PENDING, -EIO),
	_STATUS_INFO(HV_STATUS_VTL_ALREADY_ENABLED, -EIO),
#undef _STATUS_INFO
};

static inline const struct hv_status_info *find_hv_status_info(u64 hv_status)
{
	int i;
	u16 code = hv_result(hv_status);

	for (i = 0; i < ARRAY_SIZE(hv_status_infos); ++i) {
		const struct hv_status_info *info = &hv_status_infos[i];

		if (info->code == code)
			return info;
	}

	return NULL;
}

/* Convert a hypercall result into a linux-friendly error code. */
int hv_result_to_errno(u64 status)
{
	const struct hv_status_info *info;

	/* hv_do_hypercall() may return U64_MAX when hypercalls aren't possible */
	if (unlikely(status == U64_MAX))
		return -EOPNOTSUPP;

	info = find_hv_status_info(status);
	if (info)
		return info->errno;

	return -EIO;
}
EXPORT_SYMBOL_GPL(hv_result_to_errno);

const char *hv_result_to_string(u64 status)
{
	const struct hv_status_info *info;

	if (unlikely(status == U64_MAX))
		return "Hypercall page missing!";

	info = find_hv_status_info(status);
	if (info)
		return info->string;

	return "Unknown";
}
EXPORT_SYMBOL_GPL(hv_result_to_string);