// SPDX-License-Identifier: GPL-2.0

/*
 * Architecture neutral utility routines for interacting with
 * Hyper-V. This file is specifically for code that must be
 * built-in to the kernel image when CONFIG_HYPERV is set
 * (vs. being in a module) because it is called from architecture
 * specific code under arch/.
 *
 * Copyright (C) 2021, Microsoft, Inc.
 *
 * Author : Michael Kelley <mikelley@microsoft.com>
 */

#include <linux/types.h>
#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/bitfield.h>
#include <linux/cpumask.h>
#include <linux/sched/task_stack.h>
#include <linux/panic_notifier.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/efi.h>
#include <linux/kdebug.h>
#include <linux/kmsg_dump.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/dma-map-ops.h>
#include <linux/set_memory.h>
#include <hyperv/hvhdk.h>
#include <asm/mshyperv.h>

u64 hv_current_partition_id = HV_PARTITION_ID_SELF;
EXPORT_SYMBOL_GPL(hv_current_partition_id);

enum hv_partition_type hv_curr_partition_type;
EXPORT_SYMBOL_GPL(hv_curr_partition_type);

/*
 * ms_hyperv and hv_nested are defined here with other
 * Hyper-V specific globals so they are shared across all architectures and are
 * built only when CONFIG_HYPERV is defined. But on x86,
 * ms_hyperv_init_platform() is built even when CONFIG_HYPERV is not
 * defined, and it uses these two variables. So mark them as __weak
 * here, allowing for an overriding definition in the module containing
 * ms_hyperv_init_platform().
 */
bool __weak hv_nested;
EXPORT_SYMBOL_GPL(hv_nested);

struct ms_hyperv_info __weak ms_hyperv;
EXPORT_SYMBOL_GPL(ms_hyperv);

u32 *hv_vp_index;
EXPORT_SYMBOL_GPL(hv_vp_index);

u32 hv_max_vp_index;
EXPORT_SYMBOL_GPL(hv_max_vp_index);

void * __percpu *hyperv_pcpu_input_arg;
EXPORT_SYMBOL_GPL(hyperv_pcpu_input_arg);

void * __percpu *hyperv_pcpu_output_arg;
EXPORT_SYMBOL_GPL(hyperv_pcpu_output_arg);

static void hv_kmsg_dump_unregister(void);

static struct ctl_table_header *hv_ctl_table_hdr;

/*
 * Per-cpu array holding the tail pointer for the SynIC event ring buffer
 * for each SINT.
 *
 * We cannot maintain this in the mshv driver because the tail pointer
 * should persist even if the mshv driver is unloaded.
 */
u8 * __percpu *hv_synic_eventring_tail;
EXPORT_SYMBOL_GPL(hv_synic_eventring_tail);

/*
 * Hyper-V specific initialization and shutdown code that is
 * common across all architectures. Called from architecture
 * specific initialization functions.
 */

void __init hv_common_free(void)
{
	unregister_sysctl_table(hv_ctl_table_hdr);
	hv_ctl_table_hdr = NULL;

	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE)
		hv_kmsg_dump_unregister();

	kfree(hv_vp_index);
	hv_vp_index = NULL;

	free_percpu(hyperv_pcpu_output_arg);
	hyperv_pcpu_output_arg = NULL;

	free_percpu(hyperv_pcpu_input_arg);
	hyperv_pcpu_input_arg = NULL;

	free_percpu(hv_synic_eventring_tail);
	hv_synic_eventring_tail = NULL;
}

/*
 * Functions for allocating and freeing memory with size and
 * alignment HV_HYP_PAGE_SIZE. These functions are needed because
 * the guest page size may not be the same as the Hyper-V page
 * size. We depend upon kmalloc() aligning power-of-two size
 * allocations to the allocation size boundary, so that the
 * allocated memory appears to Hyper-V as a page of the size
 * it expects.
 */
void *hv_alloc_hyperv_page(void)
{
	BUILD_BUG_ON(PAGE_SIZE < HV_HYP_PAGE_SIZE);

	if (PAGE_SIZE == HV_HYP_PAGE_SIZE)
		return (void *)__get_free_page(GFP_KERNEL);
	else
		return kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(hv_alloc_hyperv_page);

void *hv_alloc_hyperv_zeroed_page(void)
{
	if (PAGE_SIZE == HV_HYP_PAGE_SIZE)
		return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	else
		return kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(hv_alloc_hyperv_zeroed_page);

void hv_free_hyperv_page(void *addr)
{
	if (PAGE_SIZE == HV_HYP_PAGE_SIZE)
		free_page((unsigned long)addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL_GPL(hv_free_hyperv_page);
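/*
 * Illustrative sketch, not part of the upstream file: a hypothetical
 * caller of the helpers above. It only demonstrates the alignment
 * guarantee described in the comment before hv_alloc_hyperv_page();
 * the function name is made up, and IS_ALIGNED() is assumed to be
 * available via the headers already included here.
 */
static void __maybe_unused hv_example_hyperv_page_usage(void)
{
	void *page = hv_alloc_hyperv_zeroed_page();

	if (!page)
		return;

	/*
	 * Whether the memory came from __get_free_page() or from kmalloc()
	 * with a power-of-two size, it is aligned to HV_HYP_PAGE_SIZE, so
	 * Hyper-V sees a page of the size it expects.
	 */
	WARN_ON(!IS_ALIGNED(virt_to_phys(page), HV_HYP_PAGE_SIZE));

	hv_free_hyperv_page(page);
}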
static void *hv_panic_page;

/*
 * Boolean to control whether to report panic messages over Hyper-V.
 *
 * It can be set via /proc/sys/kernel/hyperv_record_panic_msg
 */
static int sysctl_record_panic_msg = 1;

/*
 * sysctl option to allow the user to control whether kmsg data should be
 * reported to Hyper-V on panic.
 */
static const struct ctl_table hv_ctl_table[] = {
	{
		.procname	= "hyperv_record_panic_msg",
		.data		= &sysctl_record_panic_msg,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE
	},
};

static int hv_die_panic_notify_crash(struct notifier_block *self,
				     unsigned long val, void *args);

static struct notifier_block hyperv_die_report_block = {
	.notifier_call = hv_die_panic_notify_crash,
};

static struct notifier_block hyperv_panic_report_block = {
	.notifier_call = hv_die_panic_notify_crash,
};

/*
 * The following callback works both as die and panic notifier; its
 * goal is to provide panic information to the hypervisor unless the
 * kmsg dumper is used [see hv_kmsg_dump()], which provides more
 * information but isn't always available.
 *
 * Notice that both the panic/die report notifiers are registered only
 * if we have the capability HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE set.
 */
static int hv_die_panic_notify_crash(struct notifier_block *self,
				     unsigned long val, void *args)
{
	struct pt_regs *regs;
	bool is_die;

	/* Don't notify Hyper-V unless we have a die oops event or panic. */
	if (self == &hyperv_panic_report_block) {
		is_die = false;
		regs = current_pt_regs();
	} else { /* die event */
		if (val != DIE_OOPS)
			return NOTIFY_DONE;

		is_die = true;
		regs = ((struct die_args *)args)->regs;
	}

	/*
	 * Hyper-V should be notified only once about a panic/die. If we will
	 * be calling hv_kmsg_dump() later with kmsg data, don't do the
	 * notification here.
	 */
	if (!sysctl_record_panic_msg || !hv_panic_page)
		hyperv_report_panic(regs, val, is_die);

	return NOTIFY_DONE;
}

/*
 * Callback from kmsg_dump. Grab as much as possible from the end of the kmsg
 * buffer and call into Hyper-V to transfer the data.
 */
static void hv_kmsg_dump(struct kmsg_dumper *dumper,
			 struct kmsg_dump_detail *detail)
{
	struct kmsg_dump_iter iter;
	size_t bytes_written;

	/* We are only interested in panics. */
	if (detail->reason != KMSG_DUMP_PANIC || !sysctl_record_panic_msg)
		return;

	/*
	 * Write dump contents to the page. No need to synchronize; panic should
	 * be single-threaded.
	 */
	kmsg_dump_rewind(&iter);
	kmsg_dump_get_buffer(&iter, false, hv_panic_page, HV_HYP_PAGE_SIZE,
			     &bytes_written);
	if (!bytes_written)
		return;
	/*
	 * Set P3 to the physical address of the panic page and P4 to the
	 * size of the panic data in that page. The remaining crash registers
	 * are ignored when the NOTIFY_MSG flag is set.
	 */
	hv_set_msr(HV_MSR_CRASH_P0, 0);
	hv_set_msr(HV_MSR_CRASH_P1, 0);
	hv_set_msr(HV_MSR_CRASH_P2, 0);
	hv_set_msr(HV_MSR_CRASH_P3, virt_to_phys(hv_panic_page));
	hv_set_msr(HV_MSR_CRASH_P4, bytes_written);

	/*
	 * Let Hyper-V know there is crash data available along with
	 * the panic message.
	 */
	hv_set_msr(HV_MSR_CRASH_CTL,
		   (HV_CRASH_CTL_CRASH_NOTIFY |
		    HV_CRASH_CTL_CRASH_NOTIFY_MSG));
}

static struct kmsg_dumper hv_kmsg_dumper = {
	.dump = hv_kmsg_dump,
};

static void hv_kmsg_dump_unregister(void)
{
	kmsg_dump_unregister(&hv_kmsg_dumper);
	unregister_die_notifier(&hyperv_die_report_block);
	atomic_notifier_chain_unregister(&panic_notifier_list,
					 &hyperv_panic_report_block);

	hv_free_hyperv_page(hv_panic_page);
	hv_panic_page = NULL;
}

static void hv_kmsg_dump_register(void)
{
	int ret;

	hv_panic_page = hv_alloc_hyperv_zeroed_page();
	if (!hv_panic_page) {
		pr_err("Hyper-V: panic message page memory allocation failed\n");
		return;
	}

	ret = kmsg_dump_register(&hv_kmsg_dumper);
	if (ret) {
		pr_err("Hyper-V: kmsg dump register error 0x%x\n", ret);
		hv_free_hyperv_page(hv_panic_page);
		hv_panic_page = NULL;
	}
}

static inline bool hv_output_page_exists(void)
{
	return hv_root_partition() || IS_ENABLED(CONFIG_HYPERV_VTL_MODE);
}

void __init hv_get_partition_id(void)
{
	struct hv_output_get_partition_id *output;
	unsigned long flags;
	u64 status, pt_id;

	local_irq_save(flags);
	/* The hypercall takes no input; reuse the per-cpu input page for output */
	output = *this_cpu_ptr(hyperv_pcpu_input_arg);
	status = hv_do_hypercall(HVCALL_GET_PARTITION_ID, NULL, output);
	pt_id = output->partition_id;
	local_irq_restore(flags);

	if (hv_result_success(status))
		hv_current_partition_id = pt_id;
	else
		pr_err("Hyper-V: failed to get partition ID: %#x\n",
		       hv_result(status));
}

int __init hv_common_init(void)
{
	int i;
	union hv_hypervisor_version_info version;

	/* Get information about the Hyper-V host version */
	if (!hv_get_hypervisor_version(&version))
		pr_info("Hyper-V: Host Build %d.%d.%d.%d-%d-%d\n",
			version.major_version, version.minor_version,
			version.build_number, version.service_number,
			version.service_pack, version.service_branch);

	if (hv_is_isolation_supported())
		sysctl_record_panic_msg = 0;

	/*
	 * Hyper-V expects to get crash register data or kmsg when the crash
	 * enlightenment is available and the system crashes. Set
	 * crash_kexec_post_notifiers to true to make sure the crash
	 * enlightenment interface is called before running the kdump kernel.
	 */
	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		u64 hyperv_crash_ctl;

		crash_kexec_post_notifiers = true;
		pr_info("Hyper-V: enabling crash_kexec_post_notifiers\n");

		/*
		 * Panic message recording (sysctl_record_panic_msg)
		 * is enabled by default in non-isolated guests and
		 * disabled by default in isolated guests; the panic
		 * message recording won't be available in isolated
		 * guests should the following registration fail.
		 */
		hv_ctl_table_hdr = register_sysctl("kernel", hv_ctl_table);
		if (!hv_ctl_table_hdr)
			pr_err("Hyper-V: sysctl table register error\n");

		/*
		 * Register for panic kmsg callback only if the right
		 * capability is supported by the hypervisor.
		 */
		hyperv_crash_ctl = hv_get_msr(HV_MSR_CRASH_CTL);
		if (hyperv_crash_ctl & HV_CRASH_CTL_CRASH_NOTIFY_MSG)
			hv_kmsg_dump_register();

		register_die_notifier(&hyperv_die_report_block);
		atomic_notifier_chain_register(&panic_notifier_list,
					       &hyperv_panic_report_block);
	}

	/*
	 * Allocate the per-CPU state for the hypercall input arg.
	 * If this allocation fails, we will not be able to set up the
	 * (per-CPU) hypercall input page, and thus this failure is
	 * fatal on Hyper-V.
	 */
	hyperv_pcpu_input_arg = alloc_percpu(void *);
	BUG_ON(!hyperv_pcpu_input_arg);

	/* Allocate the per-CPU state for output arg for root */
	if (hv_output_page_exists()) {
		hyperv_pcpu_output_arg = alloc_percpu(void *);
		BUG_ON(!hyperv_pcpu_output_arg);
	}

	if (hv_root_partition()) {
		hv_synic_eventring_tail = alloc_percpu(u8 *);
		BUG_ON(!hv_synic_eventring_tail);
	}

	hv_vp_index = kmalloc_array(nr_cpu_ids, sizeof(*hv_vp_index),
				    GFP_KERNEL);
	if (!hv_vp_index) {
		hv_common_free();
		return -ENOMEM;
	}

	for (i = 0; i < nr_cpu_ids; i++)
		hv_vp_index[i] = VP_INVAL;

	return 0;
}

void __init ms_hyperv_late_init(void)
{
	struct acpi_table_header *header;
	acpi_status status;
	u8 *randomdata;
	u32 length, i;

	/*
	 * Seed the Linux random number generator with entropy provided by
	 * the Hyper-V host in ACPI table OEM0.
	 */
	if (!IS_ENABLED(CONFIG_ACPI))
		return;

	status = acpi_get_table("OEM0", 0, &header);
	if (ACPI_FAILURE(status) || !header)
		return;

	/*
	 * Since the "OEM0" table name is for OEM specific usage, verify
	 * that what we're seeing purports to be from Microsoft.
	 */
	if (strncmp(header->oem_table_id, "MICROSFT", 8))
		goto error;

	/*
	 * Ensure the length is reasonable. Requiring at least 8 bytes and
	 * no more than 4K bytes is somewhat arbitrary and just protects
	 * against a malformed table. Hyper-V currently provides 64 bytes,
	 * but allow for a change in a later version.
	 */
	if (header->length < sizeof(*header) + 8 ||
	    header->length > sizeof(*header) + SZ_4K)
		goto error;

	length = header->length - sizeof(*header);
	randomdata = (u8 *)(header + 1);

	pr_debug("Hyper-V: Seeding rng with %d random bytes from ACPI table OEM0\n",
		 length);

	add_bootloader_randomness(randomdata, length);

	/*
	 * To prevent the seed data from being visible in /sys/firmware/acpi,
	 * zero out the random data in the ACPI table and fix up the checksum.
	 * The zeroing is done out of an abundance of caution in avoiding
	 * potential security risks to the rng. Similarly, reset the table
	 * length to just the header size so that a subsequent kexec doesn't
	 * try to use the zeroed-out random data.
	 */
	for (i = 0; i < length; i++) {
		/* Folding each byte into the checksum compensates for zeroing it */
		header->checksum += randomdata[i];
		randomdata[i] = 0;
	}

	/* Compensate the checksum for the change in the length field */
	for (i = 0; i < sizeof(header->length); i++)
		header->checksum += ((u8 *)&header->length)[i];
	header->length = sizeof(*header);
	for (i = 0; i < sizeof(header->length); i++)
		header->checksum -= ((u8 *)&header->length)[i];

error:
	acpi_put_table(header);
}

/*
 * Hyper-V specific initialization and die code for
 * individual CPUs that is common across all architectures.
 * Called by the CPU hotplug mechanism.
 */

int hv_common_cpu_init(unsigned int cpu)
{
	void **inputarg, **outputarg;
	u8 **synic_eventring_tail;
	u64 msr_vp_index;
	gfp_t flags;
	const int pgcount = hv_output_page_exists() ? 2 : 1;
	void *mem;
	int ret = 0;

	/* hv_cpu_init() can be called with IRQs disabled from hv_resume() */
	flags = irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL;

	inputarg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);

	/*
	 * The per-cpu memory is already allocated if this CPU was previously
	 * online and then taken offline.
	 */
	if (!*inputarg) {
		mem = kmalloc(pgcount * HV_HYP_PAGE_SIZE, flags);
		if (!mem)
			return -ENOMEM;

		if (hv_output_page_exists()) {
			outputarg = (void **)this_cpu_ptr(hyperv_pcpu_output_arg);
			*outputarg = (char *)mem + HV_HYP_PAGE_SIZE;
		}

		if (!ms_hyperv.paravisor_present &&
		    (hv_isolation_type_snp() || hv_isolation_type_tdx())) {
			ret = set_memory_decrypted((unsigned long)mem, pgcount);
			if (ret) {
				/* It may be unsafe to free 'mem' */
				return ret;
			}

			memset(mem, 0x00, pgcount * HV_HYP_PAGE_SIZE);
		}

		/*
		 * In a fully enlightened TDX/SNP VM with more than 64 VPs, if
		 * hyperv_pcpu_input_arg is not NULL, set_memory_decrypted() ->
		 * ... -> cpa_flush() -> ... -> __send_ipi_mask_ex() tries to
		 * use hyperv_pcpu_input_arg as the hypercall input page, which
		 * must be a decrypted page in such a VM, but the page is still
		 * encrypted before set_memory_decrypted() returns. Fix this by
		 * setting *inputarg after the above set_memory_decrypted(): if
		 * hyperv_pcpu_input_arg is NULL, __send_ipi_mask_ex() returns
		 * HV_STATUS_INVALID_PARAMETER immediately, and the function
		 * hv_send_ipi_mask() falls back to orig_apic.send_IPI_mask(),
		 * which may be slightly slower than the hypercall, but still
		 * works correctly in such a VM.
		 */
		*inputarg = mem;
	}

	msr_vp_index = hv_get_msr(HV_MSR_VP_INDEX);

	hv_vp_index[cpu] = msr_vp_index;

	if (msr_vp_index > hv_max_vp_index)
		hv_max_vp_index = msr_vp_index;

	if (hv_root_partition()) {
		synic_eventring_tail = (u8 **)this_cpu_ptr(hv_synic_eventring_tail);
		*synic_eventring_tail = kcalloc(HV_SYNIC_SINT_COUNT,
						sizeof(u8), flags);
		/* No need to unwind any of the above on failure here */
		if (unlikely(!*synic_eventring_tail))
			ret = -ENOMEM;
	}

	return ret;
}

int hv_common_cpu_die(unsigned int cpu)
{
	u8 **synic_eventring_tail;
	/*
	 * The hyperv_pcpu_input_arg and hyperv_pcpu_output_arg memory
	 * is not freed when the CPU goes offline as the hyperv_pcpu_input_arg
	 * may be used by the Hyper-V vPCI driver in reassigning interrupts
	 * as part of the offlining process. The interrupt reassignment
	 * happens *after* the CPUHP_AP_HYPERV_ONLINE state has run and
	 * called this function.
	 *
	 * If a previously offlined CPU is brought back online again, the
	 * originally allocated memory is reused in hv_common_cpu_init().
	 */

	if (hv_root_partition()) {
		synic_eventring_tail = this_cpu_ptr(hv_synic_eventring_tail);
		kfree(*synic_eventring_tail);
		*synic_eventring_tail = NULL;
	}

	return 0;
}
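/*
 * Illustrative sketch, not part of the upstream file: roughly how
 * architecture code is expected to call hv_common_init() once at boot
 * and then wire hv_common_cpu_init()/hv_common_cpu_die() into CPU
 * hotplug. The function and state names are examples only, and
 * <linux/cpuhotplug.h> is assumed to be available to the caller.
 */
static int __init __maybe_unused hv_example_arch_setup(void)
{
	int ret;

	ret = hv_common_init();
	if (ret)
		return ret;

	/* Run hv_common_cpu_init() on each CPU as it comes online */
	ret = cpuhp_setup_state(CPUHP_AP_HYPERV_ONLINE, "example/hyperv:online",
				hv_common_cpu_init, hv_common_cpu_die);
	if (ret < 0)
		hv_common_free();

	return ret < 0 ? ret : 0;
}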
/* Bit mask of the extended capability to query: see HV_EXT_CAPABILITY_xxx */
bool hv_query_ext_cap(u64 cap_query)
{
	/*
	 * The address of the 'hv_extended_cap' variable will be used as an
	 * output parameter to the hypercall below, so it should be
	 * compatible with 'virt_to_phys'; that means its address must be
	 * directly mapped. Use 'static' to keep it compatible; stack variables
	 * can be virtually mapped, making them incompatible with
	 * 'virt_to_phys'.
	 * Hypercall input/output addresses should also be 8-byte aligned.
	 */
	static u64 hv_extended_cap __aligned(8);
	static bool hv_extended_cap_queried;
	u64 status;

	/*
	 * Querying extended capabilities is an extended hypercall. Check
	 * first whether the partition supports extended hypercalls.
	 */
	if (!(ms_hyperv.priv_high & HV_ENABLE_EXTENDED_HYPERCALLS))
		return false;

	/* Extended capabilities do not change at runtime. */
	if (hv_extended_cap_queried)
		return hv_extended_cap & cap_query;

	status = hv_do_hypercall(HV_EXT_CALL_QUERY_CAPABILITIES, NULL,
				 &hv_extended_cap);

	/*
	 * The query extended capabilities hypercall should not fail under
	 * any normal circumstances. Avoid repeatedly making the hypercall
	 * on error.
	 */
	hv_extended_cap_queried = true;
	if (!hv_result_success(status)) {
		pr_err("Hyper-V: Extended query capabilities hypercall failed 0x%llx\n",
		       status);
		return false;
	}

	return hv_extended_cap & cap_query;
}
EXPORT_SYMBOL_GPL(hv_query_ext_cap);

void hv_setup_dma_ops(struct device *dev, bool coherent)
{
	arch_setup_dma_ops(dev, coherent);
}
EXPORT_SYMBOL_GPL(hv_setup_dma_ops);

bool hv_is_hibernation_supported(void)
{
	return !hv_root_partition() && acpi_sleep_state_supported(ACPI_STATE_S4);
}
EXPORT_SYMBOL_GPL(hv_is_hibernation_supported);

/*
 * Default function to read the Hyper-V reference counter, independent
 * of whether Hyper-V enlightened clocks/timers are being used. But on
 * architectures where it is used, Hyper-V enlightenment code in
 * hyperv_timer.c may override this function.
 */
static u64 __hv_read_ref_counter(void)
{
	return hv_get_msr(HV_MSR_TIME_REF_COUNT);
}

u64 (*hv_read_reference_counter)(void) = __hv_read_ref_counter;
EXPORT_SYMBOL_GPL(hv_read_reference_counter);

/*
 * These __weak functions provide default "no-op" behavior and
 * may be overridden by architecture specific versions. Architectures
 * for which the default "no-op" behavior is sufficient can leave
 * them unimplemented and not be cluttered with a bunch of stub
 * functions in arch-specific code.
 */
bool __weak hv_is_isolation_supported(void)
{
	return false;
}
EXPORT_SYMBOL_GPL(hv_is_isolation_supported);

bool __weak hv_isolation_type_snp(void)
{
	return false;
}
EXPORT_SYMBOL_GPL(hv_isolation_type_snp);

bool __weak hv_isolation_type_tdx(void)
{
	return false;
}
EXPORT_SYMBOL_GPL(hv_isolation_type_tdx);

void __weak hv_setup_vmbus_handler(void (*handler)(void))
{
}
EXPORT_SYMBOL_GPL(hv_setup_vmbus_handler);

void __weak hv_remove_vmbus_handler(void)
{
}
EXPORT_SYMBOL_GPL(hv_remove_vmbus_handler);

void __weak hv_setup_mshv_handler(void (*handler)(void))
{
}
EXPORT_SYMBOL_GPL(hv_setup_mshv_handler);

void __weak hv_setup_kexec_handler(void (*handler)(void))
{
}
EXPORT_SYMBOL_GPL(hv_setup_kexec_handler);

void __weak hv_remove_kexec_handler(void)
{
}
EXPORT_SYMBOL_GPL(hv_remove_kexec_handler);

void __weak hv_setup_crash_handler(void (*handler)(struct pt_regs *regs))
{
}
EXPORT_SYMBOL_GPL(hv_setup_crash_handler);

void __weak hv_remove_crash_handler(void)
{
}
EXPORT_SYMBOL_GPL(hv_remove_crash_handler);

void __weak hyperv_cleanup(void)
{
}
EXPORT_SYMBOL_GPL(hyperv_cleanup);

u64 __weak hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
{
	return HV_STATUS_INVALID_PARAMETER;
}
EXPORT_SYMBOL_GPL(hv_ghcb_hypercall);

u64 __weak hv_tdx_hypercall(u64 control, u64 param1, u64 param2)
{
	return HV_STATUS_INVALID_PARAMETER;
}
EXPORT_SYMBOL_GPL(hv_tdx_hypercall);

void hv_identify_partition_type(void)
{
	/* Assume guest role */
	hv_curr_partition_type = HV_PARTITION_TYPE_GUEST;
	/*
	 * Check partition creation and cpu management privileges
	 *
	 * Hyper-V should never specify running as root and as a Confidential
	 * VM. But to protect against a compromised/malicious Hyper-V trying
	 * to exploit root behavior to expose Confidential VM memory, ignore
	 * the root partition setting if also a Confidential VM.
	 */
	if ((ms_hyperv.priv_high & HV_CREATE_PARTITIONS) &&
	    (ms_hyperv.priv_high & HV_CPU_MANAGEMENT) &&
	    !(ms_hyperv.priv_high & HV_ISOLATION)) {
		pr_info("Hyper-V: running as root partition\n");
		if (IS_ENABLED(CONFIG_MSHV_ROOT))
			hv_curr_partition_type = HV_PARTITION_TYPE_ROOT;
		else
			pr_crit("Hyper-V: CONFIG_MSHV_ROOT not enabled!\n");
	}
}

struct hv_status_info {
	char *string;
	int errno;
	u16 code;
};

/*
 * Note on the errno mappings:
 * A failed hypercall is usually only recoverable (or loggable) near
 * the call site where the HV_STATUS_* code is known. So the errno
 * it gets converted to is not too useful further up the stack.
 * Provide a few mappings that could be useful, and revert to -EIO
 * as a fallback.
 */
static const struct hv_status_info hv_status_infos[] = {
#define _STATUS_INFO(status, errno) { #status, (errno), (status) }
	_STATUS_INFO(HV_STATUS_SUCCESS,				0),
	_STATUS_INFO(HV_STATUS_INVALID_HYPERCALL_CODE,		-EINVAL),
	_STATUS_INFO(HV_STATUS_INVALID_HYPERCALL_INPUT,		-EINVAL),
	_STATUS_INFO(HV_STATUS_INVALID_ALIGNMENT,		-EIO),
	_STATUS_INFO(HV_STATUS_INVALID_PARAMETER,		-EINVAL),
	_STATUS_INFO(HV_STATUS_ACCESS_DENIED,			-EIO),
	_STATUS_INFO(HV_STATUS_INVALID_PARTITION_STATE,		-EIO),
	_STATUS_INFO(HV_STATUS_OPERATION_DENIED,		-EIO),
	_STATUS_INFO(HV_STATUS_UNKNOWN_PROPERTY,		-EIO),
	_STATUS_INFO(HV_STATUS_PROPERTY_VALUE_OUT_OF_RANGE,	-EIO),
	_STATUS_INFO(HV_STATUS_INSUFFICIENT_MEMORY,		-ENOMEM),
	_STATUS_INFO(HV_STATUS_INVALID_PARTITION_ID,		-EINVAL),
	_STATUS_INFO(HV_STATUS_INVALID_VP_INDEX,		-EINVAL),
	_STATUS_INFO(HV_STATUS_NOT_FOUND,			-EIO),
	_STATUS_INFO(HV_STATUS_INVALID_PORT_ID,			-EINVAL),
	_STATUS_INFO(HV_STATUS_INVALID_CONNECTION_ID,		-EINVAL),
	_STATUS_INFO(HV_STATUS_INSUFFICIENT_BUFFERS,		-EIO),
	_STATUS_INFO(HV_STATUS_NOT_ACKNOWLEDGED,		-EIO),
	_STATUS_INFO(HV_STATUS_INVALID_VP_STATE,		-EIO),
	_STATUS_INFO(HV_STATUS_NO_RESOURCES,			-EIO),
	_STATUS_INFO(HV_STATUS_PROCESSOR_FEATURE_NOT_SUPPORTED, -EIO),
	_STATUS_INFO(HV_STATUS_INVALID_LP_INDEX,		-EINVAL),
	_STATUS_INFO(HV_STATUS_INVALID_REGISTER_VALUE,		-EINVAL),
	_STATUS_INFO(HV_STATUS_OPERATION_FAILED,		-EIO),
	_STATUS_INFO(HV_STATUS_TIME_OUT,			-EIO),
	_STATUS_INFO(HV_STATUS_CALL_PENDING,			-EIO),
	_STATUS_INFO(HV_STATUS_VTL_ALREADY_ENABLED,		-EIO),
#undef _STATUS_INFO
};

static inline const struct hv_status_info *find_hv_status_info(u64 hv_status)
{
	int i;
	u16 code = hv_result(hv_status);

	for (i = 0; i < ARRAY_SIZE(hv_status_infos); ++i) {
		const struct hv_status_info *info = &hv_status_infos[i];

		if (info->code == code)
			return info;
	}

	return NULL;
}

/* Convert a hypercall result into a linux-friendly error code. */
int hv_result_to_errno(u64 status)
{
	const struct hv_status_info *info;

	/* hv_do_hypercall() may return U64_MAX when hypercalls aren't possible */
	if (unlikely(status == U64_MAX))
		return -EOPNOTSUPP;

	info = find_hv_status_info(status);
	if (info)
		return info->errno;

	return -EIO;
}
EXPORT_SYMBOL_GPL(hv_result_to_errno);

const char *hv_result_to_string(u64 status)
{
	const struct hv_status_info *info;

	if (unlikely(status == U64_MAX))
		return "Hypercall page missing!";

	info = find_hv_status_info(status);
	if (info)
		return info->string;

	return "Unknown";
}
EXPORT_SYMBOL_GPL(hv_result_to_string);
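/*
 * Illustrative sketch, not part of the upstream file: how a caller might
 * combine hv_do_hypercall() with the helpers above to log a failure and
 * return a Linux errno. The function name is made up, and
 * HVCALL_GET_PARTITION_ID is reused here purely as an example hypercall
 * code.
 */
static int __maybe_unused hv_example_status_usage(void *input, void *output)
{
	u64 status = hv_do_hypercall(HVCALL_GET_PARTITION_ID, input, output);

	if (!hv_result_success(status)) {
		/* Log the symbolic HV_STATUS_* name for easier debugging */
		pr_err("Hyper-V: example hypercall failed: %s\n",
		       hv_result_to_string(status));
		return hv_result_to_errno(status);
	}

	return 0;
}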