// SPDX-License-Identifier: GPL-2.0

/*
 * Architecture neutral utility routines for interacting with
 * Hyper-V. This file is specifically for code that must be
 * built-in to the kernel image when CONFIG_HYPERV is set
 * (vs. being in a module) because it is called from architecture
 * specific code under arch/.
 *
 * Copyright (C) 2021, Microsoft, Inc.
 *
 * Author : Michael Kelley <mikelley@microsoft.com>
 */

#include <linux/types.h>
#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/bitfield.h>
#include <linux/cpumask.h>
#include <linux/sched/task_stack.h>
#include <linux/panic_notifier.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/efi.h>
#include <linux/kdebug.h>
#include <linux/kmsg_dump.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/dma-map-ops.h>
#include <linux/set_memory.h>
#include <hyperv/hvhdk.h>
#include <asm/mshyperv.h>

/*
 * hv_root_partition, ms_hyperv and hv_nested are defined here with other
 * Hyper-V specific globals so they are shared across all architectures and are
 * built only when CONFIG_HYPERV is defined. But on x86,
 * ms_hyperv_init_platform() is built even when CONFIG_HYPERV is not
 * defined, and it uses these three variables. So mark them as __weak
 * here, allowing for an overriding definition in the module containing
 * ms_hyperv_init_platform().
 */
bool __weak hv_root_partition;
EXPORT_SYMBOL_GPL(hv_root_partition);

bool __weak hv_nested;
EXPORT_SYMBOL_GPL(hv_nested);

struct ms_hyperv_info __weak ms_hyperv;
EXPORT_SYMBOL_GPL(ms_hyperv);

u32 *hv_vp_index;
EXPORT_SYMBOL_GPL(hv_vp_index);

u32 hv_max_vp_index;
EXPORT_SYMBOL_GPL(hv_max_vp_index);

void * __percpu *hyperv_pcpu_input_arg;
EXPORT_SYMBOL_GPL(hyperv_pcpu_input_arg);

void * __percpu *hyperv_pcpu_output_arg;
EXPORT_SYMBOL_GPL(hyperv_pcpu_output_arg);

static void hv_kmsg_dump_unregister(void);

static struct ctl_table_header *hv_ctl_table_hdr;

/*
 * Hyper-V specific initialization and shutdown code that is
 * common across all architectures. Called from architecture
 * specific initialization functions.
 */

void __init hv_common_free(void)
{
	unregister_sysctl_table(hv_ctl_table_hdr);
	hv_ctl_table_hdr = NULL;

	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE)
		hv_kmsg_dump_unregister();

	kfree(hv_vp_index);
	hv_vp_index = NULL;

	free_percpu(hyperv_pcpu_output_arg);
	hyperv_pcpu_output_arg = NULL;

	free_percpu(hyperv_pcpu_input_arg);
	hyperv_pcpu_input_arg = NULL;
}

/*
 * Functions for allocating and freeing memory with size and
 * alignment HV_HYP_PAGE_SIZE. These functions are needed because
 * the guest page size may not be the same as the Hyper-V page
 * size. We depend upon kmalloc() aligning power-of-two size
 * allocations to the allocation size boundary, so that the
 * allocated memory appears to Hyper-V as a page of the size
 * it expects.
 */

void *hv_alloc_hyperv_page(void)
{
	BUILD_BUG_ON(PAGE_SIZE < HV_HYP_PAGE_SIZE);

	if (PAGE_SIZE == HV_HYP_PAGE_SIZE)
		return (void *)__get_free_page(GFP_KERNEL);
	else
		return kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(hv_alloc_hyperv_page);

void *hv_alloc_hyperv_zeroed_page(void)
{
	if (PAGE_SIZE == HV_HYP_PAGE_SIZE)
		return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	else
		return kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(hv_alloc_hyperv_zeroed_page);

void hv_free_hyperv_page(void *addr)
{
	if (PAGE_SIZE == HV_HYP_PAGE_SIZE)
		free_page((unsigned long)addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL_GPL(hv_free_hyperv_page);
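
/*
 * Illustrative usage sketch (not taken from any specific caller): code
 * that needs a Hyper-V-page-sized buffer, e.g. for data handed to the
 * hypervisor by physical address, would typically do:
 *
 *	void *page = hv_alloc_hyperv_zeroed_page();
 *
 *	if (!page)
 *		return -ENOMEM;
 *	... pass virt_to_phys(page) to Hyper-V, as hv_kmsg_dump() below
 *	    does with hv_panic_page ...
 *	hv_free_hyperv_page(page);
 *
 * On x86 the guest page size equals HV_HYP_PAGE_SIZE, so the
 * __get_free_page() path is taken; on architectures with a larger guest
 * page size, the kmalloc()/kzalloc() path returns a sub-page allocation
 * that is HV_HYP_PAGE_SIZE-sized and -aligned, which is what Hyper-V
 * expects to see.
 */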

static void *hv_panic_page;

/*
 * Boolean to control whether to report panic messages over Hyper-V.
 *
 * It can be set via /proc/sys/kernel/hyperv_record_panic_msg
 */
static int sysctl_record_panic_msg = 1;

/*
 * sysctl option to allow the user to control whether kmsg data should be
 * reported to Hyper-V on panic.
 */
static const struct ctl_table hv_ctl_table[] = {
	{
		.procname	= "hyperv_record_panic_msg",
		.data		= &sysctl_record_panic_msg,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE
	},
};

static int hv_die_panic_notify_crash(struct notifier_block *self,
				     unsigned long val, void *args);

static struct notifier_block hyperv_die_report_block = {
	.notifier_call = hv_die_panic_notify_crash,
};

static struct notifier_block hyperv_panic_report_block = {
	.notifier_call = hv_die_panic_notify_crash,
};

/*
 * The following callback works both as die and panic notifier; its
 * goal is to provide panic information to the hypervisor unless the
 * kmsg dumper is used [see hv_kmsg_dump()], which provides more
 * information but isn't always available.
 *
 * Notice that both the panic/die report notifiers are registered only
 * if we have the capability HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE set.
 */
static int hv_die_panic_notify_crash(struct notifier_block *self,
				     unsigned long val, void *args)
{
	struct pt_regs *regs;
	bool is_die;

	/* Don't notify Hyper-V unless we have a die oops event or panic. */
	if (self == &hyperv_panic_report_block) {
		is_die = false;
		regs = current_pt_regs();
	} else { /* die event */
		if (val != DIE_OOPS)
			return NOTIFY_DONE;

		is_die = true;
		regs = ((struct die_args *)args)->regs;
	}

	/*
	 * Hyper-V should be notified only once about a panic/die. If we will
	 * be calling hv_kmsg_dump() later with kmsg data, don't do the
	 * notification here.
	 */
	if (!sysctl_record_panic_msg || !hv_panic_page)
		hyperv_report_panic(regs, val, is_die);

	return NOTIFY_DONE;
}

/*
 * Callback from kmsg_dump. Grab as much as possible from the end of the kmsg
 * buffer and call into Hyper-V to transfer the data.
 */
static void hv_kmsg_dump(struct kmsg_dumper *dumper,
			 struct kmsg_dump_detail *detail)
{
	struct kmsg_dump_iter iter;
	size_t bytes_written;

	/* We are only interested in panics. */
	if (detail->reason != KMSG_DUMP_PANIC || !sysctl_record_panic_msg)
		return;

	/*
	 * Write dump contents to the page. No need to synchronize; panic
	 * should be single-threaded.
	 */
	kmsg_dump_rewind(&iter);
	kmsg_dump_get_buffer(&iter, false, hv_panic_page, HV_HYP_PAGE_SIZE,
			     &bytes_written);
	if (!bytes_written)
		return;

	/*
	 * P3 contains the physical address of the panic page and P4 the
	 * size of the panic data in that page. The remaining registers
	 * are ignored when the NOTIFY_MSG flag is set.
	 */
	hv_set_msr(HV_MSR_CRASH_P0, 0);
	hv_set_msr(HV_MSR_CRASH_P1, 0);
	hv_set_msr(HV_MSR_CRASH_P2, 0);
	hv_set_msr(HV_MSR_CRASH_P3, virt_to_phys(hv_panic_page));
	hv_set_msr(HV_MSR_CRASH_P4, bytes_written);

	/*
	 * Let Hyper-V know there is crash data available along with
	 * the panic message.
	 */
	hv_set_msr(HV_MSR_CRASH_CTL,
		   (HV_CRASH_CTL_CRASH_NOTIFY |
		    HV_CRASH_CTL_CRASH_NOTIFY_MSG));
}

static struct kmsg_dumper hv_kmsg_dumper = {
	.dump = hv_kmsg_dump,
};

static void hv_kmsg_dump_unregister(void)
{
	kmsg_dump_unregister(&hv_kmsg_dumper);
	unregister_die_notifier(&hyperv_die_report_block);
	atomic_notifier_chain_unregister(&panic_notifier_list,
					 &hyperv_panic_report_block);

	hv_free_hyperv_page(hv_panic_page);
	hv_panic_page = NULL;
}

static void hv_kmsg_dump_register(void)
{
	int ret;

	hv_panic_page = hv_alloc_hyperv_zeroed_page();
	if (!hv_panic_page) {
		pr_err("Hyper-V: panic message page memory allocation failed\n");
		return;
	}

	ret = kmsg_dump_register(&hv_kmsg_dumper);
	if (ret) {
		pr_err("Hyper-V: kmsg dump register error 0x%x\n", ret);
		hv_free_hyperv_page(hv_panic_page);
		hv_panic_page = NULL;
	}
}

static inline bool hv_output_page_exists(void)
{
	return hv_root_partition || IS_ENABLED(CONFIG_HYPERV_VTL_MODE);
}

int __init hv_common_init(void)
{
	int i;
	union hv_hypervisor_version_info version;

	/* Get information about the Hyper-V host version */
	if (!hv_get_hypervisor_version(&version))
		pr_info("Hyper-V: Host Build %d.%d.%d.%d-%d-%d\n",
			version.major_version, version.minor_version,
			version.build_number, version.service_number,
			version.service_pack, version.service_branch);

	if (hv_is_isolation_supported())
		sysctl_record_panic_msg = 0;

	/*
	 * Hyper-V expects to get crash register data or kmsg when crash
	 * enlightenment is available and the system crashes. Set
	 * crash_kexec_post_notifiers to true to make sure the crash
	 * enlightenment interface is called before running the kdump
	 * kernel.
	 */
	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		u64 hyperv_crash_ctl;

		crash_kexec_post_notifiers = true;
		pr_info("Hyper-V: enabling crash_kexec_post_notifiers\n");

		/*
		 * Panic message recording (sysctl_record_panic_msg)
		 * is enabled by default in non-isolated guests and
		 * disabled by default in isolated guests; the panic
		 * message recording won't be available in isolated
		 * guests should the following registration fail.
		 */
		hv_ctl_table_hdr = register_sysctl("kernel", hv_ctl_table);
		if (!hv_ctl_table_hdr)
			pr_err("Hyper-V: sysctl table register error");

		/*
		 * Register for panic kmsg callback only if the right
		 * capability is supported by the hypervisor.
		 */
		hyperv_crash_ctl = hv_get_msr(HV_MSR_CRASH_CTL);
		if (hyperv_crash_ctl & HV_CRASH_CTL_CRASH_NOTIFY_MSG)
			hv_kmsg_dump_register();

		register_die_notifier(&hyperv_die_report_block);
		atomic_notifier_chain_register(&panic_notifier_list,
					       &hyperv_panic_report_block);
	}

	/*
	 * Allocate the per-CPU state for the hypercall input arg.
	 * If this allocation fails, we will not be able to set up the
	 * (per-CPU) hypercall input page, and thus this failure is
	 * fatal on Hyper-V.
	 */
	hyperv_pcpu_input_arg = alloc_percpu(void *);
	BUG_ON(!hyperv_pcpu_input_arg);

	/* Allocate the per-CPU state for output arg for root */
	if (hv_output_page_exists()) {
		hyperv_pcpu_output_arg = alloc_percpu(void *);
		BUG_ON(!hyperv_pcpu_output_arg);
	}

	hv_vp_index = kmalloc_array(nr_cpu_ids, sizeof(*hv_vp_index),
				    GFP_KERNEL);
	if (!hv_vp_index) {
		hv_common_free();
		return -ENOMEM;
	}

	for (i = 0; i < nr_cpu_ids; i++)
		hv_vp_index[i] = VP_INVAL;

	return 0;
}
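
/*
 * Note on the checksum fixup in ms_hyperv_late_init() below: the ACPI
 * checksum byte is chosen so that all bytes of the table sum to zero
 * mod 256. Zeroing a data byte that previously held the value v changes
 * that sum by -v, so adding v back into header->checksum preserves the
 * invariant. The same reasoning covers the rewritten length field: its
 * old bytes are added to the checksum before the field changes, and the
 * new bytes are subtracted afterward.
 */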

void __init ms_hyperv_late_init(void)
{
	struct acpi_table_header *header;
	acpi_status status;
	u8 *randomdata;
	u32 length, i;

	/*
	 * Seed the Linux random number generator with entropy provided by
	 * the Hyper-V host in ACPI table OEM0.
	 */
	if (!IS_ENABLED(CONFIG_ACPI))
		return;

	status = acpi_get_table("OEM0", 0, &header);
	if (ACPI_FAILURE(status) || !header)
		return;

	/*
	 * Since the "OEM0" table name is for OEM specific usage, verify
	 * that what we're seeing purports to be from Microsoft.
	 */
	if (strncmp(header->oem_table_id, "MICROSFT", 8))
		goto error;

	/*
	 * Ensure the length is reasonable. Requiring at least 8 bytes and
	 * no more than 4K bytes is somewhat arbitrary and just protects
	 * against a malformed table. Hyper-V currently provides 64 bytes,
	 * but allow for a change in a later version.
	 */
	if (header->length < sizeof(*header) + 8 ||
	    header->length > sizeof(*header) + SZ_4K)
		goto error;

	length = header->length - sizeof(*header);
	randomdata = (u8 *)(header + 1);

	pr_debug("Hyper-V: Seeding rng with %d random bytes from ACPI table OEM0\n",
		 length);

	add_bootloader_randomness(randomdata, length);

	/*
	 * To prevent the seed data from being visible in /sys/firmware/acpi,
	 * zero out the random data in the ACPI table and fix up the checksum.
	 * The zeroing is done out of an abundance of caution in avoiding
	 * potential security risks to the rng. Similarly, reset the table
	 * length to just the header size so that a subsequent kexec doesn't
	 * try to use the zeroed-out random data.
	 */
	for (i = 0; i < length; i++) {
		header->checksum += randomdata[i];
		randomdata[i] = 0;
	}

	for (i = 0; i < sizeof(header->length); i++)
		header->checksum += ((u8 *)&header->length)[i];
	header->length = sizeof(*header);
	for (i = 0; i < sizeof(header->length); i++)
		header->checksum -= ((u8 *)&header->length)[i];

error:
	acpi_put_table(header);
}

/*
 * Hyper-V specific initialization and die code for
 * individual CPUs that is common across all architectures.
 * Called by the CPU hotplug mechanism.
 */

int hv_common_cpu_init(unsigned int cpu)
{
	void **inputarg, **outputarg;
	u64 msr_vp_index;
	gfp_t flags;
	const int pgcount = hv_output_page_exists() ? 2 : 1;
	void *mem;
	int ret;

	/* hv_cpu_init() can be called with IRQs disabled from hv_resume() */
	flags = irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL;

	inputarg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);

	/*
	 * The hyperv_pcpu_input_arg and hyperv_pcpu_output_arg memory is
	 * already allocated if this CPU was previously online and then
	 * taken offline.
	 */
	if (!*inputarg) {
		mem = kmalloc(pgcount * HV_HYP_PAGE_SIZE, flags);
		if (!mem)
			return -ENOMEM;

		if (hv_output_page_exists()) {
			outputarg = (void **)this_cpu_ptr(hyperv_pcpu_output_arg);
			*outputarg = (char *)mem + HV_HYP_PAGE_SIZE;
		}

		if (!ms_hyperv.paravisor_present &&
		    (hv_isolation_type_snp() || hv_isolation_type_tdx())) {
			ret = set_memory_decrypted((unsigned long)mem, pgcount);
			if (ret) {
				/* It may be unsafe to free 'mem' */
				return ret;
			}

			memset(mem, 0x00, pgcount * HV_HYP_PAGE_SIZE);
		}

		/*
		 * In a fully enlightened TDX/SNP VM with more than 64 VPs, if
		 * hyperv_pcpu_input_arg is not NULL, set_memory_decrypted() ->
		 * ... -> cpa_flush() -> ... -> __send_ipi_mask_ex() tries to
		 * use hyperv_pcpu_input_arg as the hypercall input page, which
		 * must be a decrypted page in such a VM, but the page is still
		 * encrypted before set_memory_decrypted() returns. Fix this by
		 * setting *inputarg after the above set_memory_decrypted(): if
		 * hyperv_pcpu_input_arg is NULL, __send_ipi_mask_ex() returns
		 * HV_STATUS_INVALID_PARAMETER immediately, and the function
		 * hv_send_ipi_mask() falls back to orig_apic.send_IPI_mask(),
		 * which may be slightly slower than the hypercall, but still
		 * works correctly in such a VM.
		 */
		*inputarg = mem;
	}

	msr_vp_index = hv_get_msr(HV_MSR_VP_INDEX);

	hv_vp_index[cpu] = msr_vp_index;

	if (msr_vp_index > hv_max_vp_index)
		hv_max_vp_index = msr_vp_index;

	return 0;
}

int hv_common_cpu_die(unsigned int cpu)
{
	/*
	 * The hyperv_pcpu_input_arg and hyperv_pcpu_output_arg memory
	 * is not freed when the CPU goes offline because the
	 * hyperv_pcpu_input_arg may be used by the Hyper-V vPCI driver
	 * in reassigning interrupts as part of the offlining process.
	 * The interrupt reassignment happens *after* the
	 * CPUHP_AP_HYPERV_ONLINE state has run and called this function.
	 *
	 * If a previously offlined CPU is brought back online again, the
	 * originally allocated memory is reused in hv_common_cpu_init().
	 */

	return 0;
}

/* Bit mask of the extended capability to query: see HV_EXT_CAPABILITY_xxx */
bool hv_query_ext_cap(u64 cap_query)
{
	/*
	 * The address of the 'hv_extended_cap' variable will be used as an
	 * output parameter to the hypercall below, so it should be
	 * compatible with 'virt_to_phys', which means its address should be
	 * directly mapped. Use 'static' to keep it compatible; stack
	 * variables can be virtually mapped, making them incompatible with
	 * 'virt_to_phys'.
	 * Hypercall input/output addresses should also be 8-byte aligned.
	 */
	static u64 hv_extended_cap __aligned(8);
	static bool hv_extended_cap_queried;
	u64 status;

	/*
	 * Querying extended capabilities is an extended hypercall. Check
	 * first that the partition supports extended hypercalls.
	 */
	if (!(ms_hyperv.priv_high & HV_ENABLE_EXTENDED_HYPERCALLS))
		return false;

	/* Extended capabilities do not change at runtime. */
	if (hv_extended_cap_queried)
		return hv_extended_cap & cap_query;

	status = hv_do_hypercall(HV_EXT_CALL_QUERY_CAPABILITIES, NULL,
				 &hv_extended_cap);

	/*
	 * The query extended capabilities hypercall should not fail under
	 * any normal circumstances. Avoid repeatedly making the hypercall
	 * on error.
	 */
	hv_extended_cap_queried = true;
	if (!hv_result_success(status)) {
		pr_err("Hyper-V: Extended query capabilities hypercall failed 0x%llx\n",
		       status);
		return false;
	}

	return hv_extended_cap & cap_query;
}
EXPORT_SYMBOL_GPL(hv_query_ext_cap);
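
/*
 * Illustrative caller sketch for hv_query_ext_cap(). The capability name
 * below is a placeholder standing in for a real HV_EXT_CAPABILITY_xxx
 * constant, not an actual definition:
 *
 *	if (!hv_query_ext_cap(HV_EXT_CAPABILITY_FOO))
 *		return -EOPNOTSUPP;
 *
 * Because the queried bitmap is cached in the static 'hv_extended_cap',
 * only the first call issues the hypercall; later calls are cheap tests
 * against the cached value.
 */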

void hv_setup_dma_ops(struct device *dev, bool coherent)
{
	arch_setup_dma_ops(dev, coherent);
}
EXPORT_SYMBOL_GPL(hv_setup_dma_ops);

bool hv_is_hibernation_supported(void)
{
	return !hv_root_partition && acpi_sleep_state_supported(ACPI_STATE_S4);
}
EXPORT_SYMBOL_GPL(hv_is_hibernation_supported);

/*
 * Default function to read the Hyper-V reference counter, independent
 * of whether Hyper-V enlightened clocks/timers are being used. But on
 * architectures where it is used, Hyper-V enlightenment code in
 * hyperv_timer.c may override this function.
 */
static u64 __hv_read_ref_counter(void)
{
	return hv_get_msr(HV_MSR_TIME_REF_COUNT);
}

u64 (*hv_read_reference_counter)(void) = __hv_read_ref_counter;
EXPORT_SYMBOL_GPL(hv_read_reference_counter);

/*
 * These __weak functions provide default "no-op" behavior and
 * may be overridden by architecture specific versions. Architectures
 * for which the default "no-op" behavior is sufficient can leave
 * them unimplemented and not be cluttered with a bunch of stub
 * functions in arch-specific code.
 */

bool __weak hv_is_isolation_supported(void)
{
	return false;
}
EXPORT_SYMBOL_GPL(hv_is_isolation_supported);

bool __weak hv_isolation_type_snp(void)
{
	return false;
}
EXPORT_SYMBOL_GPL(hv_isolation_type_snp);

bool __weak hv_isolation_type_tdx(void)
{
	return false;
}
EXPORT_SYMBOL_GPL(hv_isolation_type_tdx);

void __weak hv_setup_vmbus_handler(void (*handler)(void))
{
}
EXPORT_SYMBOL_GPL(hv_setup_vmbus_handler);

void __weak hv_remove_vmbus_handler(void)
{
}
EXPORT_SYMBOL_GPL(hv_remove_vmbus_handler);

void __weak hv_setup_kexec_handler(void (*handler)(void))
{
}
EXPORT_SYMBOL_GPL(hv_setup_kexec_handler);

void __weak hv_remove_kexec_handler(void)
{
}
EXPORT_SYMBOL_GPL(hv_remove_kexec_handler);

void __weak hv_setup_crash_handler(void (*handler)(struct pt_regs *regs))
{
}
EXPORT_SYMBOL_GPL(hv_setup_crash_handler);

void __weak hv_remove_crash_handler(void)
{
}
EXPORT_SYMBOL_GPL(hv_remove_crash_handler);

void __weak hyperv_cleanup(void)
{
}
EXPORT_SYMBOL_GPL(hyperv_cleanup);

u64 __weak hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
{
	return HV_STATUS_INVALID_PARAMETER;
}
EXPORT_SYMBOL_GPL(hv_ghcb_hypercall);

u64 __weak hv_tdx_hypercall(u64 control, u64 param1, u64 param2)
{
	return HV_STATUS_INVALID_PARAMETER;
}
EXPORT_SYMBOL_GPL(hv_tdx_hypercall);