// SPDX-License-Identifier: GPL-2.0
/*
 * Hyper-V Isolation VM interface with paravisor and hypervisor
 *
 * Author:
 *  Tianyu Lan <Tianyu.Lan@microsoft.com>
 */

#include <linux/bitfield.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <asm/svm.h>
#include <asm/sev.h>
#include <asm/io.h>
#include <asm/coco.h>
#include <asm/mem_encrypt.h>
#include <asm/set_memory.h>
#include <asm/mshyperv.h>
#include <asm/hypervisor.h>
#include <asm/mtrr.h>
#include <asm/io_apic.h>
#include <asm/realmode.h>
#include <asm/e820/api.h>
#include <asm/desc.h>
#include <uapi/asm/vmx.h>

#ifdef CONFIG_AMD_MEM_ENCRYPT

#define GHCB_USAGE_HYPERV_CALL	1

union hv_ghcb {
        struct ghcb ghcb;
        struct {
                u64 hypercalldata[509];
                u64 outputgpa;
                union {
                        union {
                                struct {
                                        u32 callcode        : 16;
                                        u32 isfast          : 1;
                                        u32 reserved1       : 14;
                                        u32 isnested        : 1;
                                        u32 countofelements : 12;
                                        u32 reserved2       : 4;
                                        u32 repstartindex   : 12;
                                        u32 reserved3       : 4;
                                };
                                u64 asuint64;
                        } hypercallinput;
                        union {
                                struct {
                                        u16 callstatus;
                                        u16 reserved1;
                                        u32 elementsprocessed : 12;
                                        u32 reserved2         : 20;
                                };
                                u64 asuint64;
                        } hypercalloutput;
                };
                u64 reserved2;
        } hypercall;
} __packed __aligned(HV_HYP_PAGE_SIZE);

/* Only used in an SNP VM with the paravisor */
static u16 hv_ghcb_version __ro_after_init;

/* Functions only used in an SNP VM with the paravisor go here. */
u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
{
        union hv_ghcb *hv_ghcb;
        void **ghcb_base;
        unsigned long flags;
        u64 status;

        if (!hv_ghcb_pg)
                return -EFAULT;

        WARN_ON(in_nmi());

        local_irq_save(flags);
        ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
        hv_ghcb = (union hv_ghcb *)*ghcb_base;
        if (!hv_ghcb) {
                local_irq_restore(flags);
                return -EFAULT;
        }

        hv_ghcb->ghcb.protocol_version = GHCB_PROTOCOL_MAX;
        hv_ghcb->ghcb.ghcb_usage = GHCB_USAGE_HYPERV_CALL;

        hv_ghcb->hypercall.outputgpa = (u64)output;
        hv_ghcb->hypercall.hypercallinput.asuint64 = 0;
        hv_ghcb->hypercall.hypercallinput.callcode = control;

        if (input_size)
                memcpy(hv_ghcb->hypercall.hypercalldata, input, input_size);

        VMGEXIT();

        hv_ghcb->ghcb.ghcb_usage = 0xffffffff;
        memset(hv_ghcb->ghcb.save.valid_bitmap, 0,
               sizeof(hv_ghcb->ghcb.save.valid_bitmap));

        status = hv_ghcb->hypercall.hypercalloutput.callstatus;

        local_irq_restore(flags);

        return status;
}
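
/*
 * Illustrative use of hv_ghcb_hypercall() above: a fixed-size hypercall
 * such as HVCALL_POST_MESSAGE can be issued through the GHCB page when
 * a paravisor owns the normal hypercall path. The "msg" buffer below is
 * hypothetical, for illustration only:
 *
 *	status = hv_ghcb_hypercall(HVCALL_POST_MESSAGE, msg, NULL,
 *				   sizeof(*msg));
 *	if (status != HV_STATUS_SUCCESS)
 *		return -EIO;
 *
 * Note that only the 16-bit call status is returned, not the full
 * hypercall result format, so rep-count fields are unavailable here.
 */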

static inline u64 rd_ghcb_msr(void)
{
        return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
}

static inline void wr_ghcb_msr(u64 val)
{
        native_wrmsrl(MSR_AMD64_SEV_ES_GHCB, val);
}

static enum es_result hv_ghcb_hv_call(struct ghcb *ghcb, u64 exit_code,
                                      u64 exit_info_1, u64 exit_info_2)
{
        /* Fill in protocol and format specifiers */
        ghcb->protocol_version = hv_ghcb_version;
        ghcb->ghcb_usage = GHCB_DEFAULT_USAGE;

        ghcb_set_sw_exit_code(ghcb, exit_code);
        ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
        ghcb_set_sw_exit_info_2(ghcb, exit_info_2);

        VMGEXIT();

        if (ghcb->save.sw_exit_info_1 & GENMASK_ULL(31, 0))
                return ES_VMM_ERROR;
        else
                return ES_OK;
}

void __noreturn hv_ghcb_terminate(unsigned int set, unsigned int reason)
{
        u64 val = GHCB_MSR_TERM_REQ;

        /* Tell the hypervisor what went wrong. */
        val |= GHCB_SEV_TERM_REASON(set, reason);

        /* Request Guest Termination from Hypervisor */
        wr_ghcb_msr(val);
        VMGEXIT();

        while (true)
                asm volatile("hlt\n" : : : "memory");
}
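
/*
 * For reference, a sketch of the GHCB MSR protocol encoding used by
 * hv_ghcb_terminate() above and hv_ghcb_negotiate_protocol() below
 * (summarized from the GHCB specification):
 *
 *	GHCBData[11:0]  - request/response code: 0x100 = terminate
 *			  request, 0x002 = SEV info request, 0x001 =
 *			  SEV info response
 *	Terminate:	  reason-code set in bits [15:12], reason code
 *			  in bits [23:16]
 *	SEV info resp:	  max GHCB protocol version in bits [63:48],
 *			  min version in bits [47:32]
 */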

bool hv_ghcb_negotiate_protocol(void)
{
        u64 ghcb_gpa;
        u64 val;

        /* Save ghcb page gpa. */
        ghcb_gpa = rd_ghcb_msr();

        /* Do the GHCB protocol version negotiation */
        wr_ghcb_msr(GHCB_MSR_SEV_INFO_REQ);
        VMGEXIT();
        val = rd_ghcb_msr();

        if (GHCB_MSR_INFO(val) != GHCB_MSR_SEV_INFO_RESP)
                return false;

        if (GHCB_MSR_PROTO_MAX(val) < GHCB_PROTOCOL_MIN ||
            GHCB_MSR_PROTO_MIN(val) > GHCB_PROTOCOL_MAX)
                return false;

        hv_ghcb_version = min_t(size_t, GHCB_MSR_PROTO_MAX(val),
                                GHCB_PROTOCOL_MAX);

        /* Write ghcb page back after negotiating protocol. */
        wr_ghcb_msr(ghcb_gpa);
        VMGEXIT();

        return true;
}

static void hv_ghcb_msr_write(u64 msr, u64 value)
{
        union hv_ghcb *hv_ghcb;
        void **ghcb_base;
        unsigned long flags;

        if (!hv_ghcb_pg)
                return;

        WARN_ON(in_nmi());

        local_irq_save(flags);
        ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
        hv_ghcb = (union hv_ghcb *)*ghcb_base;
        if (!hv_ghcb) {
                local_irq_restore(flags);
                return;
        }

        ghcb_set_rcx(&hv_ghcb->ghcb, msr);
        ghcb_set_rax(&hv_ghcb->ghcb, lower_32_bits(value));
        ghcb_set_rdx(&hv_ghcb->ghcb, upper_32_bits(value));

        if (hv_ghcb_hv_call(&hv_ghcb->ghcb, SVM_EXIT_MSR, 1, 0))
                pr_warn("Failed to write MSR %llx via GHCB.\n", msr);

        local_irq_restore(flags);
}

static void hv_ghcb_msr_read(u64 msr, u64 *value)
{
        union hv_ghcb *hv_ghcb;
        void **ghcb_base;
        unsigned long flags;

        /* Check size of union hv_ghcb here. */
        BUILD_BUG_ON(sizeof(union hv_ghcb) != HV_HYP_PAGE_SIZE);

        if (!hv_ghcb_pg)
                return;

        WARN_ON(in_nmi());

        local_irq_save(flags);
        ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
        hv_ghcb = (union hv_ghcb *)*ghcb_base;
        if (!hv_ghcb) {
                local_irq_restore(flags);
                return;
        }

        ghcb_set_rcx(&hv_ghcb->ghcb, msr);
        if (hv_ghcb_hv_call(&hv_ghcb->ghcb, SVM_EXIT_MSR, 0, 0))
                pr_warn("Failed to read MSR %llx via GHCB.\n", msr);
        else
                *value = (u64)lower_32_bits(hv_ghcb->ghcb.save.rax)
                        | ((u64)lower_32_bits(hv_ghcb->ghcb.save.rdx) << 32);
        local_irq_restore(flags);
}

/* Only used in a fully enlightened SNP VM, i.e. without the paravisor */
static u8 ap_start_input_arg[PAGE_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
static u8 ap_start_stack[PAGE_SIZE] __aligned(PAGE_SIZE);
static DEFINE_PER_CPU(struct sev_es_save_area *, hv_sev_vmsa);

/* Functions only used in an SNP VM without the paravisor go here. */

#define hv_populate_vmcb_seg(seg, gdtr_base)                            \
do {                                                                    \
        if (seg.selector) {                                             \
                seg.base = 0;                                           \
                seg.limit = HV_AP_SEGMENT_LIMIT;                        \
                seg.attrib = *(u16 *)(gdtr_base + seg.selector + 5);    \
                seg.attrib = (seg.attrib & 0xFF) | ((seg.attrib >> 4) & 0xF00); \
        }                                                               \
} while (0)
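
/*
 * Worked example for hv_populate_vmcb_seg() above (illustrative): for a
 * typical 64-bit kernel code segment descriptor of 0x00af9b000000ffff,
 * the u16 read at gdtr_base + selector + 5 is 0xaf9b: the access byte
 * 0x9b in the low half and the limit-high/flags byte 0xaf in the high
 * half. (0xaf9b & 0xFF) keeps the access byte, and ((0xaf9b >> 4) &
 * 0xF00) moves the G/D/L/AVL flags nibble down, yielding attrib 0xA9B:
 * the access byte in bits [7:0] and the flags in bits [11:8], which is
 * the packed format the VMSA segment registers expect.
 */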

static int snp_set_vmsa(void *va, bool vmsa)
{
        u64 attrs;

        /*
         * Running at VMPL0 allows the kernel to change the VMSA bit for a page
         * using the RMPADJUST instruction. However, for the instruction to
         * succeed it must target the permissions of a lesser privileged
         * (higher numbered) VMPL level, so use VMPL1 (refer to the RMPADJUST
         * instruction in the AMD64 APM Volume 3).
         */
        attrs = 1;
        if (vmsa)
                attrs |= RMPADJUST_VMSA_PAGE_BIT;

        return rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
}

static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
{
        int err;

        err = snp_set_vmsa(vmsa, false);
        if (err)
                pr_err("clear VMSA page failed (%u), leaking page\n", err);
        else
                free_page((unsigned long)vmsa);
}

int hv_snp_boot_ap(u32 apic_id, unsigned long start_ip)
{
        struct sev_es_save_area *vmsa = (struct sev_es_save_area *)
                __get_free_page(GFP_KERNEL | __GFP_ZERO);
        struct sev_es_save_area *cur_vmsa;
        struct desc_ptr gdtr;
        u64 ret, retry = 5;
        struct hv_enable_vp_vtl *start_vp_input;
        unsigned long flags;
        int cpu, vp_index;

        if (!vmsa)
                return -ENOMEM;

        /* Find the Hyper-V VP index, which might not be the same as the APIC ID */
        vp_index = hv_apicid_to_vp_index(apic_id);
        if (vp_index < 0 || vp_index > ms_hyperv.max_vp_index) {
                free_page((unsigned long)vmsa);
                return -EINVAL;
        }

        /*
         * Find the Linux CPU number for addressing the per-CPU data, which
         * might not be the same as the APIC ID.
         */
        for_each_present_cpu(cpu) {
                if (arch_match_cpu_phys_id(cpu, apic_id))
                        break;
        }
        if (cpu >= nr_cpu_ids) {
                free_page((unsigned long)vmsa);
                return -EINVAL;
        }

        native_store_gdt(&gdtr);

        vmsa->gdtr.base = gdtr.address;
        vmsa->gdtr.limit = gdtr.size;

        asm volatile("movl %%es, %%eax;" : "=a" (vmsa->es.selector));
        hv_populate_vmcb_seg(vmsa->es, vmsa->gdtr.base);

        asm volatile("movl %%cs, %%eax;" : "=a" (vmsa->cs.selector));
        hv_populate_vmcb_seg(vmsa->cs, vmsa->gdtr.base);

        asm volatile("movl %%ss, %%eax;" : "=a" (vmsa->ss.selector));
        hv_populate_vmcb_seg(vmsa->ss, vmsa->gdtr.base);

        asm volatile("movl %%ds, %%eax;" : "=a" (vmsa->ds.selector));
        hv_populate_vmcb_seg(vmsa->ds, vmsa->gdtr.base);

        vmsa->efer = native_read_msr(MSR_EFER);

        vmsa->cr4 = native_read_cr4();
        vmsa->cr3 = __native_read_cr3();
        vmsa->cr0 = native_read_cr0();

        vmsa->xcr0 = 1;
        vmsa->g_pat = HV_AP_INIT_GPAT_DEFAULT;
        vmsa->rip = (u64)secondary_startup_64_no_verify;
        vmsa->rsp = (u64)&ap_start_stack[PAGE_SIZE];

        /*
         * Set the SNP-specific fields for this VMSA:
         *   VMPL level
         *   SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits)
         */
        vmsa->vmpl = 0;
        vmsa->sev_features = sev_status >> 2;

        ret = snp_set_vmsa(vmsa, true);
        if (ret) {
                pr_err("RMPADJUST(%llx) failed: %llx\n", (u64)vmsa, ret);
                free_page((unsigned long)vmsa);
                return ret;
        }

        local_irq_save(flags);
        start_vp_input = (struct hv_enable_vp_vtl *)ap_start_input_arg;
        memset(start_vp_input, 0, sizeof(*start_vp_input));
        start_vp_input->partition_id = -1;
        start_vp_input->vp_index = vp_index;
        start_vp_input->target_vtl.target_vtl = ms_hyperv.vtl;
        *(u64 *)&start_vp_input->vp_context = __pa(vmsa) | 1;

        do {
                ret = hv_do_hypercall(HVCALL_START_VP,
                                      start_vp_input, NULL);
        } while (hv_result(ret) == HV_STATUS_TIME_OUT && retry--);

        local_irq_restore(flags);

        if (!hv_result_success(ret)) {
                pr_err("HvCallStartVirtualProcessor failed: %llx\n", ret);
                snp_cleanup_vmsa(vmsa);
                vmsa = NULL;
        }

        cur_vmsa = per_cpu(hv_sev_vmsa, cpu);
        /* Free up any previous VMSA page */
        if (cur_vmsa)
                snp_cleanup_vmsa(cur_vmsa);

        /* Record the current VMSA page */
        per_cpu(hv_sev_vmsa, cpu) = vmsa;

        return ret;
}

#else
static inline void hv_ghcb_msr_write(u64 msr, u64 value) {}
static inline void hv_ghcb_msr_read(u64 msr, u64 *value) {}
#endif /* CONFIG_AMD_MEM_ENCRYPT */

#ifdef CONFIG_INTEL_TDX_GUEST
static void hv_tdx_msr_write(u64 msr, u64 val)
{
        struct tdx_module_args args = {
                .r10 = TDX_HYPERCALL_STANDARD,
                .r11 = EXIT_REASON_MSR_WRITE,
                .r12 = msr,
                .r13 = val,
        };

        u64 ret = __tdx_hypercall(&args);

        WARN_ONCE(ret, "Failed to emulate MSR write: %lld\n", ret);
}

static void hv_tdx_msr_read(u64 msr, u64 *val)
{
        struct tdx_module_args args = {
                .r10 = TDX_HYPERCALL_STANDARD,
                .r11 = EXIT_REASON_MSR_READ,
                .r12 = msr,
        };

        u64 ret = __tdx_hypercall(&args);

        if (WARN_ONCE(ret, "Failed to emulate MSR read: %lld\n", ret))
                *val = 0;
        else
                *val = args.r11;
}

u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2)
{
        struct tdx_module_args args = { };

        args.r10 = control;
        args.rdx = param1;
        args.r8 = param2;

        (void)__tdx_hypercall(&args);

        return args.r11;
}
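
/*
 * Note on the TDX hypercall ABI used by hv_tdx_hypercall() above: the
 * normal Hyper-V calling convention (control word in RCX, input/output
 * GPAs in RDX/R8, status in RAX) can't be used directly, because a
 * TDVMCALL already owns RAX and RCX. The control word therefore
 * travels in r10, the parameters in rdx and r8, and the hypercall
 * status comes back in r11.
 */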

#else
static inline void hv_tdx_msr_write(u64 msr, u64 value) {}
static inline void hv_tdx_msr_read(u64 msr, u64 *value) {}
#endif /* CONFIG_INTEL_TDX_GUEST */

#if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
void hv_ivm_msr_write(u64 msr, u64 value)
{
        if (!ms_hyperv.paravisor_present)
                return;

        if (hv_isolation_type_tdx())
                hv_tdx_msr_write(msr, value);
        else if (hv_isolation_type_snp())
                hv_ghcb_msr_write(msr, value);
}

void hv_ivm_msr_read(u64 msr, u64 *value)
{
        if (!ms_hyperv.paravisor_present)
                return;

        if (hv_isolation_type_tdx())
                hv_tdx_msr_read(msr, value);
        else if (hv_isolation_type_snp())
                hv_ghcb_msr_read(msr, value);
}

/*
 * hv_mark_gpa_visibility - Set pages visible to the host via hvcall.
 *
 * In an isolation VM, all guest memory is encrypted against the host,
 * so the guest must make memory visible to the host via a hypercall
 * before sharing it.
 */
static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
                                  enum hv_mem_host_visibility visibility)
{
        struct hv_gpa_range_for_visibility *input;
        u64 hv_status;
        unsigned long flags;

        /* no-op if partition isolation is not enabled */
        if (!hv_is_isolation_supported())
                return 0;

        if (count > HV_MAX_MODIFY_GPA_REP_COUNT) {
                pr_err("Hyper-V: GPA count:%d exceeds supported:%lu\n", count,
                       HV_MAX_MODIFY_GPA_REP_COUNT);
                return -EINVAL;
        }

        local_irq_save(flags);
        input = *this_cpu_ptr(hyperv_pcpu_input_arg);

        if (unlikely(!input)) {
                local_irq_restore(flags);
                return -EINVAL;
        }

        input->partition_id = HV_PARTITION_ID_SELF;
        input->host_visibility = visibility;
        input->reserved0 = 0;
        input->reserved1 = 0;
        memcpy((void *)input->gpa_page_list, pfn, count * sizeof(*pfn));
        hv_status = hv_do_rep_hypercall(
                        HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY, count,
                        0, input, NULL);
        local_irq_restore(flags);

        if (hv_result_success(hv_status))
                return 0;
        else
                return -EFAULT;
}

/*
 * When transitioning memory between encrypted and decrypted, the caller
 * of set_memory_encrypted() or set_memory_decrypted() is responsible for
 * ensuring that the memory isn't in use and isn't referenced while the
 * transition is in progress. The transition has multiple steps, and the
 * memory is in an inconsistent state until all steps are complete. A
 * reference while the state is inconsistent could result in an exception
 * that can't be cleanly fixed up.
 *
 * But the Linux kernel load_unaligned_zeropad() mechanism could cause a
 * stray reference that can't be prevented by the caller, so Linux has
 * specific code to handle this case. But when the #VC and #VE exceptions
 * are routed to a paravisor, the specific code doesn't work. To avoid this
 * problem, mark the pages as "not present" while the transition is in
 * progress. If load_unaligned_zeropad() causes a stray reference, a normal
 * page fault is generated instead of #VC or #VE, and the page-fault-based
 * handlers for load_unaligned_zeropad() resolve the reference. When the
 * transition is complete, hv_vtom_set_host_visibility() marks the pages
 * as "present" again.
 */
static int hv_vtom_clear_present(unsigned long kbuffer, int pagecount, bool enc)
{
        return set_memory_np(kbuffer, pagecount);
}

/*
 * hv_vtom_set_host_visibility - Set specified memory visible to the host.
 *
 * In an isolation VM, all guest memory is encrypted against the host,
 * so the guest must make memory visible to the host via a hypercall
 * before sharing it. This function is a wrapper around
 * hv_mark_gpa_visibility() that takes a memory base and size.
 */
static int hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bool enc)
{
        enum hv_mem_host_visibility visibility = enc ?
                        VMBUS_PAGE_NOT_VISIBLE : VMBUS_PAGE_VISIBLE_READ_WRITE;
        u64 *pfn_array;
        phys_addr_t paddr;
        int i, pfn, err;
        void *vaddr;
        int ret = 0;

        pfn_array = kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
        if (!pfn_array) {
                ret = -ENOMEM;
                goto err_set_memory_p;
        }

        for (i = 0, pfn = 0; i < pagecount; i++) {
                /*
                 * Use slow_virt_to_phys() because the PRESENT bit has been
                 * temporarily cleared in the PTEs. slow_virt_to_phys() works
                 * without the PRESENT bit while virt_to_hvpfn() or similar
                 * does not.
                 */
                vaddr = (void *)kbuffer + (i * HV_HYP_PAGE_SIZE);
                paddr = slow_virt_to_phys(vaddr);
                pfn_array[pfn] = paddr >> HV_HYP_PAGE_SHIFT;
                pfn++;

                if (pfn == HV_MAX_MODIFY_GPA_REP_COUNT || i == pagecount - 1) {
                        ret = hv_mark_gpa_visibility(pfn, pfn_array,
                                                     visibility);
                        if (ret)
                                goto err_free_pfn_array;
                        pfn = 0;
                }
        }

err_free_pfn_array:
        kfree(pfn_array);

err_set_memory_p:
        /*
         * Set the PTE PRESENT bits again to revert what hv_vtom_clear_present()
         * did. Do this even if there is an error earlier in this function in
         * order to avoid leaving the memory range in a "broken" state. Setting
         * the PRESENT bits shouldn't fail, but return an error if it does.
         */
        err = set_memory_p(kbuffer, pagecount);
        if (err && !ret)
                ret = err;

        return ret;
}
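
/*
 * Illustrative arithmetic for the batching in
 * hv_vtom_set_host_visibility() above: pfn_array holds one hypervisor
 * page of u64 PFNs, and a hypercall is issued whenever
 * HV_MAX_MODIFY_GPA_REP_COUNT entries have accumulated or the last
 * page has been processed. Assuming a 510-entry batch limit (one 4K
 * page of PFNs minus the fixed header), converting a 4 MiB buffer
 * (1024 pages) takes three hypercalls: 510 + 510 + 4 pages.
 */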

static bool hv_vtom_tlb_flush_required(bool private)
{
        /*
         * Since hv_vtom_clear_present() marks the PTEs as "not present"
         * and flushes the TLB, they can't be in the TLB. That makes the
         * flush controlled by this function redundant, so return "false".
         */
        return false;
}

static bool hv_vtom_cache_flush_required(void)
{
        return false;
}

static bool hv_is_private_mmio(u64 addr)
{
        /*
         * Hyper-V always provides a single IO-APIC in a guest VM.
         * When a paravisor is used, it is emulated by the paravisor
         * in the guest context and must be mapped private.
         */
        if (addr >= HV_IOAPIC_BASE_ADDRESS &&
            addr < (HV_IOAPIC_BASE_ADDRESS + PAGE_SIZE))
                return true;

        /* Same with a vTPM */
        if (addr >= VTPM_BASE_ADDRESS &&
            addr < (VTPM_BASE_ADDRESS + PAGE_SIZE))
                return true;

        return false;
}

void __init hv_vtom_init(void)
{
        enum hv_isolation_type type = hv_get_isolation_type();

        switch (type) {
        case HV_ISOLATION_TYPE_VBS:
                fallthrough;
        /*
         * By design, a VM using vTOM doesn't see the SEV setting,
         * so SEV initialization is bypassed and sev_status isn't set.
         * Set it here to indicate a vTOM VM.
         *
         * Note: if CONFIG_AMD_MEM_ENCRYPT is not set, sev_status is
         * defined as 0ULL, to which we can't assign a value.
         */
#ifdef CONFIG_AMD_MEM_ENCRYPT
        case HV_ISOLATION_TYPE_SNP:
                sev_status = MSR_AMD64_SNP_VTOM;
                cc_vendor = CC_VENDOR_AMD;
                break;
#endif

        case HV_ISOLATION_TYPE_TDX:
                cc_vendor = CC_VENDOR_INTEL;
                break;

        default:
                panic("hv_vtom_init: unsupported isolation type %d\n", type);
        }

        /*
         * The shared GPA boundary is the vTOM bit: use it as the mask
         * applied to shared (decrypted) mappings, and trim physical_mask
         * so guest physical addresses stay below the vTOM bit.
         */
        cc_set_mask(ms_hyperv.shared_gpa_boundary);
        physical_mask &= ms_hyperv.shared_gpa_boundary - 1;

        x86_platform.hyper.is_private_mmio = hv_is_private_mmio;
        x86_platform.guest.enc_cache_flush_required = hv_vtom_cache_flush_required;
        x86_platform.guest.enc_tlb_flush_required = hv_vtom_tlb_flush_required;
        x86_platform.guest.enc_status_change_prepare = hv_vtom_clear_present;
        x86_platform.guest.enc_status_change_finish = hv_vtom_set_host_visibility;

        /* Set WB as the default cache mode. */
        guest_force_mtrr_state(NULL, 0, MTRR_TYPE_WRBACK);
}

#endif /* defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST) */

enum hv_isolation_type hv_get_isolation_type(void)
{
        if (!(ms_hyperv.priv_high & HV_ISOLATION))
                return HV_ISOLATION_TYPE_NONE;
        return FIELD_GET(HV_ISOLATION_TYPE, ms_hyperv.isolation_config_b);
}
EXPORT_SYMBOL_GPL(hv_get_isolation_type);

/*
 * hv_is_isolation_supported - Check if the system runs in a Hyper-V
 * isolation VM.
 */
bool hv_is_isolation_supported(void)
{
        if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
                return false;

        if (!hypervisor_is_type(X86_HYPER_MS_HYPERV))
                return false;

        return hv_get_isolation_type() != HV_ISOLATION_TYPE_NONE;
}

DEFINE_STATIC_KEY_FALSE(isolation_type_snp);

/*
 * hv_isolation_type_snp - Check if the system runs in an AMD SEV-SNP based
 * isolation VM.
 */
bool hv_isolation_type_snp(void)
{
        return static_branch_unlikely(&isolation_type_snp);
}

DEFINE_STATIC_KEY_FALSE(isolation_type_tdx);

/*
 * hv_isolation_type_tdx - Check if the system runs in an Intel TDX based
 * isolation VM.
 */
bool hv_isolation_type_tdx(void)
{
        return static_branch_unlikely(&isolation_type_tdx);
}