// SPDX-License-Identifier: GPL-2.0
/*
 * Hyper-V Isolation VM interface with paravisor and hypervisor
 *
 * Author:
 *  Tianyu Lan <Tianyu.Lan@microsoft.com>
 */

#include <linux/bitfield.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/export.h>
#include <asm/svm.h>
#include <asm/sev.h>
#include <asm/io.h>
#include <asm/coco.h>
#include <asm/mem_encrypt.h>
#include <asm/set_memory.h>
#include <asm/mshyperv.h>
#include <asm/hypervisor.h>
#include <asm/mtrr.h>
#include <asm/io_apic.h>
#include <asm/realmode.h>
#include <asm/e820/api.h>
#include <asm/desc.h>
#include <asm/msr.h>
#include <asm/segment.h>
#include <uapi/asm/vmx.h>

#ifdef CONFIG_AMD_MEM_ENCRYPT

#define GHCB_USAGE_HYPERV_CALL	1

/*
 * Layout of the GHCB page when it carries a Hyper-V hypercall
 * (ghcb_usage == GHCB_USAGE_HYPERV_CALL) rather than standard GHCB
 * exit information.
 */
union hv_ghcb {
	struct ghcb ghcb;
	struct {
		u64 hypercalldata[509];
		u64 outputgpa;
		union {
			union {
				struct {
					u32 callcode		: 16;
					u32 isfast		: 1;
					u32 reserved1		: 14;
					u32 isnested		: 1;
					u32 countofelements	: 12;
					u32 reserved2		: 4;
					u32 repstartindex	: 12;
					u32 reserved3		: 4;
				};
				u64 asuint64;
			} hypercallinput;
			union {
				struct {
					u16 callstatus;
					u16 reserved1;
					u32 elementsprocessed	: 12;
					u32 reserved2		: 20;
				};
				u64 asuint64;
			} hypercalloutput;
		};
		u64 reserved2;
	} hypercall;
} __packed __aligned(HV_HYP_PAGE_SIZE);

/* Only used in an SNP VM with the paravisor */
static u16 hv_ghcb_version __ro_after_init;

/* Functions only used in an SNP VM with the paravisor go here. */
u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
{
	union hv_ghcb *hv_ghcb;
	void **ghcb_base;
	unsigned long flags;
	u64 status;

	if (!hv_ghcb_pg)
		return -EFAULT;

	WARN_ON(in_nmi());

	local_irq_save(flags);
	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
	hv_ghcb = (union hv_ghcb *)*ghcb_base;
	if (!hv_ghcb) {
		local_irq_restore(flags);
		return -EFAULT;
	}

	hv_ghcb->ghcb.protocol_version = GHCB_PROTOCOL_MAX;
	hv_ghcb->ghcb.ghcb_usage = GHCB_USAGE_HYPERV_CALL;

	hv_ghcb->hypercall.outputgpa = (u64)output;
	hv_ghcb->hypercall.hypercallinput.asuint64 = 0;
	hv_ghcb->hypercall.hypercallinput.callcode = control;

	if (input_size)
		memcpy(hv_ghcb->hypercall.hypercalldata, input, input_size);

	VMGEXIT();

	hv_ghcb->ghcb.ghcb_usage = 0xffffffff;
	memset(hv_ghcb->ghcb.save.valid_bitmap, 0,
	       sizeof(hv_ghcb->ghcb.save.valid_bitmap));

	status = hv_ghcb->hypercall.hypercalloutput.callstatus;

	local_irq_restore(flags);

	return status;
}

static inline u64 rd_ghcb_msr(void)
{
	return native_rdmsrq(MSR_AMD64_SEV_ES_GHCB);
}

static inline void wr_ghcb_msr(u64 val)
{
	native_wrmsrq(MSR_AMD64_SEV_ES_GHCB, val);
}

static enum es_result hv_ghcb_hv_call(struct ghcb *ghcb, u64 exit_code,
				      u64 exit_info_1, u64 exit_info_2)
{
	/* Fill in protocol and format specifiers */
	ghcb->protocol_version = hv_ghcb_version;
	ghcb->ghcb_usage = GHCB_DEFAULT_USAGE;

	ghcb_set_sw_exit_code(ghcb, exit_code);
	ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
	ghcb_set_sw_exit_info_2(ghcb, exit_info_2);

	VMGEXIT();

	if (ghcb->save.sw_exit_info_1 & GENMASK_ULL(31, 0))
		return ES_VMM_ERROR;
	else
		return ES_OK;
}

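/*
 * Terminate the guest through the GHCB MSR protocol: encode the reason
 * set/code into a GHCB_MSR_TERM_REQ request, hand it to the hypervisor
 * via VMGEXIT, and never return.
 */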
void __noreturn hv_ghcb_terminate(unsigned int set, unsigned int reason)
{
	u64 val = GHCB_MSR_TERM_REQ;

	/* Tell the hypervisor what went wrong. */
	val |= GHCB_SEV_TERM_REASON(set, reason);

	/* Request Guest Termination from Hypervisor */
	wr_ghcb_msr(val);
	VMGEXIT();

	while (true)
		asm volatile("hlt\n" : : : "memory");
}

bool hv_ghcb_negotiate_protocol(void)
{
	u64 ghcb_gpa;
	u64 val;

	/* Save ghcb page gpa. */
	ghcb_gpa = rd_ghcb_msr();

	/* Do the GHCB protocol version negotiation */
	wr_ghcb_msr(GHCB_MSR_SEV_INFO_REQ);
	VMGEXIT();
	val = rd_ghcb_msr();

	if (GHCB_MSR_INFO(val) != GHCB_MSR_SEV_INFO_RESP)
		return false;

	if (GHCB_MSR_PROTO_MAX(val) < GHCB_PROTOCOL_MIN ||
	    GHCB_MSR_PROTO_MIN(val) > GHCB_PROTOCOL_MAX)
		return false;

	hv_ghcb_version = min_t(size_t, GHCB_MSR_PROTO_MAX(val),
				GHCB_PROTOCOL_MAX);

	/* Write ghcb page back after negotiating protocol. */
	wr_ghcb_msr(ghcb_gpa);
	VMGEXIT();

	return true;
}

static void hv_ghcb_msr_write(u64 msr, u64 value)
{
	union hv_ghcb *hv_ghcb;
	void **ghcb_base;
	unsigned long flags;

	if (!hv_ghcb_pg)
		return;

	WARN_ON(in_nmi());

	local_irq_save(flags);
	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
	hv_ghcb = (union hv_ghcb *)*ghcb_base;
	if (!hv_ghcb) {
		local_irq_restore(flags);
		return;
	}

	ghcb_set_rcx(&hv_ghcb->ghcb, msr);
	ghcb_set_rax(&hv_ghcb->ghcb, lower_32_bits(value));
	ghcb_set_rdx(&hv_ghcb->ghcb, upper_32_bits(value));

	if (hv_ghcb_hv_call(&hv_ghcb->ghcb, SVM_EXIT_MSR, 1, 0))
		pr_warn("Fail to write msr via ghcb %llx.\n", msr);

	local_irq_restore(flags);
}

static void hv_ghcb_msr_read(u64 msr, u64 *value)
{
	union hv_ghcb *hv_ghcb;
	void **ghcb_base;
	unsigned long flags;

	/* Check size of union hv_ghcb here. */
	BUILD_BUG_ON(sizeof(union hv_ghcb) != HV_HYP_PAGE_SIZE);

	if (!hv_ghcb_pg)
		return;

	WARN_ON(in_nmi());

	local_irq_save(flags);
	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
	hv_ghcb = (union hv_ghcb *)*ghcb_base;
	if (!hv_ghcb) {
		local_irq_restore(flags);
		return;
	}

	ghcb_set_rcx(&hv_ghcb->ghcb, msr);
	if (hv_ghcb_hv_call(&hv_ghcb->ghcb, SVM_EXIT_MSR, 0, 0))
		pr_warn("Fail to read msr via ghcb %llx.\n", msr);
	else
		*value = (u64)lower_32_bits(hv_ghcb->ghcb.save.rax)
			| ((u64)lower_32_bits(hv_ghcb->ghcb.save.rdx) << 32);
	local_irq_restore(flags);
}

/* Only used in a fully enlightened SNP VM, i.e. without the paravisor */
static u8 ap_start_input_arg[PAGE_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
static u8 ap_start_stack[PAGE_SIZE] __aligned(PAGE_SIZE);
static DEFINE_PER_CPU(struct sev_es_save_area *, hv_sev_vmsa);

/* Functions only used in an SNP VM without the paravisor go here. */

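/*
 * Build a VMSA segment register from the current selector: the base and
 * limit are forced to a flat layout, and the attribute field is repacked
 * from bytes 5-6 of the GDT descriptor (access byte in bits 0-7, the
 * G/D/L/AVL flags nibble in bits 8-11), which is the compact format the
 * SEV-ES save area expects.
 */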
#define hv_populate_vmcb_seg(seg, gdtr_base)					\
do {										\
	if (seg.selector) {							\
		seg.base = 0;							\
		seg.limit = HV_AP_SEGMENT_LIMIT;				\
		seg.attrib = *(u16 *)(gdtr_base + seg.selector + 5);		\
		seg.attrib = (seg.attrib & 0xFF) | ((seg.attrib >> 4) & 0xF00);	\
	}									\
} while (0)

static int snp_set_vmsa(void *va, bool vmsa)
{
	u64 attrs;

	/*
	 * Running at VMPL0 allows the kernel to change the VMSA bit for a page
	 * using the RMPADJUST instruction. However, for the instruction to
	 * succeed it must target the permissions of a lesser privileged
	 * (higher numbered) VMPL level, so use VMPL1 (refer to the RMPADJUST
	 * instruction in the AMD64 APM Volume 3).
	 */
	attrs = 1;
	if (vmsa)
		attrs |= RMPADJUST_VMSA_PAGE_BIT;

	return rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
}

static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
{
	int err;

	err = snp_set_vmsa(vmsa, false);
	if (err)
		pr_err("clear VMSA page failed (%u), leaking page\n", err);
	else
		free_page((unsigned long)vmsa);
}

int hv_snp_boot_ap(u32 apic_id, unsigned long start_ip, unsigned int cpu)
{
	struct sev_es_save_area *vmsa = (struct sev_es_save_area *)
		__get_free_page(GFP_KERNEL | __GFP_ZERO);
	struct sev_es_save_area *cur_vmsa;
	struct desc_ptr gdtr;
	u64 ret, retry = 5;
	struct hv_enable_vp_vtl *start_vp_input;
	unsigned long flags;
	int vp_index;

	if (!vmsa)
		return -ENOMEM;

	/* Find the Hyper-V VP index, which may not be the same as the APIC ID */
	vp_index = hv_apicid_to_vp_index(apic_id);
	if (vp_index < 0 || vp_index > ms_hyperv.max_vp_index) {
		free_page((unsigned long)vmsa);
		return -EINVAL;
	}

	native_store_gdt(&gdtr);

	vmsa->gdtr.base = gdtr.address;
	vmsa->gdtr.limit = gdtr.size;

	savesegment(es, vmsa->es.selector);
	hv_populate_vmcb_seg(vmsa->es, vmsa->gdtr.base);

	savesegment(cs, vmsa->cs.selector);
	hv_populate_vmcb_seg(vmsa->cs, vmsa->gdtr.base);

	savesegment(ss, vmsa->ss.selector);
	hv_populate_vmcb_seg(vmsa->ss, vmsa->gdtr.base);

	savesegment(ds, vmsa->ds.selector);
	hv_populate_vmcb_seg(vmsa->ds, vmsa->gdtr.base);

	vmsa->efer = native_read_msr(MSR_EFER);

	vmsa->cr4 = native_read_cr4();
	vmsa->cr3 = __native_read_cr3();
	vmsa->cr0 = native_read_cr0();

	vmsa->xcr0 = 1;
	vmsa->g_pat = HV_AP_INIT_GPAT_DEFAULT;
	vmsa->rip = (u64)secondary_startup_64_no_verify;
	vmsa->rsp = (u64)&ap_start_stack[PAGE_SIZE];

	/*
	 * Set the SNP-specific fields for this VMSA:
	 *   VMPL level
	 *   SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits)
	 */
	vmsa->vmpl = 0;
	vmsa->sev_features = sev_status >> 2;

	ret = snp_set_vmsa(vmsa, true);
	if (ret) {
		pr_err("RMPADJUST(%llx) failed: %llx\n", (u64)vmsa, ret);
		free_page((u64)vmsa);
		return ret;
	}

	local_irq_save(flags);
	start_vp_input = (struct hv_enable_vp_vtl *)ap_start_input_arg;
	memset(start_vp_input, 0, sizeof(*start_vp_input));
	start_vp_input->partition_id = -1;
	start_vp_input->vp_index = vp_index;
	start_vp_input->target_vtl.target_vtl = ms_hyperv.vtl;
	*(u64 *)&start_vp_input->vp_context = __pa(vmsa) | 1;

	do {
		ret = hv_do_hypercall(HVCALL_START_VP,
				      start_vp_input, NULL);
	} while (hv_result(ret) == HV_STATUS_TIME_OUT && retry--);

	local_irq_restore(flags);

	if (!hv_result_success(ret)) {
		pr_err("HvCallStartVirtualProcessor failed: %llx\n", ret);
		snp_cleanup_vmsa(vmsa);
		vmsa = NULL;
	}

	cur_vmsa = per_cpu(hv_sev_vmsa, cpu);
	/* Free up any previous VMSA page */
	if (cur_vmsa)
		snp_cleanup_vmsa(cur_vmsa);

	/* Record the current VMSA page */
	per_cpu(hv_sev_vmsa, cpu) = vmsa;

	return ret;
}

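/*
 * Direct hypercall from a fully enlightened SNP VM (no paravisor): the
 * control word goes in RCX and the two parameters in RDX and R8; the
 * hypercall status is returned in RAX. R9-R11 may be modified across
 * the call, hence the extra clobbers.
 */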
u64 hv_snp_hypercall(u64 control, u64 param1, u64 param2)
{
	u64 hv_status;

	register u64 __r8 asm("r8") = param2;
	asm volatile("vmmcall"
		     : "=a" (hv_status), "+c" (control), "+d" (param1), "+r" (__r8)
		     : : "cc", "memory", "r9", "r10", "r11");

	return hv_status;
}

#else
static inline void hv_ghcb_msr_write(u64 msr, u64 value) {}
static inline void hv_ghcb_msr_read(u64 msr, u64 *value) {}
u64 hv_snp_hypercall(u64 control, u64 param1, u64 param2) { return U64_MAX; }
#endif /* CONFIG_AMD_MEM_ENCRYPT */

#ifdef CONFIG_INTEL_TDX_GUEST
static void hv_tdx_msr_write(u64 msr, u64 val)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = EXIT_REASON_MSR_WRITE,
		.r12 = msr,
		.r13 = val,
	};

	u64 ret = __tdx_hypercall(&args);

	WARN_ONCE(ret, "Failed to emulate MSR write: %lld\n", ret);
}

static void hv_tdx_msr_read(u64 msr, u64 *val)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = EXIT_REASON_MSR_READ,
		.r12 = msr,
	};

	u64 ret = __tdx_hypercall(&args);

	if (WARN_ONCE(ret, "Failed to emulate MSR read: %lld\n", ret))
		*val = 0;
	else
		*val = args.r11;
}

/*
 * Hyper-V hypercall issued through the TDX module: the control word and
 * parameters are passed in R10, RDX and R8, and the Hyper-V hypercall
 * status is returned in R11.
 */
u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2)
{
	struct tdx_module_args args = { };

	args.r10 = control;
	args.rdx = param1;
	args.r8 = param2;

	(void)__tdx_hypercall(&args);

	return args.r11;
}

#else
static inline void hv_tdx_msr_write(u64 msr, u64 value) {}
static inline void hv_tdx_msr_read(u64 msr, u64 *value) {}
u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2) { return U64_MAX; }
#endif /* CONFIG_INTEL_TDX_GUEST */

#if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
void hv_ivm_msr_write(u64 msr, u64 value)
{
	if (!ms_hyperv.paravisor_present)
		return;

	if (hv_isolation_type_tdx())
		hv_tdx_msr_write(msr, value);
	else if (hv_isolation_type_snp())
		hv_ghcb_msr_write(msr, value);
}

void hv_ivm_msr_read(u64 msr, u64 *value)
{
	if (!ms_hyperv.paravisor_present)
		return;

	if (hv_isolation_type_tdx())
		hv_tdx_msr_read(msr, value);
	else if (hv_isolation_type_snp())
		hv_ghcb_msr_read(msr, value);
}

/*
 * Keep track of the PFN regions which were shared with the host. The access
 * must be revoked upon kexec/kdump (see hv_vtom_kexec_finish()).
 */
struct hv_enc_pfn_region {
	struct list_head list;
	u64 pfn;
	int count;
};

static LIST_HEAD(hv_list_enc);
static DEFINE_RAW_SPINLOCK(hv_list_enc_lock);

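/*
 * Example of the bookkeeping below: sharing PFNs 0x100, 0x101 and 0x102
 * produces a single region { .pfn = 0x100, .count = 3 }; un-sharing 0x101
 * afterwards splits it into { 0x100, 1 } and { 0x102, 1 }.
 */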
static int hv_list_enc_add(const u64 *pfn_list, int count)
{
	struct hv_enc_pfn_region *ent;
	unsigned long flags;
	u64 pfn;
	int i;

	for (i = 0; i < count; i++) {
		pfn = pfn_list[i];

		raw_spin_lock_irqsave(&hv_list_enc_lock, flags);
		/* Check if the PFN already exists in some region first */
		list_for_each_entry(ent, &hv_list_enc, list) {
			if ((ent->pfn <= pfn) && (ent->pfn + ent->count - 1 >= pfn))
				/* Nothing to do - pfn is already in the list */
				goto unlock_done;
		}

		/*
		 * Check if the PFN is adjacent to an existing region. Growing
		 * a region can make it adjacent to another one, but merging is
		 * not (yet) implemented for simplicity. A PFN is never added
		 * to two regions, to keep the logic in hv_list_enc_remove()
		 * correct.
		 */
		list_for_each_entry(ent, &hv_list_enc, list) {
			if (ent->pfn + ent->count == pfn) {
				/* Grow existing region up */
				ent->count++;
				goto unlock_done;
			} else if (pfn + 1 == ent->pfn) {
				/* Grow existing region down */
				ent->pfn--;
				ent->count++;
				goto unlock_done;
			}
		}
		raw_spin_unlock_irqrestore(&hv_list_enc_lock, flags);

		/* No adjacent region found -- create a new one */
		ent = kzalloc(sizeof(struct hv_enc_pfn_region), GFP_KERNEL);
		if (!ent)
			return -ENOMEM;

		ent->pfn = pfn;
		ent->count = 1;

		raw_spin_lock_irqsave(&hv_list_enc_lock, flags);
		list_add(&ent->list, &hv_list_enc);

unlock_done:
		raw_spin_unlock_irqrestore(&hv_list_enc_lock, flags);
	}

	return 0;
}

static int hv_list_enc_remove(const u64 *pfn_list, int count)
{
	struct hv_enc_pfn_region *ent, *t;
	struct hv_enc_pfn_region new_region;
	unsigned long flags;
	u64 pfn;
	int i;

	for (i = 0; i < count; i++) {
		pfn = pfn_list[i];

		raw_spin_lock_irqsave(&hv_list_enc_lock, flags);
		list_for_each_entry_safe(ent, t, &hv_list_enc, list) {
			if (pfn == ent->pfn + ent->count - 1) {
				/* Removing tail pfn */
				ent->count--;
				if (!ent->count) {
					list_del(&ent->list);
					kfree(ent);
				}
				goto unlock_done;
			} else if (pfn == ent->pfn) {
				/* Removing head pfn */
				ent->count--;
				ent->pfn++;
				if (!ent->count) {
					list_del(&ent->list);
					kfree(ent);
				}
				goto unlock_done;
			} else if (pfn > ent->pfn && pfn < ent->pfn + ent->count - 1) {
				/*
				 * Removing a pfn in the middle. Cut off the tail
				 * of the existing region and create a template for
				 * the new one.
				 */
				new_region.pfn = pfn + 1;
				new_region.count = ent->count - (pfn - ent->pfn + 1);
				ent->count = pfn - ent->pfn;
				goto unlock_split;
			}
		}
unlock_done:
		raw_spin_unlock_irqrestore(&hv_list_enc_lock, flags);
		continue;

unlock_split:
		raw_spin_unlock_irqrestore(&hv_list_enc_lock, flags);

		ent = kzalloc(sizeof(struct hv_enc_pfn_region), GFP_KERNEL);
		if (!ent)
			return -ENOMEM;

		ent->pfn = new_region.pfn;
		ent->count = new_region.count;

		raw_spin_lock_irqsave(&hv_list_enc_lock, flags);
		list_add(&ent->list, &hv_list_enc);
		raw_spin_unlock_irqrestore(&hv_list_enc_lock, flags);
	}

	return 0;
}

/* Stop new private<->shared conversions */
static void hv_vtom_kexec_begin(void)
{
	if (!IS_ENABLED(CONFIG_KEXEC_CORE))
		return;

	/*
	 * Crash kernel reaches here with interrupts disabled: can't wait for
	 * conversions to finish.
	 *
	 * If a race happened, just report and proceed.
	 */
	if (!set_memory_enc_stop_conversion())
		pr_warn("Failed to stop shared<->private conversions\n");
}

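/*
 * Revoke host access to all previously shared regions before handing over
 * to the kexec kernel, so that it starts with every page private again.
 * Walk hv_list_enc and issue the visibility hypercall in batches of up to
 * HV_MAX_MODIFY_GPA_REP_COUNT pages.
 */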
static void hv_vtom_kexec_finish(void)
{
	struct hv_gpa_range_for_visibility *input;
	struct hv_enc_pfn_region *ent;
	unsigned long flags;
	u64 hv_status;
	int cur, i;

	local_irq_save(flags);
	input = *this_cpu_ptr(hyperv_pcpu_input_arg);

	if (unlikely(!input))
		goto out;

	list_for_each_entry(ent, &hv_list_enc, list) {
		for (i = 0, cur = 0; i < ent->count; i++) {
			input->gpa_page_list[cur] = ent->pfn + i;
			cur++;

			if (cur == HV_MAX_MODIFY_GPA_REP_COUNT || i == ent->count - 1) {
				input->partition_id = HV_PARTITION_ID_SELF;
				input->host_visibility = VMBUS_PAGE_NOT_VISIBLE;
				input->reserved0 = 0;
				input->reserved1 = 0;
				hv_status = hv_do_rep_hypercall(
					HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY,
					cur, 0, input, NULL);
				WARN_ON_ONCE(!hv_result_success(hv_status));
				cur = 0;
			}
		}
	}

out:
	local_irq_restore(flags);
}

/*
 * hv_mark_gpa_visibility - Set pages visible to the host via hypercall.
 *
 * In an Isolation VM, all guest memory is encrypted from the host, and the
 * guest must make memory visible to the host via a hypercall before sharing
 * it with the host.
 */
static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
				  enum hv_mem_host_visibility visibility)
{
	struct hv_gpa_range_for_visibility *input;
	u64 hv_status;
	unsigned long flags;
	int ret;

	/* no-op if partition isolation is not enabled */
	if (!hv_is_isolation_supported())
		return 0;

	if (count > HV_MAX_MODIFY_GPA_REP_COUNT) {
		pr_err("Hyper-V: GPA count:%d exceeds supported:%lu\n", count,
		       HV_MAX_MODIFY_GPA_REP_COUNT);
		return -EINVAL;
	}

	if (visibility == VMBUS_PAGE_NOT_VISIBLE)
		ret = hv_list_enc_remove(pfn, count);
	else
		ret = hv_list_enc_add(pfn, count);
	if (ret)
		return ret;

	local_irq_save(flags);
	input = *this_cpu_ptr(hyperv_pcpu_input_arg);

	if (unlikely(!input)) {
		local_irq_restore(flags);
		return -EINVAL;
	}

	input->partition_id = HV_PARTITION_ID_SELF;
	input->host_visibility = visibility;
	input->reserved0 = 0;
	input->reserved1 = 0;
	memcpy((void *)input->gpa_page_list, pfn, count * sizeof(*pfn));
	hv_status = hv_do_rep_hypercall(
			HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY, count,
			0, input, NULL);
	local_irq_restore(flags);

	if (hv_result_success(hv_status))
		return 0;

	/* Undo the accounting on failure */
	if (visibility == VMBUS_PAGE_NOT_VISIBLE)
		ret = hv_list_enc_add(pfn, count);
	else
		ret = hv_list_enc_remove(pfn, count);
	/*
	 * There's no good way to recover from -ENOMEM here, the accounting is
	 * wrong either way.
	 */
	WARN_ON_ONCE(ret);

	return -EFAULT;
}

/*
 * When transitioning memory between encrypted and decrypted, the caller
 * of set_memory_encrypted() or set_memory_decrypted() is responsible for
 * ensuring that the memory isn't in use and isn't referenced while the
 * transition is in progress. The transition has multiple steps, and the
 * memory is in an inconsistent state until all steps are complete. A
 * reference while the state is inconsistent could result in an exception
 * that can't be cleanly fixed up.
 *
 * But the Linux kernel load_unaligned_zeropad() mechanism could cause a
 * stray reference that can't be prevented by the caller, so Linux has
 * specific code to handle this case. But when the #VC and #VE exceptions
 * are routed to a paravisor, that specific code doesn't work. To avoid this
 * problem, mark the pages as "not present" while the transition is in
 * progress. If load_unaligned_zeropad() causes a stray reference, a normal
 * page fault is generated instead of #VC or #VE, and the page-fault-based
 * handlers for load_unaligned_zeropad() resolve the reference. When the
 * transition is complete, hv_vtom_set_host_visibility() marks the pages
 * as "present" again.
 */
static int hv_vtom_clear_present(unsigned long kbuffer, int pagecount, bool enc)
{
	return set_memory_np(kbuffer, pagecount);
}

/*
 * hv_vtom_set_host_visibility - Set the specified memory visible to the host.
 *
 * In an Isolation VM, all guest memory is encrypted from the host, and the
 * guest must make memory visible to the host via a hypercall before sharing
 * it with the host. This function is a wrapper around hv_mark_gpa_visibility()
 * that takes a memory base address and page count.
 */
static int hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bool enc)
{
	enum hv_mem_host_visibility visibility = enc ?
			VMBUS_PAGE_NOT_VISIBLE : VMBUS_PAGE_VISIBLE_READ_WRITE;
	u64 *pfn_array;
	phys_addr_t paddr;
	int i, pfn, err;
	void *vaddr;
	int ret = 0;

	pfn_array = kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
	if (!pfn_array) {
		ret = -ENOMEM;
		goto err_set_memory_p;
	}

	for (i = 0, pfn = 0; i < pagecount; i++) {
		/*
		 * Use slow_virt_to_phys() because the PRESENT bit has been
		 * temporarily cleared in the PTEs. slow_virt_to_phys() works
		 * without the PRESENT bit while virt_to_hvpfn() or similar
		 * does not.
		 */
		vaddr = (void *)kbuffer + (i * HV_HYP_PAGE_SIZE);
		paddr = slow_virt_to_phys(vaddr);
		pfn_array[pfn] = paddr >> HV_HYP_PAGE_SHIFT;
		pfn++;

		if (pfn == HV_MAX_MODIFY_GPA_REP_COUNT || i == pagecount - 1) {
			ret = hv_mark_gpa_visibility(pfn, pfn_array,
						     visibility);
			if (ret)
				goto err_free_pfn_array;
			pfn = 0;
		}
	}

err_free_pfn_array:
	kfree(pfn_array);

err_set_memory_p:
	/*
	 * Set the PTE PRESENT bits again to revert what hv_vtom_clear_present()
	 * did. Do this even if there is an error earlier in this function in
	 * order to avoid leaving the memory range in a "broken" state. Setting
	 * the PRESENT bits shouldn't fail, but return an error if it does.
	 */
	err = set_memory_p(kbuffer, pagecount);
	if (err && !ret)
		ret = err;

	return ret;
}

static bool hv_vtom_tlb_flush_required(bool private)
{
	/*
	 * Since hv_vtom_clear_present() marks the PTEs as "not present"
	 * and flushes the TLB, they can't be in the TLB. That makes the
	 * flush controlled by this function redundant, so return "false".
	 */
	return false;
}

static bool hv_vtom_cache_flush_required(void)
{
	return false;
}

static bool hv_is_private_mmio(u64 addr)
{
	/*
	 * Hyper-V always provides a single IO-APIC in a guest VM.
	 * When a paravisor is used, it is emulated by the paravisor
	 * in the guest context and must be mapped private.
	 */
	if (addr >= HV_IOAPIC_BASE_ADDRESS &&
	    addr < (HV_IOAPIC_BASE_ADDRESS + PAGE_SIZE))
		return true;

	/* Same with a vTPM */
	if (addr >= VTPM_BASE_ADDRESS &&
	    addr < (VTPM_BASE_ADDRESS + PAGE_SIZE))
		return true;

	return false;
}

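/*
 * Wire up the vTOM flavor of the confidential-computing hooks: set cc_vendor
 * based on the isolation type, use the shared GPA boundary as the CC mask,
 * install the visibility-change and kexec callbacks above, treat the IO-APIC
 * and vTPM ranges as private MMIO, and default the cache mode to WB.
 */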
void __init hv_vtom_init(void)
{
	enum hv_isolation_type type = hv_get_isolation_type();

	switch (type) {
	case HV_ISOLATION_TYPE_VBS:
		fallthrough;
	/*
	 * By design, a VM using vTOM doesn't see the SEV setting,
	 * so SEV initialization is bypassed and sev_status isn't set.
	 * Set it here to indicate a vTOM VM.
	 *
	 * Note: if CONFIG_AMD_MEM_ENCRYPT is not set, sev_status is
	 * defined as 0ULL, to which we can't assign a value.
	 */
#ifdef CONFIG_AMD_MEM_ENCRYPT
	case HV_ISOLATION_TYPE_SNP:
		sev_status = MSR_AMD64_SNP_VTOM;
		cc_vendor = CC_VENDOR_AMD;
		break;
#endif

	case HV_ISOLATION_TYPE_TDX:
		cc_vendor = CC_VENDOR_INTEL;
		break;

	default:
		panic("hv_vtom_init: unsupported isolation type %d\n", type);
	}

	cc_set_mask(ms_hyperv.shared_gpa_boundary);
	physical_mask &= ms_hyperv.shared_gpa_boundary - 1;

	x86_platform.hyper.is_private_mmio = hv_is_private_mmio;
	x86_platform.guest.enc_cache_flush_required = hv_vtom_cache_flush_required;
	x86_platform.guest.enc_tlb_flush_required = hv_vtom_tlb_flush_required;
	x86_platform.guest.enc_status_change_prepare = hv_vtom_clear_present;
	x86_platform.guest.enc_status_change_finish = hv_vtom_set_host_visibility;
	x86_platform.guest.enc_kexec_begin = hv_vtom_kexec_begin;
	x86_platform.guest.enc_kexec_finish = hv_vtom_kexec_finish;

	/* Set WB as the default cache mode. */
	guest_force_mtrr_state(NULL, 0, MTRR_TYPE_WRBACK);
}

#endif /* defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST) */

enum hv_isolation_type hv_get_isolation_type(void)
{
	if (!(ms_hyperv.priv_high & HV_ISOLATION))
		return HV_ISOLATION_TYPE_NONE;
	return FIELD_GET(HV_ISOLATION_TYPE, ms_hyperv.isolation_config_b);
}
EXPORT_SYMBOL_GPL(hv_get_isolation_type);

/*
 * hv_is_isolation_supported - Check if the system runs in a Hyper-V
 * isolation VM.
 */
bool hv_is_isolation_supported(void)
{
	if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
		return false;

	if (!hypervisor_is_type(X86_HYPER_MS_HYPERV))
		return false;

	return hv_get_isolation_type() != HV_ISOLATION_TYPE_NONE;
}

DEFINE_STATIC_KEY_FALSE(isolation_type_snp);

/*
 * hv_isolation_type_snp - Check if the system runs in an AMD SEV-SNP based
 * isolation VM.
 */
bool hv_isolation_type_snp(void)
{
	return static_branch_unlikely(&isolation_type_snp);
}

DEFINE_STATIC_KEY_FALSE(isolation_type_tdx);

/*
 * hv_isolation_type_tdx - Check if the system runs in an Intel TDX based
 * isolation VM.
 */
bool hv_isolation_type_tdx(void)
{
	return static_branch_unlikely(&isolation_type_tdx);
}