// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021-2022 Intel Corporation */

#undef pr_fmt
#define pr_fmt(fmt)	"tdx: " fmt

#include <linux/cpufeature.h>
#include <linux/export.h>
#include <linux/io.h>
#include <asm/coco.h>
#include <asm/tdx.h>
#include <asm/vmx.h>
#include <asm/insn.h>
#include <asm/insn-eval.h>
#include <asm/pgtable.h>

/* MMIO direction */
#define EPT_READ	0
#define EPT_WRITE	1

/* Port I/O direction */
#define PORT_READ	0
#define PORT_WRITE	1

/* See Exit Qualification for I/O Instructions in VMX documentation */
#define VE_IS_IO_IN(e)		((e) & BIT(3))
#define VE_GET_IO_SIZE(e)	(((e) & GENMASK(2, 0)) + 1)
#define VE_GET_PORT_NUM(e)	((e) >> 16)
#define VE_IS_IO_STRING(e)	((e) & BIT(4))
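
/*
 * Worked example (illustrative): an exit qualification of 0x700008 decodes
 * as a one-byte IN from port 0x70: VE_GET_IO_SIZE() is 1, VE_IS_IO_IN()
 * is true, VE_IS_IO_STRING() is false and VE_GET_PORT_NUM() is 0x70.
 */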

#define ATTR_DEBUG		BIT(0)
#define ATTR_SEPT_VE_DISABLE	BIT(28)

/* TDX Module call error codes */
#define TDCALL_RETURN_CODE(a)	((a) >> 32)
#define TDCALL_INVALID_OPERAND	0xc0000100

#define TDREPORT_SUBTYPE_0	0

/* Called from __tdx_hypercall() for unrecoverable failure */
noinstr void __noreturn __tdx_hypercall_failed(void)
{
	instrumentation_begin();
	panic("TDVMCALL failed. TDX module bug?");
}

#ifdef CONFIG_KVM_GUEST
long tdx_kvm_hypercall(unsigned int nr, unsigned long p1, unsigned long p2,
		       unsigned long p3, unsigned long p4)
{
	struct tdx_module_args args = {
		.r10 = nr,
		.r11 = p1,
		.r12 = p2,
		.r13 = p3,
		.r14 = p4,
	};

	return __tdx_hypercall(&args);
}
EXPORT_SYMBOL_GPL(tdx_kvm_hypercall);
#endif

/*
 * Used for TDX guests to make calls directly to the TD module. This
 * should only be used for calls that have no legitimate reason to fail
 * or where the kernel can not survive the call failing.
 */
static inline void tdcall(u64 fn, struct tdx_module_args *args)
{
	if (__tdcall_ret(fn, args))
		panic("TDCALL %lld failed (Buggy TDX module!)\n", fn);
}
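
/*
 * Note: tdcall() invokes the trusted TDX module directly, while
 * __tdx_hypercall() uses the TDG.VP.VMCALL leaf to transit through the
 * TDX module to the untrusted VMM. Hypercall outputs come from the VMM
 * and must be treated as untrusted input.
 */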

/**
 * tdx_mcall_get_report0() - Wrapper to get TDREPORT0 (a.k.a. TDREPORT
 *                           subtype 0) using TDG.MR.REPORT TDCALL.
 * @reportdata: Address of the input buffer which contains user-defined
 *              REPORTDATA to be included into TDREPORT.
 * @tdreport: Address of the output buffer to store TDREPORT.
 *
 * Refer to section titled "TDG.MR.REPORT leaf" in the TDX Module
 * v1.0 specification for more information on TDG.MR.REPORT TDCALL.
 * It is used in the TDX guest driver module to get the TDREPORT0.
 *
 * Return 0 on success, -EINVAL for invalid operands, or -EIO on
 * other TDCALL failures.
 */
int tdx_mcall_get_report0(u8 *reportdata, u8 *tdreport)
{
	struct tdx_module_args args = {
		.rcx = virt_to_phys(tdreport),
		.rdx = virt_to_phys(reportdata),
		.r8 = TDREPORT_SUBTYPE_0,
	};
	u64 ret;

	ret = __tdcall(TDG_MR_REPORT, &args);
	if (ret) {
		if (TDCALL_RETURN_CODE(ret) == TDCALL_INVALID_OPERAND)
			return -EINVAL;
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tdx_mcall_get_report0);

static void __noreturn tdx_panic(const char *msg)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = TDVMCALL_REPORT_FATAL_ERROR,
		.r12 = 0, /* Error code: 0 is Panic */
	};
	union {
		/* Define register order according to the GHCI */
		struct { u64 r14, r15, rbx, rdi, rsi, r8, r9, rdx; };

		char str[64];
	} message;

	/* VMM assumes '\0' in byte 65, if the message took all 64 bytes */
	strtomem_pad(message.str, msg, '\0');

	args.r8 = message.r8;
	args.r9 = message.r9;
	args.r14 = message.r14;
	args.r15 = message.r15;
	args.rdi = message.rdi;
	args.rsi = message.rsi;
	args.rbx = message.rbx;
	args.rdx = message.rdx;

	/*
	 * This hypercall should never return and it is not safe
	 * to keep the guest running. Call it forever if it
	 * happens to return.
	 */
	while (1)
		__tdx_hypercall(&args);
}

static void tdx_parse_tdinfo(u64 *cc_mask)
{
	struct tdx_module_args args = {};
	unsigned int gpa_width;
	u64 td_attr;

	/*
	 * TDINFO TDX module call is used to get the TD execution environment
	 * information like GPA width, number of available vcpus, debug mode
	 * information, etc. More details about the ABI can be found in TDX
	 * Guest-Host-Communication Interface (GHCI), section 2.4.2 TDCALL
	 * [TDG.VP.INFO].
	 */
	tdcall(TDG_VP_INFO, &args);

	/*
	 * The highest bit of a guest physical address is the "sharing" bit.
	 * Set it for shared pages and clear it for private pages.
	 *
	 * The GPA width that comes out of this call is critical. TDX guests
	 * can not meaningfully run without it.
	 */
	gpa_width = args.rcx & GENMASK(5, 0);
	*cc_mask = BIT_ULL(gpa_width - 1);
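
	/*
	 * Example (illustrative): a TD with gpa_width == 52 gets
	 * cc_mask == BIT_ULL(51), i.e. GPA bit 51 is the shared bit.
	 */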

	/*
	 * The kernel can not handle #VE's when accessing normal kernel
	 * memory. Ensure that no #VE will be delivered for accesses to
	 * TD-private memory. Only VMM-shared memory (MMIO) will #VE.
	 */
	td_attr = args.rdx;
	if (!(td_attr & ATTR_SEPT_VE_DISABLE)) {
		const char *msg = "TD misconfiguration: SEPT_VE_DISABLE attribute must be set.";

		/* Relax SEPT_VE_DISABLE check for debug TD. */
		if (td_attr & ATTR_DEBUG)
			pr_warn("%s\n", msg);
		else
			tdx_panic(msg);
	}
}

/*
 * The TDX module spec states that #VE may be injected for a limited set of
 * reasons:
 *
 *  - Emulation of the architectural #VE injection on EPT violation;
 *
 *  - As a result of guest TD execution of a disallowed instruction,
 *    a disallowed MSR access, or CPUID virtualization;
 *
 *  - A notification to the guest TD about anomalous behavior;
 *
 * The last one is opt-in and is not used by the kernel.
 *
 * The Intel Software Developer's Manual describes cases when instruction
 * length field can be used in section "Information for VM Exits Due to
 * Instruction Execution".
 *
 * For TDX, it ultimately means GET_VEINFO provides reliable instruction length
 * information if #VE occurred due to instruction execution, but not for EPT
 * violations.
 */
static int ve_instr_len(struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_HLT:
	case EXIT_REASON_MSR_READ:
	case EXIT_REASON_MSR_WRITE:
	case EXIT_REASON_CPUID:
	case EXIT_REASON_IO_INSTRUCTION:
		/* It is safe to use ve->instr_len for #VE due to instructions */
		return ve->instr_len;
	case EXIT_REASON_EPT_VIOLATION:
		/*
		 * For EPT violations, ve->insn_len is not defined. For those,
		 * the kernel must decode instructions manually and should not
		 * be using this function.
		 */
		WARN_ONCE(1, "ve->instr_len is not defined for EPT violations");
		return 0;
	default:
		WARN_ONCE(1, "Unexpected #VE-type: %lld\n", ve->exit_reason);
		return ve->instr_len;
	}
}

static u64 __cpuidle __halt(const bool irq_disabled)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_HLT),
		.r12 = irq_disabled,
	};

	/*
	 * Emulate HLT operation via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section 3.8 TDG.VP.VMCALL<Instruction.HLT>.
	 *
	 * The VMM uses the "IRQ disabled" param to understand IRQ
	 * enabled status (RFLAGS.IF) of the TD guest and to determine
	 * whether or not it should schedule the halted vCPU if an
	 * IRQ becomes pending. E.g. if IRQs are disabled, the VMM
	 * can keep the vCPU in virtual HLT, even if an IRQ is
	 * pending, without hanging/breaking the guest.
	 */
	return __tdx_hypercall(&args);
}

static int handle_halt(struct ve_info *ve)
{
	const bool irq_disabled = irqs_disabled();

	if (__halt(irq_disabled))
		return -EIO;

	return ve_instr_len(ve);
}

void __cpuidle tdx_safe_halt(void)
{
	const bool irq_disabled = false;

	/*
	 * Use WARN_ONCE() to report the failure.
	 */
	if (__halt(irq_disabled))
		WARN_ONCE(1, "HLT instruction emulation failed\n");
}

static int read_msr(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_MSR_READ),
		.r12 = regs->cx,
	};

	/*
	 * Emulate the MSR read via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section titled "TDG.VP.VMCALL<Instruction.RDMSR>".
	 */
	if (__tdx_hypercall(&args))
		return -EIO;

	regs->ax = lower_32_bits(args.r11);
	regs->dx = upper_32_bits(args.r11);
	return ve_instr_len(ve);
}

static int write_msr(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_MSR_WRITE),
		.r12 = regs->cx,
		.r13 = (u64)regs->dx << 32 | regs->ax,
	};
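
	/*
	 * WRMSR supplies the 64-bit value split across EDX:EAX; r13 above
	 * carries it reassembled, as the GHCI expects.
	 */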

	/*
	 * Emulate the MSR write via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI) section titled "TDG.VP.VMCALL<Instruction.WRMSR>".
	 */
	if (__tdx_hypercall(&args))
		return -EIO;

	return ve_instr_len(ve);
}

static int handle_cpuid(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_CPUID),
		.r12 = regs->ax,
		.r13 = regs->cx,
	};

	/*
	 * Only allow VMM to control range reserved for hypervisor
	 * communication.
	 *
	 * Return all-zeros for any CPUID outside the range. It matches CPU
	 * behaviour for non-supported leaf.
	 */
	if (regs->ax < 0x40000000 || regs->ax > 0x4FFFFFFF) {
		regs->ax = regs->bx = regs->cx = regs->dx = 0;
		return ve_instr_len(ve);
	}

	/*
	 * Emulate the CPUID instruction via a hypercall. More info about
	 * ABI can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section titled "VP.VMCALL<Instruction.CPUID>".
	 */
	if (__tdx_hypercall(&args))
		return -EIO;

	/*
	 * As per TDX GHCI CPUID ABI, r12-r15 registers contain contents of
	 * EAX, EBX, ECX, EDX registers after the CPUID instruction execution.
	 * So copy the register contents back to pt_regs.
	 */
	regs->ax = args.r12;
	regs->bx = args.r13;
	regs->cx = args.r14;
	regs->dx = args.r15;

	return ve_instr_len(ve);
}

static bool mmio_read(int size, unsigned long addr, unsigned long *val)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_EPT_VIOLATION),
		.r12 = size,
		.r13 = EPT_READ,
		.r14 = addr,
		.r15 = *val,
	};

	if (__tdx_hypercall(&args))
		return false;

	*val = args.r11;
	return true;
}

static bool mmio_write(int size, unsigned long addr, unsigned long val)
{
	return !_tdx_hypercall(hcall_func(EXIT_REASON_EPT_VIOLATION), size,
			       EPT_WRITE, addr, val);
}

static int handle_mmio(struct pt_regs *regs, struct ve_info *ve)
{
	unsigned long *reg, val, vaddr;
	char buffer[MAX_INSN_SIZE];
	enum insn_mmio_type mmio;
	struct insn insn = {};
	int size, extend_size;
	u8 extend_val = 0;

	/* Only in-kernel MMIO is supported */
	if (WARN_ON_ONCE(user_mode(regs)))
		return -EFAULT;

	if (copy_from_kernel_nofault(buffer, (void *)regs->ip, MAX_INSN_SIZE))
		return -EFAULT;

	if (insn_decode(&insn, buffer, MAX_INSN_SIZE, INSN_MODE_64))
		return -EINVAL;

	mmio = insn_decode_mmio(&insn, &size);
	if (WARN_ON_ONCE(mmio == INSN_MMIO_DECODE_FAILED))
		return -EINVAL;

	if (mmio != INSN_MMIO_WRITE_IMM && mmio != INSN_MMIO_MOVS) {
		reg = insn_get_modrm_reg_ptr(&insn, regs);
		if (!reg)
			return -EINVAL;
	}

	/*
	 * Reject EPT violation #VEs that split pages.
	 *
	 * MMIO accesses are supposed to be naturally aligned and therefore
	 * never cross page boundaries. Seeing split page accesses indicates
	 * a bug or a load_unaligned_zeropad() that stepped into an MMIO page.
	 *
	 * load_unaligned_zeropad() will recover using exception fixups.
	 */
	vaddr = (unsigned long)insn_get_addr_ref(&insn, regs);
	if (vaddr / PAGE_SIZE != (vaddr + size - 1) / PAGE_SIZE)
		return -EFAULT;

	/* Handle writes first */
	switch (mmio) {
	case INSN_MMIO_WRITE:
		memcpy(&val, reg, size);
		if (!mmio_write(size, ve->gpa, val))
			return -EIO;
		return insn.length;
	case INSN_MMIO_WRITE_IMM:
		val = insn.immediate.value;
		if (!mmio_write(size, ve->gpa, val))
			return -EIO;
		return insn.length;
	case INSN_MMIO_READ:
	case INSN_MMIO_READ_ZERO_EXTEND:
	case INSN_MMIO_READ_SIGN_EXTEND:
		/* Reads are handled below */
		break;
	case INSN_MMIO_MOVS:
	case INSN_MMIO_DECODE_FAILED:
		/*
		 * MMIO was accessed with an instruction that could not be
		 * decoded or handled properly. It was likely not using io.h
		 * helpers or accessed MMIO accidentally.
		 */
		return -EINVAL;
	default:
		WARN_ONCE(1, "Unknown insn_decode_mmio() decode value?");
		return -EINVAL;
	}

	/* Handle reads */
	if (!mmio_read(size, ve->gpa, &val))
		return -EIO;

	switch (mmio) {
	case INSN_MMIO_READ:
		/* Zero-extend for 32-bit operation */
		extend_size = size == 4 ? sizeof(*reg) : 0;
		break;
	case INSN_MMIO_READ_ZERO_EXTEND:
		/* Zero extend based on operand size */
		extend_size = insn.opnd_bytes;
		break;
	case INSN_MMIO_READ_SIGN_EXTEND:
		/* Sign extend based on operand size */
		extend_size = insn.opnd_bytes;
		if (size == 1 && val & BIT(7))
			extend_val = 0xFF;
		else if (size > 1 && val & BIT(15))
			extend_val = 0xFF;
		break;
	default:
		/* All other cases have to be covered with the first switch() */
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
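
	/*
	 * Example (illustrative): a MOVSX of the MMIO byte 0x80 into a
	 * 64-bit register gives size == 1, extend_size == 8 and
	 * extend_val == 0xFF, so the register ends up as
	 * 0xffffffffffffff80.
	 */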
	if (extend_size)
		memset(reg, extend_val, extend_size);
	memcpy(reg, &val, size);
	return insn.length;
}

static bool handle_in(struct pt_regs *regs, int size, int port)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_IO_INSTRUCTION),
		.r12 = size,
		.r13 = PORT_READ,
		.r14 = port,
	};
	u64 mask = GENMASK(BITS_PER_BYTE * size, 0);
	bool success;

	/*
	 * Emulate the I/O read via hypercall. More info about ABI can be found
	 * in TDX Guest-Host-Communication Interface (GHCI) section titled
	 * "TDG.VP.VMCALL<Instruction.IO>".
	 */
	success = !__tdx_hypercall(&args);

	/* Update part of the register affected by the emulated instruction */
	regs->ax &= ~mask;
	if (success)
		regs->ax |= args.r11 & mask;

	return success;
}

static bool handle_out(struct pt_regs *regs, int size, int port)
{
	u64 mask = GENMASK(BITS_PER_BYTE * size, 0);

	/*
	 * Emulate the I/O write via hypercall. More info about ABI can be found
	 * in TDX Guest-Host-Communication Interface (GHCI) section titled
	 * "TDG.VP.VMCALL<Instruction.IO>".
	 */
	return !_tdx_hypercall(hcall_func(EXIT_REASON_IO_INSTRUCTION), size,
			       PORT_WRITE, port, regs->ax & mask);
}

/*
 * Emulate I/O using hypercall.
 *
 * Assumes the IO instruction was using ax, which is enforced
 * by the standard io.h macros.
 *
 * Return the instruction length on success, or -EIO on failure.
 */
static int handle_io(struct pt_regs *regs, struct ve_info *ve)
{
	u32 exit_qual = ve->exit_qual;
	int size, port;
	bool in, ret;

	if (VE_IS_IO_STRING(exit_qual))
		return -EIO;

	in = VE_IS_IO_IN(exit_qual);
	size = VE_GET_IO_SIZE(exit_qual);
	port = VE_GET_PORT_NUM(exit_qual);

	if (in)
		ret = handle_in(regs, size, port);
	else
		ret = handle_out(regs, size, port);
	if (!ret)
		return -EIO;

	return ve_instr_len(ve);
}

/*
 * Early #VE exception handler. Only handles a subset of port I/O.
 * Intended only for earlyprintk. If failed, return false.
 */
__init bool tdx_early_handle_ve(struct pt_regs *regs)
{
	struct ve_info ve;
	int insn_len;

	tdx_get_ve_info(&ve);

	if (ve.exit_reason != EXIT_REASON_IO_INSTRUCTION)
		return false;

	insn_len = handle_io(regs, &ve);
	if (insn_len < 0)
		return false;

	regs->ip += insn_len;
	return true;
}

void tdx_get_ve_info(struct ve_info *ve)
{
	struct tdx_module_args args = {};

	/*
	 * Called during #VE handling to retrieve the #VE info from the
	 * TDX module.
	 *
	 * This has to be called early in #VE handling. A "nested" #VE which
	 * occurs before this will raise a #DF and is not recoverable.
	 *
	 * The call retrieves the #VE info from the TDX module, which also
	 * clears the "#VE valid" flag. This must be done before anything else
	 * because any #VE that occurs while the valid flag is set will lead to
	 * #DF.
	 *
	 * Note, the TDX module treats virtual NMIs as inhibited if the #VE
	 * valid flag is set. It means that NMI=>#VE will not result in a #DF.
	 */
	tdcall(TDG_VP_VEINFO_GET, &args);

	/* Transfer the output parameters */
	ve->exit_reason = args.rcx;
	ve->exit_qual = args.rdx;
	ve->gla = args.r8;
	ve->gpa = args.r9;
	ve->instr_len = lower_32_bits(args.r10);
	ve->instr_info = upper_32_bits(args.r10);
}

/*
 * Handle the user initiated #VE.
 *
 * On success, returns the number of bytes RIP should be incremented (>=0)
 * or -errno on error.
 */
static int virt_exception_user(struct pt_regs *regs, struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_CPUID:
		return handle_cpuid(regs, ve);
	default:
		pr_warn("Unexpected #VE: %lld\n", ve->exit_reason);
		return -EIO;
	}
}
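
/*
 * A guest physical address is private when the shared bit (cc_mask) is
 * clear. For TDX, cc_mkenc() clears that bit, so a GPA that is already
 * private is left unchanged by it.
 */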
static inline bool is_private_gpa(u64 gpa)
{
	return gpa == cc_mkenc(gpa);
}

/*
 * Handle the kernel #VE.
 *
 * On success, returns the number of bytes RIP should be incremented (>=0)
 * or -errno on error.
 */
static int virt_exception_kernel(struct pt_regs *regs, struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_HLT:
		return handle_halt(ve);
	case EXIT_REASON_MSR_READ:
		return read_msr(regs, ve);
	case EXIT_REASON_MSR_WRITE:
		return write_msr(regs, ve);
	case EXIT_REASON_CPUID:
		return handle_cpuid(regs, ve);
	case EXIT_REASON_EPT_VIOLATION:
		if (is_private_gpa(ve->gpa))
			panic("Unexpected EPT-violation on private memory.");
		return handle_mmio(regs, ve);
	case EXIT_REASON_IO_INSTRUCTION:
		return handle_io(regs, ve);
	default:
		pr_warn("Unexpected #VE: %lld\n", ve->exit_reason);
		return -EIO;
	}
}

bool tdx_handle_virt_exception(struct pt_regs *regs, struct ve_info *ve)
{
	int insn_len;

	if (user_mode(regs))
		insn_len = virt_exception_user(regs, ve);
	else
		insn_len = virt_exception_kernel(regs, ve);
	if (insn_len < 0)
		return false;

	/* After successful #VE handling, move the IP */
	regs->ip += insn_len;

	return true;
}

static bool tdx_tlb_flush_required(bool private)
{
	/*
	 * TDX guest is responsible for flushing TLB on private->shared
	 * transition. VMM is responsible for flushing on shared->private.
	 *
	 * The VMM _can't_ flush private addresses as it can't generate PAs
	 * with the guest's HKID. Shared memory isn't subject to integrity
	 * checking, i.e. the VMM doesn't need to flush for its own protection.
	 *
	 * There's no need to flush when converting from shared to private,
	 * as flushing is the VMM's responsibility in this case, e.g. it must
	 * flush to avoid integrity failures in the face of a buggy or
	 * malicious guest.
	 */
	return !private;
}

static bool tdx_cache_flush_required(void)
{
	/*
	 * AMD SME/SEV can avoid cache flushing if HW enforces cache coherence.
	 * TDX doesn't have such capability.
	 *
	 * Flush cache unconditionally.
	 */
	return true;
}

/*
 * Notify the VMM about page mapping conversion. More info about ABI
 * can be found in TDX Guest-Host-Communication Interface (GHCI),
 * section "TDG.VP.VMCALL<MapGPA>".
 */
static bool tdx_map_gpa(phys_addr_t start, phys_addr_t end, bool enc)
{
	/* Retrying the hypercall a second time should succeed; use 3 just in case */
	const int max_retries_per_page = 3;
	int retry_count = 0;

	if (!enc) {
		/* Set the shared (decrypted) bits: */
		start |= cc_mkdec(0);
		end |= cc_mkdec(0);
	}
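
	/*
	 * The VMM may ask the guest to retry: a TDVMCALL_STATUS_RETRY result
	 * carries a resume address in R11 and the loop below restarts from
	 * there. Only a retry without forward progress (a resume address
	 * equal to the current start) consumes the retry budget.
	 */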
	while (retry_count < max_retries_per_page) {
		struct tdx_module_args args = {
			.r10 = TDX_HYPERCALL_STANDARD,
			.r11 = TDVMCALL_MAP_GPA,
			.r12 = start,
			.r13 = end - start };

		u64 map_fail_paddr;
		u64 ret = __tdx_hypercall(&args);

		if (ret != TDVMCALL_STATUS_RETRY)
			return !ret;
		/*
		 * The guest must retry the operation for the pages in the
		 * region starting at the GPA specified in R11. R11 comes
		 * from the untrusted VMM. Sanity check it.
		 */
		map_fail_paddr = args.r11;
		if (map_fail_paddr < start || map_fail_paddr >= end)
			return false;

		/* "Consume" a retry without forward progress */
		if (map_fail_paddr == start) {
			retry_count++;
			continue;
		}

		start = map_fail_paddr;
		retry_count = 0;
	}

	return false;
}

/*
 * Inform the VMM of the guest's intent for this physical page: shared with
 * the VMM or private to the guest. The VMM is expected to change its mapping
 * of the page in response.
 */
static bool tdx_enc_status_changed(unsigned long vaddr, int numpages, bool enc)
{
	phys_addr_t start = __pa(vaddr);
	phys_addr_t end = __pa(vaddr + numpages * PAGE_SIZE);

	if (!tdx_map_gpa(start, end, enc))
		return false;

	/* shared->private conversion requires memory to be accepted before use */
	if (enc)
		return tdx_accept_memory(start, end);

	return true;
}

static bool tdx_enc_status_change_prepare(unsigned long vaddr, int numpages,
					  bool enc)
{
	/*
	 * Only handle shared->private conversion here.
	 * See the comment in tdx_early_init().
	 */
	if (enc)
		return tdx_enc_status_changed(vaddr, numpages, enc);
	return true;
}

static bool tdx_enc_status_change_finish(unsigned long vaddr, int numpages,
					 bool enc)
{
	/*
	 * Only handle private->shared conversion here.
	 * See the comment in tdx_early_init().
	 */
	if (!enc)
		return tdx_enc_status_changed(vaddr, numpages, enc);
	return true;
}

void __init tdx_early_init(void)
{
	struct tdx_module_args args = {
		.rdx = TDCS_NOTIFY_ENABLES,
		.r9 = -1ULL,
	};
	u64 cc_mask;
	u32 eax, sig[3];

	cpuid_count(TDX_CPUID_LEAF_ID, 0, &eax, &sig[0], &sig[2], &sig[1]);

	if (memcmp(TDX_IDENT, sig, sizeof(sig)))
		return;

	setup_force_cpu_cap(X86_FEATURE_TDX_GUEST);

	/* TSC is the only reliable clock in TDX guest */
	setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);

	cc_vendor = CC_VENDOR_INTEL;
	tdx_parse_tdinfo(&cc_mask);
	cc_set_mask(cc_mask);

	/* Kernel does not use NOTIFY_ENABLES and does not need random #VEs */
	tdcall(TDG_VM_WR, &args);

	/*
	 * All bits above GPA width are reserved and kernel treats shared bit
	 * as flag, not as part of physical address.
	 *
	 * Adjust physical mask to only cover valid GPA bits.
	 */
	physical_mask &= cc_mask - 1;
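
	/*
	 * Example (illustrative): with cc_mask == BIT_ULL(51), cc_mask - 1
	 * keeps physical address bits 0-50 and drops the shared bit and
	 * everything above it.
	 */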

	/*
	 * The kernel mapping should match the TDX metadata for the page.
	 * load_unaligned_zeropad() can touch memory *adjacent* to that which is
	 * owned by the caller and can catch even _momentary_ mismatches. Bad
	 * things happen on mismatch:
	 *
	 *   - Private mapping => Shared Page  == Guest shutdown
	 *   - Shared mapping  => Private Page == Recoverable #VE
	 *
	 * guest.enc_status_change_prepare() converts the page from
	 * shared=>private before the mapping becomes private.
	 *
	 * guest.enc_status_change_finish() converts the page from
	 * private=>shared after the mapping becomes shared.
	 *
	 * In both cases there is a temporary shared mapping to a private page,
	 * which can result in a #VE. But, there is never a private mapping to
	 * a shared page.
	 */
	x86_platform.guest.enc_status_change_prepare = tdx_enc_status_change_prepare;
	x86_platform.guest.enc_status_change_finish = tdx_enc_status_change_finish;

	x86_platform.guest.enc_cache_flush_required = tdx_cache_flush_required;
	x86_platform.guest.enc_tlb_flush_required = tdx_tlb_flush_required;

	/*
	 * TDX intercepts the RDMSR to read the X2APIC ID in the parallel
	 * bringup low level code. That raises #VE which cannot be handled
	 * there.
	 *
	 * Intel-TDX has a secure RDMSR hypercall, but that needs to be
	 * implemented separately in the low level startup ASM code.
	 * Until that is in place, disable parallel bringup for TDX.
	 */
	x86_cpuinit.parallel_bringup = false;

	pr_info("Guest detected\n");
}