xref: /linux/arch/x86/coco/tdx/tdx.c (revision be4202228e685d580d75ac7597c0e7e50a63dd6c)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2021-2022 Intel Corporation */
3 
4 #undef pr_fmt
5 #define pr_fmt(fmt)     "tdx: " fmt
6 
7 #include <linux/cpufeature.h>
8 #include <linux/export.h>
9 #include <linux/io.h>
10 #include <linux/kexec.h>
11 #include <asm/coco.h>
12 #include <asm/tdx.h>
13 #include <asm/vmx.h>
14 #include <asm/ia32.h>
15 #include <asm/insn.h>
16 #include <asm/insn-eval.h>
17 #include <asm/pgtable.h>
18 #include <asm/set_memory.h>
19 #include <asm/traps.h>
20 
21 /* MMIO direction */
22 #define EPT_READ	0
23 #define EPT_WRITE	1
24 
25 /* Port I/O direction */
26 #define PORT_READ	0
27 #define PORT_WRITE	1
28 
29 /* See Exit Qualification for I/O Instructions in VMX documentation */
30 #define VE_IS_IO_IN(e)		((e) & BIT(3))
31 #define VE_GET_IO_SIZE(e)	(((e) & GENMASK(2, 0)) + 1)
32 #define VE_GET_PORT_NUM(e)	((e) >> 16)
33 #define VE_IS_IO_STRING(e)	((e) & BIT(4))
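/*
 * Worked decode (illustrative values, not taken from the spec text): a
 * single-byte IN from the COM1 UART would deliver exit_qual == 0x03F80008.
 * With the helpers above, VE_IS_IO_IN() is true (bit 3 set),
 * VE_GET_IO_SIZE() == 1, VE_GET_PORT_NUM() == 0x3F8 and VE_IS_IO_STRING()
 * is false.
 */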
34 
35 #define ATTR_DEBUG		BIT(0)
36 #define ATTR_SEPT_VE_DISABLE	BIT(28)
37 
38 /* TDX Module call error codes */
39 #define TDCALL_RETURN_CODE(a)	((a) >> 32)
40 #define TDCALL_INVALID_OPERAND	0xc0000100
41 
42 #define TDREPORT_SUBTYPE_0	0
43 
44 static atomic_long_t nr_shared;
45 
46 /* Called from __tdx_hypercall() for unrecoverable failure */
47 noinstr void __noreturn __tdx_hypercall_failed(void)
48 {
49 	instrumentation_begin();
50 	panic("TDVMCALL failed. TDX module bug?");
51 }
52 
53 #ifdef CONFIG_KVM_GUEST
54 long tdx_kvm_hypercall(unsigned int nr, unsigned long p1, unsigned long p2,
55 		       unsigned long p3, unsigned long p4)
56 {
57 	struct tdx_module_args args = {
58 		.r10 = nr,
59 		.r11 = p1,
60 		.r12 = p2,
61 		.r13 = p3,
62 		.r14 = p4,
63 	};
64 
65 	return __tdx_hypercall(&args);
66 }
67 EXPORT_SYMBOL_GPL(tdx_kvm_hypercall);
68 #endif
69 
70 /*
71  * Used for TDX guests to make calls directly to the TD module.  This
72  * should only be used for calls that have no legitimate reason to fail
73  * or where the kernel can not survive the call failing.
74  */
75 static inline void tdcall(u64 fn, struct tdx_module_args *args)
76 {
77 	if (__tdcall_ret(fn, args))
78 		panic("TDCALL %lld failed (Buggy TDX module!)\n", fn);
79 }
80 
81 /* Read TD-scoped metadata */
82 static inline u64 tdg_vm_rd(u64 field, u64 *value)
83 {
84 	struct tdx_module_args args = {
85 		.rdx = field,
86 	};
87 	u64 ret;
88 
89 	ret = __tdcall_ret(TDG_VM_RD, &args);
90 	*value = args.r8;
91 
92 	return ret;
93 }
94 
95 /* Write TD-scoped metadata */
96 static inline u64 tdg_vm_wr(u64 field, u64 value, u64 mask)
97 {
98 	struct tdx_module_args args = {
99 		.rdx = field,
100 		.r8 = value,
101 		.r9 = mask,
102 	};
103 
104 	return __tdcall(TDG_VM_WR, &args);
105 }
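/*
 * Usage sketch (mirrors the callers later in this file): only the bits
 * selected by @mask are modified, so a single control bit can be set
 * without touching the rest of the field:
 *
 *	tdg_vm_wr(TDCS_TD_CTLS, TD_CTLS_PENDING_VE_DISABLE,
 *		  TD_CTLS_PENDING_VE_DISABLE);
 */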
106 
107 /**
108  * tdx_mcall_get_report0() - Wrapper to get TDREPORT0 (a.k.a. TDREPORT
109  *                           subtype 0) using TDG.MR.REPORT TDCALL.
110  * @reportdata: Address of the input buffer which contains user-defined
111  *              REPORTDATA to be included into TDREPORT.
112  * @tdreport: Address of the output buffer to store TDREPORT.
113  *
114  * Refer to section titled "TDG.MR.REPORT leaf" in the TDX Module
115  * v1.0 specification for more information on TDG.MR.REPORT TDCALL.
116  * It is used in the TDX guest driver module to get the TDREPORT0.
117  *
118  * Return 0 on success, -EINVAL for invalid operands, or -EIO on
119  * other TDCALL failures.
120  */
121 int tdx_mcall_get_report0(u8 *reportdata, u8 *tdreport)
122 {
123 	struct tdx_module_args args = {
124 		.rcx = virt_to_phys(tdreport),
125 		.rdx = virt_to_phys(reportdata),
126 		.r8 = TDREPORT_SUBTYPE_0,
127 	};
128 	u64 ret;
129 
130 	ret = __tdcall(TDG_MR_REPORT, &args);
131 	if (ret) {
132 		if (TDCALL_RETURN_CODE(ret) == TDCALL_INVALID_OPERAND)
133 			return -EINVAL;
134 		return -EIO;
135 	}
136 
137 	return 0;
138 }
139 EXPORT_SYMBOL_GPL(tdx_mcall_get_report0);
140 
141 /**
142  * tdx_hcall_get_quote() - Wrapper to request TD Quote using GetQuote
143  *                         hypercall.
144  * @buf: Address of the directly mapped shared kernel buffer which
145  *       contains TDREPORT. The same buffer will be used by VMM to
146  *       store the generated TD Quote output.
147  * @size: size of the tdquote buffer (4KB-aligned).
148  *
149  * Refer to section titled "TDG.VP.VMCALL<GetQuote>" in the TDX GHCI
150  * v1.0 specification for more information on GetQuote hypercall.
151  * It is used in the TDX guest driver module to get the TD Quote.
152  *
153  * Return 0 on success or error code on failure.
154  */
155 u64 tdx_hcall_get_quote(u8 *buf, size_t size)
156 {
157 	/* Since buf is shared memory, set the shared (decrypted) bits */
158 	return _tdx_hypercall(TDVMCALL_GET_QUOTE, cc_mkdec(virt_to_phys(buf)), size, 0, 0);
159 }
160 EXPORT_SYMBOL_GPL(tdx_hcall_get_quote);
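/*
 * Minimal usage sketch (illustrative; the real consumer is the TDX guest
 * driver): the buffer must be 4KB-aligned and already converted to shared
 * memory, e.g. via set_memory_decrypted(), before it is handed to the VMM:
 *
 *	u8 *buf = (u8 *)__get_free_pages(GFP_KERNEL, get_order(quote_size));
 *
 *	if (set_memory_decrypted((unsigned long)buf, 1 << get_order(quote_size)))
 *		goto err;
 *	ret = tdx_hcall_get_quote(buf, quote_size);
 */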
161 
162 static void __noreturn tdx_panic(const char *msg)
163 {
164 	struct tdx_module_args args = {
165 		.r10 = TDX_HYPERCALL_STANDARD,
166 		.r11 = TDVMCALL_REPORT_FATAL_ERROR,
167 		.r12 = 0, /* Error code: 0 is Panic */
168 	};
169 	union {
170 		/* Define register order according to the GHCI */
171 		struct { u64 r14, r15, rbx, rdi, rsi, r8, r9, rdx; };
172 
173 		char str[64];
174 	} message;
175 
176 	/* VMM assumes '\0' in byte 65, if the message took all 64 bytes */
177 	strtomem_pad(message.str, msg, '\0');
178 
179 	args.r8  = message.r8;
180 	args.r9  = message.r9;
181 	args.r14 = message.r14;
182 	args.r15 = message.r15;
183 	args.rdi = message.rdi;
184 	args.rsi = message.rsi;
185 	args.rbx = message.rbx;
186 	args.rdx = message.rdx;
187 
188 	/*
189 	 * This hypercall should never return and it is not safe
190 	 * to keep the guest running. Call it forever if it
191 	 * happens to return.
192 	 */
193 	while (1)
194 		__tdx_hypercall(&args);
195 }
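/*
 * Worked example (illustrative): for msg = "SEPT #VE", message.r14 ends up
 * holding the eight ASCII bytes in little-endian order
 * (0x4556232054504553) and the remaining registers stay zero because
 * strtomem_pad() pads the unused part of the 64-byte buffer with '\0'.
 */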
196 
197 /*
198  * The kernel cannot handle #VEs when accessing normal kernel memory. Ensure
199  * that no #VE will be delivered for accesses to TD-private memory.
200  *
201  * TDX 1.0 does not allow the guest to disable SEPT #VE on its own. The VMM
202  * controls if the guest will receive such #VE with TD attribute
203  * ATTR_SEPT_VE_DISABLE.
204  *
205  * Newer TDX modules allow the guest to control if it wants to receive SEPT
206  * violation #VEs.
207  *
208  * Check if the feature is available and disable SEPT #VE if possible.
209  *
210  * If the TD is allowed to disable/enable SEPT #VEs, the ATTR_SEPT_VE_DISABLE
211  * attribute is no longer reliable. It reflects the initial state of the
212  * control for the TD, but it will not be updated if someone (e.g. bootloader)
213  * changes it before the kernel starts. Kernel must check TDCS_TD_CTLS bit to
214  * determine if SEPT #VEs are enabled or disabled.
215  */
216 static void disable_sept_ve(u64 td_attr)
217 {
218 	const char *msg = "TD misconfiguration: SEPT #VE has to be disabled";
219 	bool debug = td_attr & ATTR_DEBUG;
220 	u64 config, controls;
221 
222 	/* Is this TD allowed to disable SEPT #VE */
223 	tdg_vm_rd(TDCS_CONFIG_FLAGS, &config);
224 	if (!(config & TDCS_CONFIG_FLEXIBLE_PENDING_VE)) {
225 		/* No SEPT #VE controls for the guest: check the attribute */
226 		if (td_attr & ATTR_SEPT_VE_DISABLE)
227 			return;
228 
229 		/* Relax SEPT_VE_DISABLE check for debug TD for backtraces */
230 		if (debug)
231 			pr_warn("%s\n", msg);
232 		else
233 			tdx_panic(msg);
234 		return;
235 	}
236 
237 	/* Check if SEPT #VE has been disabled before us */
238 	tdg_vm_rd(TDCS_TD_CTLS, &controls);
239 	if (controls & TD_CTLS_PENDING_VE_DISABLE)
240 		return;
241 
242 	/* Keep #VEs enabled for splats in debugging environments */
243 	if (debug)
244 		return;
245 
246 	/* Disable SEPT #VEs */
247 	tdg_vm_wr(TDCS_TD_CTLS, TD_CTLS_PENDING_VE_DISABLE,
248 		  TD_CTLS_PENDING_VE_DISABLE);
249 }
250 
251 /*
252  * TDX 1.0 generates a #VE when accessing topology-related CPUID leafs (0xB and
253  * 0x1F) and the X2APIC_APICID MSR. The kernel returns all zeros on CPUID #VEs.
254  * In practice, this means that the kernel can only boot with a plain topology.
255  * Any complications will cause problems.
256  *
257  * The ENUM_TOPOLOGY feature allows the VMM to provide topology information.
258  * Enabling the feature eliminates topology-related #VEs: the TDX module
259  * virtualizes accesses to the CPUID leafs and the MSR.
260  *
261  * Enable ENUM_TOPOLOGY if it is available.
262  */
263 static void enable_cpu_topology_enumeration(void)
264 {
265 	u64 configured;
266 
267 	/* Has the VMM provided a valid topology configuration? */
268 	tdg_vm_rd(TDCS_TOPOLOGY_ENUM_CONFIGURED, &configured);
269 	if (!configured) {
270 		pr_err("VMM did not configure X2APIC_IDs properly\n");
271 		return;
272 	}
273 
274 	tdg_vm_wr(TDCS_TD_CTLS, TD_CTLS_ENUM_TOPOLOGY, TD_CTLS_ENUM_TOPOLOGY);
275 }
276 
277 static void tdx_setup(u64 *cc_mask)
278 {
279 	struct tdx_module_args args = {};
280 	unsigned int gpa_width;
281 	u64 td_attr;
282 
283 	/*
284 	 * TDINFO TDX module call is used to get the TD execution environment
285 	 * information like GPA width, number of available vcpus, debug mode
286 	 * information, etc. More details about the ABI can be found in TDX
287 	 * Guest-Host-Communication Interface (GHCI), section 2.4.2 TDCALL
288 	 * [TDG.VP.INFO].
289 	 */
290 	tdcall(TDG_VP_INFO, &args);
291 
292 	/*
293 	 * The highest bit of a guest physical address is the "sharing" bit.
294 	 * Set it for shared pages and clear it for private pages.
295 	 *
296 	 * The GPA width that comes out of this call is critical. TDX guests
297 	 * can not meaningfully run without it.
298 	 */
299 	gpa_width = args.rcx & GENMASK(5, 0);
300 	*cc_mask = BIT_ULL(gpa_width - 1);
301 
302 	td_attr = args.rdx;
303 
304 	/* Kernel does not use NOTIFY_ENABLES and does not need random #VEs */
305 	tdg_vm_wr(TDCS_NOTIFY_ENABLES, 0, -1ULL);
306 
307 	disable_sept_ve(td_attr);
308 	enable_cpu_topology_enumeration();
309 }
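/*
 * Worked example (illustrative numbers): with gpa_width == 52, cc_mask
 * becomes BIT_ULL(51). Setting bit 51 in a GPA marks a page shared,
 * clearing it marks the page private, and tdx_early_init() strips the bit
 * from physical_mask so it is never treated as part of the physical
 * address.
 */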
310 
311 /*
312  * The TDX module spec states that #VE may be injected for a limited set of
313  * reasons:
314  *
315  *  - Emulation of the architectural #VE injection on EPT violation;
316  *
317  *  - As a result of guest TD execution of a disallowed instruction,
318  *    a disallowed MSR access, or CPUID virtualization;
319  *
320  *  - A notification to the guest TD about anomalous behavior;
321  *
322  * The last one is opt-in and is not used by the kernel.
323  *
324  * The Intel Software Developer's Manual describes cases when instruction
325  * length field can be used in section "Information for VM Exits Due to
326  * Instruction Execution".
327  *
328  * For TDX, it ultimately means GET_VEINFO provides reliable instruction length
329  * information if #VE occurred due to instruction execution, but not for EPT
330  * violations.
331  */
332 static int ve_instr_len(struct ve_info *ve)
333 {
334 	switch (ve->exit_reason) {
335 	case EXIT_REASON_HLT:
336 	case EXIT_REASON_MSR_READ:
337 	case EXIT_REASON_MSR_WRITE:
338 	case EXIT_REASON_CPUID:
339 	case EXIT_REASON_IO_INSTRUCTION:
340 		/* It is safe to use ve->instr_len for #VEs due to instruction execution */
341 		return ve->instr_len;
342 	case EXIT_REASON_EPT_VIOLATION:
343 		/*
344 		 * For EPT violations, ve->instr_len is not defined. For those,
345 		 * the kernel must decode instructions manually and should not
346 		 * be using this function.
347 		 */
348 		WARN_ONCE(1, "ve->instr_len is not defined for EPT violations");
349 		return 0;
350 	default:
351 		WARN_ONCE(1, "Unexpected #VE-type: %lld\n", ve->exit_reason);
352 		return ve->instr_len;
353 	}
354 }
355 
356 static u64 __cpuidle __halt(const bool irq_disabled)
357 {
358 	struct tdx_module_args args = {
359 		.r10 = TDX_HYPERCALL_STANDARD,
360 		.r11 = hcall_func(EXIT_REASON_HLT),
361 		.r12 = irq_disabled,
362 	};
363 
364 	/*
365 	 * Emulate HLT operation via hypercall. More info about ABI
366 	 * can be found in TDX Guest-Host-Communication Interface
367 	 * (GHCI), section 3.8 TDG.VP.VMCALL<Instruction.HLT>.
368 	 *
369 	 * The VMM uses the "IRQ disabled" param to understand IRQ
370 	 * enabled status (RFLAGS.IF) of the TD guest and to determine
371 	 * whether or not it should schedule the halted vCPU if an
372 	 * IRQ becomes pending. E.g. if IRQs are disabled, the VMM
373 	 * can keep the vCPU in virtual HLT, even if an IRQ is
374 	 * pending, without hanging/breaking the guest.
375 	 */
376 	return __tdx_hypercall(&args);
377 }
378 
379 static int handle_halt(struct ve_info *ve)
380 {
381 	const bool irq_disabled = irqs_disabled();
382 
383 	if (__halt(irq_disabled))
384 		return -EIO;
385 
386 	return ve_instr_len(ve);
387 }
388 
389 void __cpuidle tdx_safe_halt(void)
390 {
391 	const bool irq_disabled = false;
392 
393 	/*
394 	 * Use WARN_ONCE() to report the failure.
395 	 */
396 	if (__halt(irq_disabled))
397 		WARN_ONCE(1, "HLT instruction emulation failed\n");
398 }
399 
400 static int read_msr(struct pt_regs *regs, struct ve_info *ve)
401 {
402 	struct tdx_module_args args = {
403 		.r10 = TDX_HYPERCALL_STANDARD,
404 		.r11 = hcall_func(EXIT_REASON_MSR_READ),
405 		.r12 = regs->cx,
406 	};
407 
408 	/*
409 	 * Emulate the MSR read via hypercall. More info about ABI
410 	 * can be found in TDX Guest-Host-Communication Interface
411 	 * (GHCI), section titled "TDG.VP.VMCALL<Instruction.RDMSR>".
412 	 */
413 	if (__tdx_hypercall(&args))
414 		return -EIO;
415 
416 	regs->ax = lower_32_bits(args.r11);
417 	regs->dx = upper_32_bits(args.r11);
418 	return ve_instr_len(ve);
419 }
420 
421 static int write_msr(struct pt_regs *regs, struct ve_info *ve)
422 {
423 	struct tdx_module_args args = {
424 		.r10 = TDX_HYPERCALL_STANDARD,
425 		.r11 = hcall_func(EXIT_REASON_MSR_WRITE),
426 		.r12 = regs->cx,
427 		.r13 = (u64)regs->dx << 32 | regs->ax,
428 	};
429 
430 	/*
431 	 * Emulate the MSR write via hypercall. More info about ABI
432 	 * can be found in TDX Guest-Host-Communication Interface
433 	 * (GHCI) section titled "TDG.VP.VMCALL<Instruction.WRMSR>".
434 	 */
435 	if (__tdx_hypercall(&args))
436 		return -EIO;
437 
438 	return ve_instr_len(ve);
439 }
440 
441 static int handle_cpuid(struct pt_regs *regs, struct ve_info *ve)
442 {
443 	struct tdx_module_args args = {
444 		.r10 = TDX_HYPERCALL_STANDARD,
445 		.r11 = hcall_func(EXIT_REASON_CPUID),
446 		.r12 = regs->ax,
447 		.r13 = regs->cx,
448 	};
449 
450 	/*
451 	 * Only allow VMM to control range reserved for hypervisor
452 	 * communication.
453 	 *
454 	 * Return all-zeros for any CPUID outside the range. It matches CPU
455 	 * behaviour for a non-supported leaf.
456 	 */
457 	if (regs->ax < 0x40000000 || regs->ax > 0x4FFFFFFF) {
458 		regs->ax = regs->bx = regs->cx = regs->dx = 0;
459 		return ve_instr_len(ve);
460 	}
461 
462 	/*
463 	 * Emulate the CPUID instruction via a hypercall. More info about
464 	 * ABI can be found in TDX Guest-Host-Communication Interface
465 	 * (GHCI), section titled "VP.VMCALL<Instruction.CPUID>".
466 	 */
467 	if (__tdx_hypercall(&args))
468 		return -EIO;
469 
470 	/*
471 	 * As per TDX GHCI CPUID ABI, r12-r15 registers contain contents of
472 	 * EAX, EBX, ECX, EDX registers after the CPUID instruction execution.
473 	 * So copy the register contents back to pt_regs.
474 	 */
475 	regs->ax = args.r12;
476 	regs->bx = args.r13;
477 	regs->cx = args.r14;
478 	regs->dx = args.r15;
479 
480 	return ve_instr_len(ve);
481 }
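/*
 * Example (illustrative): CPUID(0x40000000), the hypervisor vendor leaf,
 * falls inside the 0x40000000-0x4FFFFFFF range and is forwarded to the
 * VMM. Any other leaf that raises a CPUID #VE is answered with all zeros
 * locally, just as an unsupported leaf would be on bare metal.
 */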
482 
483 static bool mmio_read(int size, unsigned long addr, unsigned long *val)
484 {
485 	struct tdx_module_args args = {
486 		.r10 = TDX_HYPERCALL_STANDARD,
487 		.r11 = hcall_func(EXIT_REASON_EPT_VIOLATION),
488 		.r12 = size,
489 		.r13 = EPT_READ,
490 		.r14 = addr,
491 	};
492 
493 	if (__tdx_hypercall(&args))
494 		return false;
495 
496 	*val = args.r11;
497 	return true;
498 }
499 
500 static bool mmio_write(int size, unsigned long addr, unsigned long val)
501 {
502 	return !_tdx_hypercall(hcall_func(EXIT_REASON_EPT_VIOLATION), size,
503 			       EPT_WRITE, addr, val);
504 }
505 
506 static int handle_mmio(struct pt_regs *regs, struct ve_info *ve)
507 {
508 	unsigned long *reg, val, vaddr;
509 	char buffer[MAX_INSN_SIZE];
510 	enum insn_mmio_type mmio;
511 	struct insn insn = {};
512 	int size, extend_size;
513 	u8 extend_val = 0;
514 
515 	/* Only in-kernel MMIO is supported */
516 	if (WARN_ON_ONCE(user_mode(regs)))
517 		return -EFAULT;
518 
519 	if (copy_from_kernel_nofault(buffer, (void *)regs->ip, MAX_INSN_SIZE))
520 		return -EFAULT;
521 
522 	if (insn_decode(&insn, buffer, MAX_INSN_SIZE, INSN_MODE_64))
523 		return -EINVAL;
524 
525 	mmio = insn_decode_mmio(&insn, &size);
526 	if (WARN_ON_ONCE(mmio == INSN_MMIO_DECODE_FAILED))
527 		return -EINVAL;
528 
529 	if (mmio != INSN_MMIO_WRITE_IMM && mmio != INSN_MMIO_MOVS) {
530 		reg = insn_get_modrm_reg_ptr(&insn, regs);
531 		if (!reg)
532 			return -EINVAL;
533 	}
534 
535 	if (!fault_in_kernel_space(ve->gla)) {
536 		WARN_ONCE(1, "Access to userspace address is not supported");
537 		return -EINVAL;
538 	}
539 
540 	/*
541 	 * Reject EPT violation #VEs that split pages.
542 	 *
543 	 * MMIO accesses are supposed to be naturally aligned and therefore
544 	 * never cross page boundaries. Seeing split page accesses indicates
545 	 * a bug or a load_unaligned_zeropad() that stepped into an MMIO page.
546 	 *
547 	 * load_unaligned_zeropad() will recover using exception fixups.
548 	 */
549 	vaddr = (unsigned long)insn_get_addr_ref(&insn, regs);
550 	if (vaddr / PAGE_SIZE != (vaddr + size - 1) / PAGE_SIZE)
551 		return -EFAULT;
552 
553 	/* Handle writes first */
554 	switch (mmio) {
555 	case INSN_MMIO_WRITE:
556 		memcpy(&val, reg, size);
557 		if (!mmio_write(size, ve->gpa, val))
558 			return -EIO;
559 		return insn.length;
560 	case INSN_MMIO_WRITE_IMM:
561 		val = insn.immediate.value;
562 		if (!mmio_write(size, ve->gpa, val))
563 			return -EIO;
564 		return insn.length;
565 	case INSN_MMIO_READ:
566 	case INSN_MMIO_READ_ZERO_EXTEND:
567 	case INSN_MMIO_READ_SIGN_EXTEND:
568 		/* Reads are handled below */
569 		break;
570 	case INSN_MMIO_MOVS:
571 	case INSN_MMIO_DECODE_FAILED:
572 		/*
573 		 * MMIO was accessed with an instruction that could not be
574 		 * decoded or handled properly. It was likely not using io.h
575 		 * helpers or accessed MMIO accidentally.
576 		 */
577 		return -EINVAL;
578 	default:
579 		WARN_ONCE(1, "Unknown insn_decode_mmio() decode value?");
580 		return -EINVAL;
581 	}
582 
583 	/* Handle reads */
584 	if (!mmio_read(size, ve->gpa, &val))
585 		return -EIO;
586 
587 	switch (mmio) {
588 	case INSN_MMIO_READ:
589 		/* Zero-extend for 32-bit operation */
590 		extend_size = size == 4 ? sizeof(*reg) : 0;
591 		break;
592 	case INSN_MMIO_READ_ZERO_EXTEND:
593 		/* Zero extend based on operand size */
594 		extend_size = insn.opnd_bytes;
595 		break;
596 	case INSN_MMIO_READ_SIGN_EXTEND:
597 		/* Sign extend based on operand size */
598 		extend_size = insn.opnd_bytes;
599 		if (size == 1 && val & BIT(7))
600 			extend_val = 0xFF;
601 		else if (size > 1 && val & BIT(15))
602 			extend_val = 0xFF;
603 		break;
604 	default:
605 		/* All other cases have to be covered by the first switch() */
606 		WARN_ON_ONCE(1);
607 		return -EINVAL;
608 	}
609 
610 	if (extend_size)
611 		memset(reg, extend_val, extend_size);
612 	memcpy(reg, &val, size);
613 	return insn.length;
614 }
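/*
 * Example of the split-page rejection in handle_mmio() (illustrative
 * address): a 4-byte read at vaddr == 0xffffc90000000ffe spans two pages
 * ((vaddr + size - 1) lands at offset 0x1001 of the next page), so the two
 * page indices differ and the access is rejected with -EFAULT instead of
 * being emulated.
 */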
615 
616 static bool handle_in(struct pt_regs *regs, int size, int port)
617 {
618 	struct tdx_module_args args = {
619 		.r10 = TDX_HYPERCALL_STANDARD,
620 		.r11 = hcall_func(EXIT_REASON_IO_INSTRUCTION),
621 		.r12 = size,
622 		.r13 = PORT_READ,
623 		.r14 = port,
624 	};
625 	u64 mask = GENMASK(BITS_PER_BYTE * size, 0);
626 	bool success;
627 
628 	/*
629 	 * Emulate the I/O read via hypercall. More info about ABI can be found
630 	 * in TDX Guest-Host-Communication Interface (GHCI) section titled
631 	 * "TDG.VP.VMCALL<Instruction.IO>".
632 	 */
633 	success = !__tdx_hypercall(&args);
634 
635 	/* Update part of the register affected by the emulated instruction */
636 	regs->ax &= ~mask;
637 	if (success)
638 		regs->ax |= args.r11 & mask;
639 
640 	return success;
641 }
642 
643 static bool handle_out(struct pt_regs *regs, int size, int port)
644 {
645 	u64 mask = GENMASK(BITS_PER_BYTE * size, 0);
646 
647 	/*
648 	 * Emulate the I/O write via hypercall. More info about ABI can be found
649 	 * in TDX Guest-Host-Communication Interface (GHCI) section titled
650 	 * "TDG.VP.VMCALL<Instruction.IO>".
651 	 */
652 	return !_tdx_hypercall(hcall_func(EXIT_REASON_IO_INSTRUCTION), size,
653 			       PORT_WRITE, port, regs->ax & mask);
654 }
655 
656 /*
657  * Emulate I/O using hypercall.
658  *
659  * Assumes the IO instruction was using ax, which is enforced
660  * by the standard io.h macros.
661  *
662  * Return True on success or False on failure.
663  */
664 static int handle_io(struct pt_regs *regs, struct ve_info *ve)
665 {
666 	u32 exit_qual = ve->exit_qual;
667 	int size, port;
668 	bool in, ret;
669 
670 	if (VE_IS_IO_STRING(exit_qual))
671 		return -EIO;
672 
673 	in   = VE_IS_IO_IN(exit_qual);
674 	size = VE_GET_IO_SIZE(exit_qual);
675 	port = VE_GET_PORT_NUM(exit_qual);
676 
677 
678 	if (in)
679 		ret = handle_in(regs, size, port);
680 	else
681 		ret = handle_out(regs, size, port);
682 	if (!ret)
683 		return -EIO;
684 
685 	return ve_instr_len(ve);
686 }
687 
688 /*
689  * Early #VE exception handler. Only handles a subset of port I/O.
690  * Intended only for earlyprintk. If failed, return false.
691  */
692 __init bool tdx_early_handle_ve(struct pt_regs *regs)
693 {
694 	struct ve_info ve;
695 	int insn_len;
696 
697 	tdx_get_ve_info(&ve);
698 
699 	if (ve.exit_reason != EXIT_REASON_IO_INSTRUCTION)
700 		return false;
701 
702 	insn_len = handle_io(regs, &ve);
703 	if (insn_len < 0)
704 		return false;
705 
706 	regs->ip += insn_len;
707 	return true;
708 }
709 
710 void tdx_get_ve_info(struct ve_info *ve)
711 {
712 	struct tdx_module_args args = {};
713 
714 	/*
715 	 * Called during #VE handling to retrieve the #VE info from the
716 	 * TDX module.
717 	 *
718 	 * This has to be called early in #VE handling.  A "nested" #VE which
719 	 * occurs before this will raise a #DF and is not recoverable.
720 	 *
721 	 * The call retrieves the #VE info from the TDX module, which also
722 	 * clears the "#VE valid" flag. This must be done before anything else
723 	 * because any #VE that occurs while the valid flag is set will lead to
724 	 * #DF.
725 	 *
726 	 * Note, the TDX module treats virtual NMIs as inhibited if the #VE
727 	 * valid flag is set. It means that NMI=>#VE will not result in a #DF.
728 	 */
729 	tdcall(TDG_VP_VEINFO_GET, &args);
730 
731 	/* Transfer the output parameters */
732 	ve->exit_reason = args.rcx;
733 	ve->exit_qual   = args.rdx;
734 	ve->gla         = args.r8;
735 	ve->gpa         = args.r9;
736 	ve->instr_len   = lower_32_bits(args.r10);
737 	ve->instr_info  = upper_32_bits(args.r10);
738 }
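/*
 * Example (illustrative): for a RDMSR of an MSR the TDX module does not
 * virtualize, the returned ve_info has exit_reason == EXIT_REASON_MSR_READ,
 * instr_len == 2 (the length of the RDMSR opcode) and gla/gpa carry no
 * meaningful data.
 */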
739 
740 /*
741  * Handle the user initiated #VE.
742  *
743  * On success, returns the number of bytes RIP should be incremented (>=0)
744  * or -errno on error.
745  */
746 static int virt_exception_user(struct pt_regs *regs, struct ve_info *ve)
747 {
748 	switch (ve->exit_reason) {
749 	case EXIT_REASON_CPUID:
750 		return handle_cpuid(regs, ve);
751 	default:
752 		pr_warn("Unexpected #VE: %lld\n", ve->exit_reason);
753 		return -EIO;
754 	}
755 }
756 
757 static inline bool is_private_gpa(u64 gpa)
758 {
759 	return gpa == cc_mkenc(gpa);
760 }
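/*
 * Illustrative check (assuming cc_mask == BIT_ULL(51)): cc_mkenc() clears
 * the shared bit, so a GPA of 0x100000000 is already private and passes,
 * while 0x8000100000000 has bit 51 set and is treated as shared.
 */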
761 
762 /*
763  * Handle the kernel #VE.
764  *
765  * On success, returns the number of bytes RIP should be incremented (>=0)
766  * or -errno on error.
767  */
768 static int virt_exception_kernel(struct pt_regs *regs, struct ve_info *ve)
769 {
770 	switch (ve->exit_reason) {
771 	case EXIT_REASON_HLT:
772 		return handle_halt(ve);
773 	case EXIT_REASON_MSR_READ:
774 		return read_msr(regs, ve);
775 	case EXIT_REASON_MSR_WRITE:
776 		return write_msr(regs, ve);
777 	case EXIT_REASON_CPUID:
778 		return handle_cpuid(regs, ve);
779 	case EXIT_REASON_EPT_VIOLATION:
780 		if (is_private_gpa(ve->gpa))
781 			panic("Unexpected EPT-violation on private memory.");
782 		return handle_mmio(regs, ve);
783 	case EXIT_REASON_IO_INSTRUCTION:
784 		return handle_io(regs, ve);
785 	default:
786 		pr_warn("Unexpected #VE: %lld\n", ve->exit_reason);
787 		return -EIO;
788 	}
789 }
790 
791 bool tdx_handle_virt_exception(struct pt_regs *regs, struct ve_info *ve)
792 {
793 	int insn_len;
794 
795 	if (user_mode(regs))
796 		insn_len = virt_exception_user(regs, ve);
797 	else
798 		insn_len = virt_exception_kernel(regs, ve);
799 	if (insn_len < 0)
800 		return false;
801 
802 	/* After successful #VE handling, move the IP */
803 	regs->ip += insn_len;
804 
805 	return true;
806 }
807 
808 static bool tdx_tlb_flush_required(bool private)
809 {
810 	/*
811 	 * TDX guest is responsible for flushing TLB on private->shared
812 	 * transition. VMM is responsible for flushing on shared->private.
813 	 *
814 	 * The VMM _can't_ flush private addresses as it can't generate PAs
815 	 * with the guest's HKID.  Shared memory isn't subject to integrity
816 	 * checking, i.e. the VMM doesn't need to flush for its own protection.
817 	 *
818 	 * There's no need to flush when converting from shared to private,
819 	 * as flushing is the VMM's responsibility in this case, e.g. it must
820 	 * flush to avoid integrity failures in the face of a buggy or
821 	 * malicious guest.
822 	 */
823 	return !private;
824 }
825 
826 static bool tdx_cache_flush_required(void)
827 {
828 	/*
829 	 * AMD SME/SEV can avoid cache flushing if HW enforces cache coherence.
830 	 * TDX doesn't have such capability.
831 	 *
832 	 * Flush cache unconditionally.
833 	 */
834 	return true;
835 }
836 
837 /*
838  * Notify the VMM about page mapping conversion. More info about ABI
839  * can be found in TDX Guest-Host-Communication Interface (GHCI),
840  * section "TDG.VP.VMCALL<MapGPA>".
841  */
842 static bool tdx_map_gpa(phys_addr_t start, phys_addr_t end, bool enc)
843 {
844 	/* Retrying the hypercall a second time should succeed; use 3 just in case */
845 	const int max_retries_per_page = 3;
846 	int retry_count = 0;
847 
848 	if (!enc) {
849 		/* Set the shared (decrypted) bits: */
850 		start |= cc_mkdec(0);
851 		end   |= cc_mkdec(0);
852 	}
853 
854 	while (retry_count < max_retries_per_page) {
855 		struct tdx_module_args args = {
856 			.r10 = TDX_HYPERCALL_STANDARD,
857 			.r11 = TDVMCALL_MAP_GPA,
858 			.r12 = start,
859 			.r13 = end - start };
860 
861 		u64 map_fail_paddr;
862 		u64 ret = __tdx_hypercall(&args);
863 
864 		if (ret != TDVMCALL_STATUS_RETRY)
865 			return !ret;
866 		/*
867 		 * The guest must retry the operation for the pages in the
868 		 * region starting at the GPA specified in R11. R11 comes
869 		 * from the untrusted VMM. Sanity check it.
870 		 */
871 		map_fail_paddr = args.r11;
872 		if (map_fail_paddr < start || map_fail_paddr >= end)
873 			return false;
874 
875 		/* "Consume" a retry without forward progress */
876 		if (map_fail_paddr == start) {
877 			retry_count++;
878 			continue;
879 		}
880 
881 		start = map_fail_paddr;
882 		retry_count = 0;
883 	}
884 
885 	return false;
886 }
887 
888 /*
889  * Inform the VMM of the guest's intent for this physical page: shared with
890  * the VMM or private to the guest.  The VMM is expected to change its mapping
891  * of the page in response.
892  */
893 static bool tdx_enc_status_changed(unsigned long vaddr, int numpages, bool enc)
894 {
895 	phys_addr_t start = __pa(vaddr);
896 	phys_addr_t end   = __pa(vaddr + numpages * PAGE_SIZE);
897 
898 	if (!tdx_map_gpa(start, end, enc))
899 		return false;
900 
901 	/* shared->private conversion requires memory to be accepted before use */
902 	if (enc)
903 		return tdx_accept_memory(start, end);
904 
905 	return true;
906 }
907 
908 static int tdx_enc_status_change_prepare(unsigned long vaddr, int numpages,
909 					 bool enc)
910 {
911 	/*
912 	 * Only handle shared->private conversion here.
913 	 * See the comment in tdx_early_init().
914 	 */
915 	if (enc && !tdx_enc_status_changed(vaddr, numpages, enc))
916 		return -EIO;
917 
918 	return 0;
919 }
920 
921 static int tdx_enc_status_change_finish(unsigned long vaddr, int numpages,
922 					 bool enc)
923 {
924 	/*
925 	 * Only handle private->shared conversion here.
926 	 * See the comment in tdx_early_init().
927 	 */
928 	if (!enc && !tdx_enc_status_changed(vaddr, numpages, enc))
929 		return -EIO;
930 
931 	if (enc)
932 		atomic_long_sub(numpages, &nr_shared);
933 	else
934 		atomic_long_add(numpages, &nr_shared);
935 
936 	return 0;
937 }
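/*
 * Minimal usage sketch (illustrative; conversions normally go through the
 * set_memory_*() helpers rather than calling these callbacks directly):
 *
 *	unsigned long addr = (unsigned long)page_address(page);
 *
 *	set_memory_decrypted(addr, 1);	// private -> shared
 *	set_memory_encrypted(addr, 1);	// shared  -> private
 */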
938 
939 /* Stop new private<->shared conversions */
940 static void tdx_kexec_begin(void)
941 {
942 	if (!IS_ENABLED(CONFIG_KEXEC_CORE))
943 		return;
944 
945 	/*
946 	 * Crash kernel reaches here with interrupts disabled: can't wait for
947 	 * conversions to finish.
948 	 *
949 	 * If a race happened, just report and proceed.
950 	 */
951 	if (!set_memory_enc_stop_conversion())
952 		pr_warn("Failed to stop shared<->private conversions\n");
953 }
954 
955 /* Walk direct mapping and convert all shared memory back to private */
956 static void tdx_kexec_finish(void)
957 {
958 	unsigned long addr, end;
959 	long found = 0, shared;
960 
961 	if (!IS_ENABLED(CONFIG_KEXEC_CORE))
962 		return;
963 
964 	lockdep_assert_irqs_disabled();
965 
966 	addr = PAGE_OFFSET;
967 	end  = PAGE_OFFSET + get_max_mapped();
968 
969 	while (addr < end) {
970 		unsigned long size;
971 		unsigned int level;
972 		pte_t *pte;
973 
974 		pte = lookup_address(addr, &level);
975 		size = page_level_size(level);
976 
977 		if (pte && pte_decrypted(*pte)) {
978 			int pages = size / PAGE_SIZE;
979 
980 			/*
981 			 * Touching memory with the shared bit set triggers implicit
982 			 * conversion to shared.
983 			 *
984 			 * Make sure nobody touches the shared range from
985 			 * now on.
986 			 */
987 			set_pte(pte, __pte(0));
988 
989 			/*
990 			 * Memory encryption state persists across kexec.
991 			 * If tdx_enc_status_changed() fails in the first
992 			 * kernel, it leaves memory in an unknown state.
993 			 *
994 			 * If that memory remains shared, accessing it in the
995 			 * *next* kernel through a private mapping will result
996 			 * in an unrecoverable guest shutdown.
997 			 *
998 			 * The kdump kernel boot is not impacted as it uses
999 			 * a pre-reserved memory range that is always private.
1000 			 * However, gathering crash information could lead to
1001 			 * a crash if it accesses unconverted memory through
1002 			 * a private mapping which is possible when accessing
1003 			 * that memory through /proc/vmcore, for example.
1004 			 *
1005 			 * In all cases, print error info in order to leave
1006 			 * enough bread crumbs for debugging.
1007 			 */
1008 			if (!tdx_enc_status_changed(addr, pages, true)) {
1009 				pr_err("Failed to unshare range %#lx-%#lx\n",
1010 				       addr, addr + size);
1011 			}
1012 
1013 			found += pages;
1014 		}
1015 
1016 		addr += size;
1017 	}
1018 
1019 	__flush_tlb_all();
1020 
1021 	shared = atomic_long_read(&nr_shared);
1022 	if (shared != found) {
1023 		pr_err("shared page accounting is off\n");
1024 		pr_err("nr_shared = %ld, nr_found = %ld\n", shared, found);
1025 	}
1026 }
1027 
1028 void __init tdx_early_init(void)
1029 {
1030 	u64 cc_mask;
1031 	u32 eax, sig[3];
1032 
1033 	cpuid_count(TDX_CPUID_LEAF_ID, 0, &eax, &sig[0], &sig[2],  &sig[1]);
1034 
1035 	if (memcmp(TDX_IDENT, sig, sizeof(sig)))
1036 		return;
1037 
1038 	setup_force_cpu_cap(X86_FEATURE_TDX_GUEST);
1039 
1040 	/* TSC is the only reliable clock in TDX guest */
1041 	setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
1042 
1043 	cc_vendor = CC_VENDOR_INTEL;
1044 
1045 	/* Configure the TD */
1046 	tdx_setup(&cc_mask);
1047 
1048 	cc_set_mask(cc_mask);
1049 
1050 	/*
1051 	 * All bits above the GPA width are reserved and the kernel treats the
1052 	 * shared bit as a flag, not as part of the physical address.
1053 	 *
1054 	 * Adjust physical mask to only cover valid GPA bits.
1055 	 */
1056 	physical_mask &= cc_mask - 1;
1057 
1058 	/*
1059 	 * The kernel mapping should match the TDX metadata for the page.
1060 	 * load_unaligned_zeropad() can touch memory *adjacent* to that which is
1061 	 * owned by the caller and can catch even _momentary_ mismatches.  Bad
1062 	 * things happen on mismatch:
1063 	 *
1064 	 *   - Private mapping => Shared Page  == Guest shutdown
1065 	 *   - Shared mapping  => Private Page == Recoverable #VE
1066 	 *
1067 	 * guest.enc_status_change_prepare() converts the page from
1068 	 * shared=>private before the mapping becomes private.
1069 	 *
1070 	 * guest.enc_status_change_finish() converts the page from
1071 	 * private=>shared after the mapping becomes private.
1072 	 *
1073 	 * In both cases there is a temporary shared mapping to a private page,
1074 	 * which can result in a #VE.  But, there is never a private mapping to
1075 	 * a shared page.
1076 	 */
1077 	x86_platform.guest.enc_status_change_prepare = tdx_enc_status_change_prepare;
1078 	x86_platform.guest.enc_status_change_finish  = tdx_enc_status_change_finish;
1079 
1080 	x86_platform.guest.enc_cache_flush_required  = tdx_cache_flush_required;
1081 	x86_platform.guest.enc_tlb_flush_required    = tdx_tlb_flush_required;
1082 
1083 	x86_platform.guest.enc_kexec_begin	     = tdx_kexec_begin;
1084 	x86_platform.guest.enc_kexec_finish	     = tdx_kexec_finish;
1085 
1086 	/*
1087 	 * TDX intercepts the RDMSR to read the X2APIC ID in the parallel
1088 	 * bringup low level code. That raises #VE which cannot be handled
1089 	 * there.
1090 	 *
1091 	 * Intel-TDX has a secure RDMSR hypercall, but that needs to be
1092 	 * implemented separately in the low level startup ASM code.
1093 	 * Until that is in place, disable parallel bringup for TDX.
1094 	 */
1095 	x86_cpuinit.parallel_bringup = false;
1096 
1097 	pr_info("Guest detected\n");
1098 }
1099