/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2021-2022 Intel Corporation */
#ifndef _ASM_X86_TDX_H
#define _ASM_X86_TDX_H

#include <linux/init.h>
#include <linux/bits.h>

#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/trapnr.h>
#include <asm/shared/tdx.h>

/*
 * SW-defined error codes.
 *
 * Bits 47:40 == 0xFF indicate the Reserved status code class, which is
 * never used by the TDX module.
 */
#define TDX_ERROR			_BITUL(63)
#define TDX_SW_ERROR			(TDX_ERROR | GENMASK_ULL(47, 40))
#define TDX_SEAMCALL_VMFAILINVALID	(TDX_SW_ERROR | _UL(0xFFFF0000))

#define TDX_SEAMCALL_GP			(TDX_SW_ERROR | X86_TRAP_GP)
#define TDX_SEAMCALL_UD			(TDX_SW_ERROR | X86_TRAP_UD)
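
/*
 * Worked example of the encoding above (values follow directly from the
 * macro definitions; X86_TRAP_GP == 13, X86_TRAP_UD == 6):
 *
 *	TDX_ERROR			= 0x8000000000000000
 *	TDX_SW_ERROR			= 0x8000FF0000000000
 *	TDX_SEAMCALL_GP  (#GP, 13)	= 0x8000FF000000000D
 *	TDX_SEAMCALL_UD  (#UD,  6)	= 0x8000FF0000000006
 *
 * A caller can therefore tell a kernel-synthesized error from a genuine
 * TDX module status code with a check along these lines (illustrative
 * sketch only):
 *
 *	if ((err & TDX_SW_ERROR) == TDX_SW_ERROR)
 *		pr_err("SEAMCALL not run or faulted: 0x%llx\n", err);
 */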

/*
 * TDX module SEAMCALL leaf function error codes
 */
#define TDX_SUCCESS		0ULL
#define TDX_RND_NO_ENTROPY	0x8000020300000000ULL

#ifndef __ASSEMBLY__

#include <uapi/asm/mce.h>

/*
 * Used by the #VE exception handler to gather the #VE exception
 * info from the TDX module. This is a software-only structure
 * and not part of the TDX module/VMM ABI.
 */
struct ve_info {
	u64 exit_reason;
	u64 exit_qual;
	/* Guest Linear (virtual) Address */
	u64 gla;
	/* Guest Physical Address */
	u64 gpa;
	u32 instr_len;
	u32 instr_info;
};
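
/*
 * Rough sketch of the intended call flow (illustrative only; error
 * handling and the fault-raising fallback are omitted): the #VE handler
 * asks the TDX module for the exception details and then tries to handle
 * the event:
 *
 *	struct ve_info ve;
 *	bool handled;
 *
 *	tdx_get_ve_info(&ve);
 *	handled = tdx_handle_virt_exception(regs, &ve);
 *	if (!handled) {
 *		// not handled: the exception handler raises a fault
 *	}
 */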

#ifdef CONFIG_INTEL_TDX_GUEST

void __init tdx_early_init(void);

void tdx_get_ve_info(struct ve_info *ve);

bool tdx_handle_virt_exception(struct pt_regs *regs, struct ve_info *ve);

void tdx_safe_halt(void);

bool tdx_early_handle_ve(struct pt_regs *regs);

int tdx_mcall_get_report0(u8 *reportdata, u8 *tdreport);

u64 tdx_hcall_get_quote(u8 *buf, size_t size);
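
/*
 * Illustrative sketch of the attestation flow (error handling trimmed;
 * 'quote_buf' is a placeholder for a caller-provided buffer shared with
 * the VMM, and the 64/1024-byte sizes assume the TDX_REPORTDATA_LEN and
 * TDX_REPORT_LEN values from <uapi/linux/tdx-guest.h>):
 *
 *	u8 reportdata[64], tdreport[1024];
 *
 *	// TDCALL into the TDX module for a TDREPORT bound to 'reportdata'...
 *	if (tdx_mcall_get_report0(reportdata, tdreport))
 *		return -EIO;
 *	// ...then ask the VMM (via TDVMCALL) to convert a TDREPORT placed in
 *	// the shared buffer into a remotely verifiable quote.
 *	ret = tdx_hcall_get_quote(quote_buf, quote_buf_len);
 */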

#else

static inline void tdx_early_init(void) { }
static inline void tdx_safe_halt(void) { }

static inline bool tdx_early_handle_ve(struct pt_regs *regs) { return false; }

#endif /* CONFIG_INTEL_TDX_GUEST */

#if defined(CONFIG_KVM_GUEST) && defined(CONFIG_INTEL_TDX_GUEST)
long tdx_kvm_hypercall(unsigned int nr, unsigned long p1, unsigned long p2,
		       unsigned long p3, unsigned long p4);
#else
static inline long tdx_kvm_hypercall(unsigned int nr, unsigned long p1,
				     unsigned long p2, unsigned long p3,
				     unsigned long p4)
{
	return -ENODEV;
}
#endif /* CONFIG_KVM_GUEST && CONFIG_INTEL_TDX_GUEST */
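
/*
 * Minimal usage sketch (illustrative only, not a new API): on TDX guests
 * the generic kvm_hypercall*() helpers in <asm/kvm_para.h> route through
 * this function so that the hypercall is issued via TDVMCALL rather than
 * VMCALL, roughly:
 *
 *	if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
 *		return tdx_kvm_hypercall(nr, p1, p2, p3, p4);
 *	// otherwise fall through to the plain VMCALL/VMMCALL path
 */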

#ifdef CONFIG_INTEL_TDX_HOST
u64 __seamcall(u64 fn, struct tdx_module_args *args);
u64 __seamcall_ret(u64 fn, struct tdx_module_args *args);
u64 __seamcall_saved_ret(u64 fn, struct tdx_module_args *args);
void tdx_init(void);

#include <asm/archrandom.h>

typedef u64 (*sc_func_t)(u64 fn, struct tdx_module_args *args);

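/*
 * Retry wrapper for SEAMCALL: the TDX module can transiently fail with
 * TDX_RND_NO_ENTROPY when it runs out of entropy, so retry a bounded
 * number of times (reusing the RDRAND retry count from <asm/archrandom.h>)
 * before giving up and returning the last error to the caller.
 */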
static inline u64 sc_retry(sc_func_t func, u64 fn,
			   struct tdx_module_args *args)
{
	int retry = RDRAND_RETRY_LOOPS;
	u64 ret;

	do {
		ret = func(fn, args);
	} while (ret == TDX_RND_NO_ENTROPY && --retry);

	return ret;
}

#define seamcall(_fn, _args)		sc_retry(__seamcall, (_fn), (_args))
#define seamcall_ret(_fn, _args)	sc_retry(__seamcall_ret, (_fn), (_args))
#define seamcall_saved_ret(_fn, _args)	sc_retry(__seamcall_saved_ret, (_fn), (_args))
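
/*
 * Minimal usage sketch for the wrappers above (TDH_SOMELEAF and 'some_input'
 * are hypothetical placeholders, shown only to illustrate the calling
 * convention; real leaf numbers are defined by the host-side TDX code):
 *
 *	struct tdx_module_args args = {
 *		.rcx = some_input,
 *	};
 *	u64 err;
 *
 *	// The _ret variants copy the module's output registers back into 'args'.
 *	err = seamcall_ret(TDH_SOMELEAF, &args);
 *	if (err != TDX_SUCCESS)
 *		pr_err("SEAMCALL failed: 0x%llx\n", err);
 */
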
int tdx_cpu_enable(void);
int tdx_enable(void);
const char *tdx_dump_mce_info(struct mce *m);
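
/*
 * Rough sketch of how a caller (e.g. KVM) is expected to bring up the TDX
 * module (illustrative only; the CPU hotplug locking and the per-CPU VMXON
 * plus tdx_cpu_enable() steps are the caller's responsibility):
 *
 *	cpus_read_lock();
 *	// ...VMXON and tdx_cpu_enable() on each online CPU...
 *	if (!tdx_enable())
 *		pr_info("TDX module initialized\n");
 *	cpus_read_unlock();
 */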
#else
static inline void tdx_init(void) { }
static inline int tdx_cpu_enable(void) { return -ENODEV; }
static inline int tdx_enable(void) { return -ENODEV; }
static inline const char *tdx_dump_mce_info(struct mce *m) { return NULL; }
#endif	/* CONFIG_INTEL_TDX_HOST */

#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_TDX_H */