xref: /linux/arch/x86/include/asm/tdx.h (revision fd02aa45bda6d2f2fedcab70e828867332ef7e1c)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* Copyright (C) 2021-2022 Intel Corporation */
3 #ifndef _ASM_X86_TDX_H
4 #define _ASM_X86_TDX_H
5 
6 #include <linux/init.h>
7 #include <linux/bits.h>
8 #include <linux/mmzone.h>
9 
10 #include <asm/errno.h>
11 #include <asm/ptrace.h>
12 #include <asm/trapnr.h>
13 #include <asm/shared/tdx.h>
14 
15 /*
16  * SW-defined error codes.
17  *
 * Bits 47:40 == 0xFF indicate the Reserved status code class, which is
 * never used by the TDX module.
20  */
21 #define TDX_ERROR			_BITUL(63)
22 #define TDX_NON_RECOVERABLE		_BITUL(62)
23 #define TDX_SW_ERROR			(TDX_ERROR | GENMASK_ULL(47, 40))
24 #define TDX_SEAMCALL_VMFAILINVALID	(TDX_SW_ERROR | _UL(0xFFFF0000))
25 
26 #define TDX_SEAMCALL_GP			(TDX_SW_ERROR | X86_TRAP_GP)
27 #define TDX_SEAMCALL_UD			(TDX_SW_ERROR | X86_TRAP_UD)
28 
29 /*
30  * TDX module SEAMCALL leaf function error codes
31  */
32 #define TDX_SUCCESS		0ULL
33 #define TDX_RND_NO_ENTROPY	0x8000020300000000ULL
34 
35 #ifndef __ASSEMBLER__
36 
37 #include <uapi/asm/mce.h>
38 #include <asm/tdx_global_metadata.h>
39 #include <linux/pgtable.h>
40 
41 /*
42  * Used by the #VE exception handler to gather the #VE exception
43  * info from the TDX module. This is a software only structure
44  * and not part of the TDX module/VMM ABI.
45  */
struct ve_info {
	/* VM-exit reason reported by the TDX module for this #VE */
	u64 exit_reason;
	/* Exit qualification for the exit reason */
	u64 exit_qual;
	/* Guest Linear (virtual) Address */
	u64 gla;
	/* Guest Physical Address */
	u64 gpa;
	/* Length of the faulting instruction, in bytes */
	u32 instr_len;
	/* Additional instruction decode information */
	u32 instr_info;
};
56 
57 #ifdef CONFIG_INTEL_TDX_GUEST
58 
59 void __init tdx_early_init(void);
60 
61 void tdx_get_ve_info(struct ve_info *ve);
62 
63 bool tdx_handle_virt_exception(struct pt_regs *regs, struct ve_info *ve);
64 
65 void tdx_halt(void);
66 
67 bool tdx_early_handle_ve(struct pt_regs *regs);
68 
69 int tdx_mcall_get_report0(u8 *reportdata, u8 *tdreport);
70 
71 u64 tdx_hcall_get_quote(u8 *buf, size_t size);
72 
73 void __init tdx_dump_attributes(u64 td_attr);
74 void __init tdx_dump_td_ctls(u64 td_ctls);
75 
76 #else
77 
/*
 * Stubs for CONFIG_INTEL_TDX_GUEST=n builds.
 *
 * Fix: drop the stray ';' after the empty function bodies — "{ };"
 * leaves a redundant empty declaration (flagged by checkpatch) and is
 * inconsistent with the host-side stubs later in this file.
 */
static inline void tdx_early_init(void) { }
static inline void tdx_halt(void) { }

static inline bool tdx_early_handle_ve(struct pt_regs *regs) { return false; }
82 
83 #endif /* CONFIG_INTEL_TDX_GUEST */
84 
85 #if defined(CONFIG_KVM_GUEST) && defined(CONFIG_INTEL_TDX_GUEST)
86 long tdx_kvm_hypercall(unsigned int nr, unsigned long p1, unsigned long p2,
87 		       unsigned long p3, unsigned long p4);
88 #else
/*
 * Stub used when TDX guest + KVM guest support is not compiled in:
 * report that the hypercall mechanism is unavailable.
 */
static inline long tdx_kvm_hypercall(unsigned int nr, unsigned long p1,
				     unsigned long p2, unsigned long p3,
				     unsigned long p4)
{
	return -ENODEV;
}
95 #endif /* CONFIG_INTEL_TDX_GUEST && CONFIG_KVM_GUEST */
96 
97 #ifdef CONFIG_INTEL_TDX_HOST
98 u64 __seamcall(u64 fn, struct tdx_module_args *args);
99 u64 __seamcall_ret(u64 fn, struct tdx_module_args *args);
100 u64 __seamcall_saved_ret(u64 fn, struct tdx_module_args *args);
101 void tdx_init(void);
102 
103 #include <asm/archrandom.h>
104 
105 typedef u64 (*sc_func_t)(u64 fn, struct tdx_module_args *args);
106 
107 static inline u64 sc_retry(sc_func_t func, u64 fn,
108 			   struct tdx_module_args *args)
109 {
110 	int retry = RDRAND_RETRY_LOOPS;
111 	u64 ret;
112 
113 	do {
114 		ret = func(fn, args);
115 	} while (ret == TDX_RND_NO_ENTROPY && --retry);
116 
117 	return ret;
118 }
119 
120 #define seamcall(_fn, _args)		sc_retry(__seamcall, (_fn), (_args))
121 #define seamcall_ret(_fn, _args)	sc_retry(__seamcall_ret, (_fn), (_args))
122 #define seamcall_saved_ret(_fn, _args)	sc_retry(__seamcall_saved_ret, (_fn), (_args))
123 int tdx_cpu_enable(void);
124 int tdx_enable(void);
125 const char *tdx_dump_mce_info(struct mce *m);
126 const struct tdx_sys_info *tdx_get_sysinfo(void);
127 
128 int tdx_guest_keyid_alloc(void);
129 u32 tdx_get_nr_guest_keyids(void);
130 void tdx_guest_keyid_free(unsigned int keyid);
131 
/* Host-side bookkeeping for the pages backing one TDX guest (TD) */
struct tdx_td {
	/* TD root structure: */
	struct page *tdr_page;

	/* Number of entries in @tdcs_pages */
	int tdcs_nr_pages;
	/* TD control structure: */
	struct page **tdcs_pages;

	/* Size of `tdcx_pages` in struct tdx_vp */
	int tdcx_nr_pages;
};
143 
/* Host-side bookkeeping for the pages backing one TD vCPU */
struct tdx_vp {
	/* TDVP root page */
	struct page *tdvpr_page;

	/* TD vCPU control structure: */
	struct page **tdcx_pages;
};
151 
152 static inline u64 mk_keyed_paddr(u16 hkid, struct page *page)
153 {
154 	u64 ret;
155 
156 	ret = page_to_phys(page);
157 	/* KeyID bits are just above the physical address bits: */
158 	ret |= (u64)hkid << boot_cpu_data.x86_phys_bits;
159 
160 	return ret;
161 }
162 
163 static inline int pg_level_to_tdx_sept_level(enum pg_level level)
164 {
165         WARN_ON_ONCE(level == PG_LEVEL_NONE);
166         return level - 1;
167 }
168 
169 u64 tdh_vp_enter(struct tdx_vp *vp, struct tdx_module_args *args);
170 u64 tdh_mng_addcx(struct tdx_td *td, struct page *tdcs_page);
171 u64 tdh_mem_page_add(struct tdx_td *td, u64 gpa, struct page *page, struct page *source, u64 *ext_err1, u64 *ext_err2);
172 u64 tdh_mem_sept_add(struct tdx_td *td, u64 gpa, int level, struct page *page, u64 *ext_err1, u64 *ext_err2);
173 u64 tdh_vp_addcx(struct tdx_vp *vp, struct page *tdcx_page);
174 u64 tdh_mem_page_aug(struct tdx_td *td, u64 gpa, int level, struct page *page, u64 *ext_err1, u64 *ext_err2);
175 u64 tdh_mem_range_block(struct tdx_td *td, u64 gpa, int level, u64 *ext_err1, u64 *ext_err2);
176 u64 tdh_mng_key_config(struct tdx_td *td);
177 u64 tdh_mng_create(struct tdx_td *td, u16 hkid);
178 u64 tdh_vp_create(struct tdx_td *td, struct tdx_vp *vp);
179 u64 tdh_mng_rd(struct tdx_td *td, u64 field, u64 *data);
180 u64 tdh_mr_extend(struct tdx_td *td, u64 gpa, u64 *ext_err1, u64 *ext_err2);
181 u64 tdh_mr_finalize(struct tdx_td *td);
182 u64 tdh_vp_flush(struct tdx_vp *vp);
183 u64 tdh_mng_vpflushdone(struct tdx_td *td);
184 u64 tdh_mng_key_freeid(struct tdx_td *td);
185 u64 tdh_mng_init(struct tdx_td *td, u64 td_params, u64 *extended_err);
186 u64 tdh_vp_init(struct tdx_vp *vp, u64 initial_rcx, u32 x2apicid);
187 u64 tdh_vp_rd(struct tdx_vp *vp, u64 field, u64 *data);
188 u64 tdh_vp_wr(struct tdx_vp *vp, u64 field, u64 data, u64 mask);
189 u64 tdh_phymem_page_reclaim(struct page *page, u64 *tdx_pt, u64 *tdx_owner, u64 *tdx_size);
190 u64 tdh_mem_track(struct tdx_td *tdr);
191 u64 tdh_mem_page_remove(struct tdx_td *td, u64 gpa, u64 level, u64 *ext_err1, u64 *ext_err2);
192 u64 tdh_phymem_cache_wb(bool resume);
193 u64 tdh_phymem_page_wbinvd_tdr(struct tdx_td *td);
194 u64 tdh_phymem_page_wbinvd_hkid(u64 hkid, struct page *page);
195 #else
/* Stubs for CONFIG_INTEL_TDX_HOST=n builds: TDX host support absent. */
static inline void tdx_init(void) { }
static inline int tdx_cpu_enable(void) { return -ENODEV; }
static inline int tdx_enable(void)  { return -ENODEV; }
static inline u32 tdx_get_nr_guest_keyids(void) { return 0; }
static inline const char *tdx_dump_mce_info(struct mce *m) { return NULL; }
static inline const struct tdx_sys_info *tdx_get_sysinfo(void) { return NULL; }
202 #endif	/* CONFIG_INTEL_TDX_HOST */
203 
204 #endif /* !__ASSEMBLER__ */
205 #endif /* _ASM_X86_TDX_H */
206