/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2021-2022 Intel Corporation */
#ifndef _ASM_X86_TDX_H
#define _ASM_X86_TDX_H

#include <linux/init.h>
#include <linux/bits.h>
#include <linux/mmzone.h>

#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/trapnr.h>
#include <asm/shared/tdx.h>

/*
 * SW-defined error codes.
 *
 * Bits 47:40 == 0xFF indicate the Reserved status code class, which is
 * never used by the TDX module.
 */
#define TDX_ERROR			_BITUL(63)
#define TDX_NON_RECOVERABLE		_BITUL(62)
#define TDX_SW_ERROR			(TDX_ERROR | GENMASK_ULL(47, 40))
#define TDX_SEAMCALL_VMFAILINVALID	(TDX_SW_ERROR | _UL(0xFFFF0000))

#define TDX_SEAMCALL_GP			(TDX_SW_ERROR | X86_TRAP_GP)
#define TDX_SEAMCALL_UD			(TDX_SW_ERROR | X86_TRAP_UD)
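
/*
 * Worked example (illustrative; the value follows directly from the masks
 * above): a SEAMCALL that faulted with #UD (vector 6) is reported as
 * TDX_SEAMCALL_UD == 0x8000FF0000000006, i.e. TDX_ERROR set, the Reserved
 * class in bits 47:40 and the trap vector in the low bits.
 */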

/*
 * TDX module SEAMCALL leaf function error codes
 */
#define TDX_SUCCESS		0ULL
#define TDX_RND_NO_ENTROPY	0x8000020300000000ULL

#ifndef __ASSEMBLER__

#include <uapi/asm/mce.h>
#include <asm/tdx_global_metadata.h>
#include <linux/pgtable.h>

/*
 * Used by the #VE exception handler to gather the #VE exception
 * info from the TDX module. This is a software only structure
 * and not part of the TDX module/VMM ABI.
 */
struct ve_info {
	u64 exit_reason;
	u64 exit_qual;
	/* Guest Linear (virtual) Address */
	u64 gla;
	/* Guest Physical Address */
	u64 gpa;
	u32 instr_len;
	u32 instr_info;
};

#ifdef CONFIG_INTEL_TDX_GUEST

void __init tdx_early_init(void);

void tdx_get_ve_info(struct ve_info *ve);

bool tdx_handle_virt_exception(struct pt_regs *regs, struct ve_info *ve);
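
/*
 * Minimal usage sketch (illustrative only, not the actual trap handler,
 * which does additional checking and error reporting):
 *
 *	struct ve_info ve;
 *
 *	tdx_get_ve_info(&ve);
 *	if (!tdx_handle_virt_exception(regs, &ve))
 *		handle_fatal_ve(regs, &ve);	// hypothetical error path;
 *						// an unhandled #VE is fatal
 */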

void tdx_halt(void);

bool tdx_early_handle_ve(struct pt_regs *regs);

int tdx_mcall_get_report0(u8 *reportdata, u8 *tdreport);

int tdx_mcall_extend_rtmr(u8 index, u8 *data);

u64 tdx_hcall_get_quote(u8 *buf, size_t size);

void __init tdx_dump_attributes(u64 td_attr);
void __init tdx_dump_td_ctls(u64 td_ctls);

#else

static inline void tdx_early_init(void) { };
static inline void tdx_halt(void) { };

static inline bool tdx_early_handle_ve(struct pt_regs *regs) { return false; }

#endif /* CONFIG_INTEL_TDX_GUEST */

#if defined(CONFIG_KVM_GUEST) && defined(CONFIG_INTEL_TDX_GUEST)
long tdx_kvm_hypercall(unsigned int nr, unsigned long p1, unsigned long p2,
		       unsigned long p3, unsigned long p4);
#else
static inline long tdx_kvm_hypercall(unsigned int nr, unsigned long p1,
				     unsigned long p2, unsigned long p3,
				     unsigned long p4)
{
	return -ENODEV;
}
#endif /* CONFIG_INTEL_TDX_GUEST && CONFIG_KVM_GUEST */

#ifdef CONFIG_INTEL_TDX_HOST
u64 __seamcall(u64 fn, struct tdx_module_args *args);
u64 __seamcall_ret(u64 fn, struct tdx_module_args *args);
u64 __seamcall_saved_ret(u64 fn, struct tdx_module_args *args);
void tdx_init(void);

#include <asm/archrandom.h>

typedef u64 (*sc_func_t)(u64 fn, struct tdx_module_args *args);

static __always_inline u64 sc_retry(sc_func_t func, u64 fn,
			   struct tdx_module_args *args)
{
	int retry = RDRAND_RETRY_LOOPS;
	u64 ret;

	do {
		ret = func(fn, args);
	} while (ret == TDX_RND_NO_ENTROPY && --retry);

	return ret;
}

#define seamcall(_fn, _args)		sc_retry(__seamcall, (_fn), (_args))
#define seamcall_ret(_fn, _args)	sc_retry(__seamcall_ret, (_fn), (_args))
#define seamcall_saved_ret(_fn, _args)	sc_retry(__seamcall_saved_ret, (_fn), (_args))
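
/*
 * sc_retry() exists because some SEAMCALLs can transiently fail with
 * TDX_RND_NO_ENTROPY; retrying a bounded number of times mirrors the
 * RDRAND retry convention. Illustrative use only (the leaf name below is
 * hypothetical; see the tdh_*() wrappers further down for real callers):
 *
 *	struct tdx_module_args args = { .rcx = field_id };
 *	u64 err;
 *
 *	err = seamcall_ret(TDH_EXAMPLE_LEAF, &args);
 *	if (err)
 *		return err;
 *	// on success, output registers come back in 'args'
 */
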
int tdx_cpu_enable(void);
int tdx_enable(void);
const char *tdx_dump_mce_info(struct mce *m);
const struct tdx_sys_info *tdx_get_sysinfo(void);

int tdx_guest_keyid_alloc(void);
u32 tdx_get_nr_guest_keyids(void);
void tdx_guest_keyid_free(unsigned int keyid);

struct tdx_td {
	/* TD root structure: */
	struct page *tdr_page;

	int tdcs_nr_pages;
	/* TD control structure: */
	struct page **tdcs_pages;

	/* Size of `tdcx_pages` in struct tdx_vp */
	int tdcx_nr_pages;
};

struct tdx_vp {
	/* TDVP root page */
	struct page *tdvpr_page;

	/* TD vCPU control structure: */
	struct page **tdcx_pages;
};

static inline u64 mk_keyed_paddr(u16 hkid, struct page *page)
{
	u64 ret;

	ret = page_to_phys(page);
	/* KeyID bits are just above the physical address bits: */
	ret |= (u64)hkid << boot_cpu_data.x86_phys_bits;

	return ret;
}
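
/*
 * Worked example (numbers illustrative): with boot_cpu_data.x86_phys_bits
 * == 46, hkid == 3 and a page at physical address 0x1000, the returned
 * value is (3ULL << 46) | 0x1000 == 0xc00000001000.
 */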
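/*
 * The TDX module's Secure EPT levels are zero-based, so this maps
 * PG_LEVEL_4K (1) to 0, PG_LEVEL_2M (2) to 1 and PG_LEVEL_1G (3) to 2.
 */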
static inline int pg_level_to_tdx_sept_level(enum pg_level level)
{
	WARN_ON_ONCE(level == PG_LEVEL_NONE);
	return level - 1;
}

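/*
 * Thin wrappers around individual TDH.* SEAMCALL leaves. As a rough,
 * deliberately simplified sketch (illustrative only; error handling and
 * per-package work omitted), a TD is typically built in this order:
 *
 *	tdh_mng_create() -> tdh_mng_key_config() -> tdh_mng_addcx() ->
 *	tdh_mng_init() -> tdh_vp_create()/tdh_vp_addcx()/tdh_vp_init() ->
 *	tdh_mem_sept_add()/tdh_mem_page_add() -> tdh_mr_extend() ->
 *	tdh_mr_finalize() -> tdh_vp_enter()
 */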
u64 tdh_vp_enter(struct tdx_vp *vp, struct tdx_module_args *args);
u64 tdh_mng_addcx(struct tdx_td *td, struct page *tdcs_page);
u64 tdh_mem_page_add(struct tdx_td *td, u64 gpa, struct page *page, struct page *source, u64 *ext_err1, u64 *ext_err2);
u64 tdh_mem_sept_add(struct tdx_td *td, u64 gpa, int level, struct page *page, u64 *ext_err1, u64 *ext_err2);
u64 tdh_vp_addcx(struct tdx_vp *vp, struct page *tdcx_page);
u64 tdh_mem_page_aug(struct tdx_td *td, u64 gpa, int level, struct page *page, u64 *ext_err1, u64 *ext_err2);
u64 tdh_mem_range_block(struct tdx_td *td, u64 gpa, int level, u64 *ext_err1, u64 *ext_err2);
u64 tdh_mng_key_config(struct tdx_td *td);
u64 tdh_mng_create(struct tdx_td *td, u16 hkid);
u64 tdh_vp_create(struct tdx_td *td, struct tdx_vp *vp);
u64 tdh_mng_rd(struct tdx_td *td, u64 field, u64 *data);
u64 tdh_mr_extend(struct tdx_td *td, u64 gpa, u64 *ext_err1, u64 *ext_err2);
u64 tdh_mr_finalize(struct tdx_td *td);
u64 tdh_vp_flush(struct tdx_vp *vp);
u64 tdh_mng_vpflushdone(struct tdx_td *td);
u64 tdh_mng_key_freeid(struct tdx_td *td);
u64 tdh_mng_init(struct tdx_td *td, u64 td_params, u64 *extended_err);
u64 tdh_vp_init(struct tdx_vp *vp, u64 initial_rcx, u32 x2apicid);
u64 tdh_vp_rd(struct tdx_vp *vp, u64 field, u64 *data);
u64 tdh_vp_wr(struct tdx_vp *vp, u64 field, u64 data, u64 mask);
u64 tdh_phymem_page_reclaim(struct page *page, u64 *tdx_pt, u64 *tdx_owner, u64 *tdx_size);
u64 tdh_mem_track(struct tdx_td *tdr);
u64 tdh_mem_page_remove(struct tdx_td *td, u64 gpa, u64 level, u64 *ext_err1, u64 *ext_err2);
u64 tdh_phymem_cache_wb(bool resume);
u64 tdh_phymem_page_wbinvd_tdr(struct tdx_td *td);
u64 tdh_phymem_page_wbinvd_hkid(u64 hkid, struct page *page);
#else
static inline void tdx_init(void) { }
static inline int tdx_cpu_enable(void) { return -ENODEV; }
static inline int tdx_enable(void)  { return -ENODEV; }
static inline u32 tdx_get_nr_guest_keyids(void) { return 0; }
static inline const char *tdx_dump_mce_info(struct mce *m) { return NULL; }
static inline const struct tdx_sys_info *tdx_get_sysinfo(void) { return NULL; }
#endif	/* CONFIG_INTEL_TDX_HOST */

#endif /* !__ASSEMBLER__ */
#endif /* _ASM_X86_TDX_H */