/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_TDX_H
#define __KVM_X86_VMX_TDX_H

#include "tdx_arch.h"
#include "tdx_errno.h"

#ifdef CONFIG_KVM_INTEL_TDX
#include "common.h"

int tdx_bringup(void);
void tdx_cleanup(void);

extern bool enable_tdx;

/* TDX module hardware states. These follow the TDX module OP_STATEs. */
enum kvm_tdx_state {
        TD_STATE_UNINITIALIZED = 0,
        TD_STATE_INITIALIZED,
        TD_STATE_RUNNABLE,
};
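
/*
 * Orientation only, not a restatement of the OP_STATE table: KVM expects a TD
 * to move from TD_STATE_UNINITIALIZED to TD_STATE_INITIALIZED once
 * TDH.MNG.INIT succeeds, and to TD_STATE_RUNNABLE once the TD measurement is
 * finalized (TDH.MR.FINALIZE), after which vCPUs may enter the guest.
 */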

struct kvm_tdx {
        struct kvm kvm;

        struct misc_cg *misc_cg;
        int hkid;
        enum kvm_tdx_state state;

        u64 attributes;
        u64 xfam;

        u64 tsc_offset;
        u64 tsc_multiplier;

        struct tdx_td td;

        /* For KVM_TDX_INIT_MEM_REGION. */
        atomic64_t nr_premapped;

        /*
         * Prevent vCPUs from TD entry to ensure SEPT zap related SEAMCALLs do
         * not contend with tdh_vp_enter() and TDCALLs.
         * Set/unset is protected with kvm->mmu_lock.
         */
        bool wait_for_sept_zap;
};
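
/*
 * Illustrative sketch only; no such helper is declared in this header.  Code
 * that holds the generic struct kvm of a TD would typically recover the
 * containing struct kvm_tdx with container_of(), e.g.:
 *
 *        static inline struct kvm_tdx *to_kvm_tdx(struct kvm *kvm)
 *        {
 *                return container_of(kvm, struct kvm_tdx, kvm);
 *        }
 */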

/* TDX module vCPU states */
enum vcpu_tdx_state {
        VCPU_TD_STATE_UNINITIALIZED = 0,
        VCPU_TD_STATE_INITIALIZED,
};

struct vcpu_tdx {
        struct kvm_vcpu vcpu;
        struct vcpu_vt vt;
        u64 ext_exit_qualification;
        gpa_t exit_gpa;
        struct tdx_module_args vp_enter_args;

        struct tdx_vp vp;

        struct list_head cpu_list;

        u64 vp_enter_ret;

        enum vcpu_tdx_state state;
        bool guest_entered;

        u64 map_gpa_next;
        u64 map_gpa_end;
};
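
/*
 * Likewise for vCPUs: a hypothetical conversion helper (not part of this
 * header) would wrap container_of() over the embedded struct kvm_vcpu:
 *
 *        static inline struct vcpu_tdx *to_tdx(struct kvm_vcpu *vcpu)
 *        {
 *                return container_of(vcpu, struct vcpu_tdx, vcpu);
 *        }
 */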

void tdh_vp_rd_failed(struct vcpu_tdx *tdx, char *uclass, u32 field, u64 err);
void tdh_vp_wr_failed(struct vcpu_tdx *tdx, char *uclass, char *op, u32 field,
                      u64 val, u64 err);

static __always_inline u64 td_tdcs_exec_read64(struct kvm_tdx *kvm_tdx, u32 field)
{
        u64 err, data;

        err = tdh_mng_rd(&kvm_tdx->td, TDCS_EXEC(field), &data);
        if (unlikely(err)) {
                pr_err("TDH_MNG_RD[EXEC.0x%x] failed: 0x%llx\n", field, err);
                return 0;
        }
        return data;
}
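
/*
 * Illustrative use; the field constant below is an assumption about the TDCS
 * execution-control encodings defined elsewhere (e.g. tdx_arch.h), not
 * something declared in this header:
 *
 *        kvm_tdx->tsc_offset = td_tdcs_exec_read64(kvm_tdx, TD_TDCS_EXEC_TSC_OFFSET);
 *
 * Note that on SEAMCALL failure the helper logs the error and returns 0
 * instead of propagating the TDX status code.
 */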

static __always_inline void tdvps_vmcs_check(u32 field, u8 bits)
{
#define VMCS_ENC_ACCESS_TYPE_MASK       0x1UL
#define VMCS_ENC_ACCESS_TYPE_FULL       0x0UL
#define VMCS_ENC_ACCESS_TYPE_HIGH       0x1UL
#define VMCS_ENC_ACCESS_TYPE(field)     ((field) & VMCS_ENC_ACCESS_TYPE_MASK)

        /* TDX is 64bit only. HIGH field isn't supported. */
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) &&
                         VMCS_ENC_ACCESS_TYPE(field) == VMCS_ENC_ACCESS_TYPE_HIGH,
                         "Read/Write to TD VMCS *_HIGH fields not supported");

        BUILD_BUG_ON(bits != 16 && bits != 32 && bits != 64);

#define VMCS_ENC_WIDTH_MASK     GENMASK(14, 13)
#define VMCS_ENC_WIDTH_16BIT    (0UL << 13)
#define VMCS_ENC_WIDTH_64BIT    (1UL << 13)
#define VMCS_ENC_WIDTH_32BIT    (2UL << 13)
#define VMCS_ENC_WIDTH_NATURAL  (3UL << 13)
#define VMCS_ENC_WIDTH(field)   ((field) & VMCS_ENC_WIDTH_MASK)

        /* TDX is 64bit only. i.e. natural width = 64bit. */
        BUILD_BUG_ON_MSG(bits != 64 && __builtin_constant_p(field) &&
                         (VMCS_ENC_WIDTH(field) == VMCS_ENC_WIDTH_64BIT ||
                          VMCS_ENC_WIDTH(field) == VMCS_ENC_WIDTH_NATURAL),
                         "Invalid TD VMCS access for 64-bit field");
        BUILD_BUG_ON_MSG(bits != 32 && __builtin_constant_p(field) &&
                         VMCS_ENC_WIDTH(field) == VMCS_ENC_WIDTH_32BIT,
                         "Invalid TD VMCS access for 32-bit field");
        BUILD_BUG_ON_MSG(bits != 16 && __builtin_constant_p(field) &&
                         VMCS_ENC_WIDTH(field) == VMCS_ENC_WIDTH_16BIT,
                         "Invalid TD VMCS access for 16-bit field");
}
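
/*
 * Worked example using the standard VMX field encodings (defined in the VMX
 * headers, not here): GUEST_RIP (0x681e) has width bits [14:13] = 3 (natural
 * width), so with a constant field only the 64-bit accessors below compile
 * for it; VM_EXIT_INTR_INFO (0x4404) has width bits = 2 (32-bit), so only the
 * 32-bit accessors do.
 */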

static __always_inline void tdvps_management_check(u64 field, u8 bits) {}
static __always_inline void tdvps_state_non_arch_check(u64 field, u8 bits) {}

#define TDX_BUILD_TDVPS_ACCESSORS(bits, uclass, lclass) \
static __always_inline u##bits td_##lclass##_read##bits(struct vcpu_tdx *tdx, \
                                                        u32 field) \
{ \
        u64 err, data; \
 \
        tdvps_##lclass##_check(field, bits); \
        err = tdh_vp_rd(&tdx->vp, TDVPS_##uclass(field), &data); \
        if (unlikely(err)) { \
                tdh_vp_rd_failed(tdx, #uclass, field, err); \
                return 0; \
        } \
        return (u##bits)data; \
} \
static __always_inline void td_##lclass##_write##bits(struct vcpu_tdx *tdx, \
                                                      u32 field, u##bits val) \
{ \
        u64 err; \
 \
        tdvps_##lclass##_check(field, bits); \
        err = tdh_vp_wr(&tdx->vp, TDVPS_##uclass(field), val, \
                        GENMASK_ULL(bits - 1, 0)); \
        if (unlikely(err)) \
                tdh_vp_wr_failed(tdx, #uclass, " = ", field, (u64)val, err); \
} \
static __always_inline void td_##lclass##_setbit##bits(struct vcpu_tdx *tdx, \
                                                       u32 field, u64 bit) \
{ \
        u64 err; \
 \
        tdvps_##lclass##_check(field, bits); \
        err = tdh_vp_wr(&tdx->vp, TDVPS_##uclass(field), bit, bit); \
        if (unlikely(err)) \
                tdh_vp_wr_failed(tdx, #uclass, " |= ", field, bit, err); \
} \
static __always_inline void td_##lclass##_clearbit##bits(struct vcpu_tdx *tdx, \
                                                         u32 field, u64 bit) \
{ \
        u64 err; \
 \
        tdvps_##lclass##_check(field, bits); \
        err = tdh_vp_wr(&tdx->vp, TDVPS_##uclass(field), 0, bit); \
        if (unlikely(err)) \
                tdh_vp_wr_failed(tdx, #uclass, " &= ~", field, bit, err); \
}

bool tdx_interrupt_allowed(struct kvm_vcpu *vcpu);
int tdx_complete_emulated_msr(struct kvm_vcpu *vcpu, int err);

TDX_BUILD_TDVPS_ACCESSORS(16, VMCS, vmcs);
TDX_BUILD_TDVPS_ACCESSORS(32, VMCS, vmcs);
TDX_BUILD_TDVPS_ACCESSORS(64, VMCS, vmcs);

TDX_BUILD_TDVPS_ACCESSORS(8, MANAGEMENT, management);
TDX_BUILD_TDVPS_ACCESSORS(64, STATE_NON_ARCH, state_non_arch);
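
/*
 * The instantiations above expand to accessors named like td_vmcs_read32(),
 * td_vmcs_write64(), td_management_read8(), etc.  A hypothetical caller
 * (the field and bit names are standard VMX definitions from the VMX headers,
 * used here purely for illustration) might look like:
 *
 *        u32 intr_info = td_vmcs_read32(tdx, VM_EXIT_INTR_INFO);
 *
 *        td_vmcs_setbit32(tdx, PIN_BASED_VM_EXEC_CONTROL, PIN_BASED_NMI_EXITING);
 */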

#else
static inline int tdx_bringup(void) { return 0; }
static inline void tdx_cleanup(void) {}

#define enable_tdx 0

struct kvm_tdx {
        struct kvm kvm;
};

struct vcpu_tdx {
        struct kvm_vcpu vcpu;
};

static inline bool tdx_interrupt_allowed(struct kvm_vcpu *vcpu) { return false; }
static inline int tdx_complete_emulated_msr(struct kvm_vcpu *vcpu, int err) { return 0; }

#endif

#endif