xref: /linux/arch/x86/kvm/vmx/tdx.c (revision bca5cfbb694d66a1c482d0c347eee80f6afbc870)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/cleanup.h>
3 #include <linux/cpu.h>
4 #include <asm/cpufeature.h>
5 #include <asm/fpu/xcr.h>
6 #include <linux/misc_cgroup.h>
7 #include <linux/mmu_context.h>
8 #include <asm/tdx.h>
9 #include "capabilities.h"
10 #include "mmu.h"
11 #include "x86_ops.h"
12 #include "lapic.h"
13 #include "tdx.h"
14 #include "vmx.h"
15 #include "mmu/spte.h"
16 #include "common.h"
17 #include "posted_intr.h"
18 #include "irq.h"
19 #include <trace/events/kvm.h>
20 #include "trace.h"
21 
22 #pragma GCC poison to_vmx
23 
24 #undef pr_fmt
25 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
26 
27 #define pr_tdx_error(__fn, __err)	\
28 	pr_err_ratelimited("SEAMCALL %s failed: 0x%llx\n", #__fn, __err)
29 
30 #define __pr_tdx_error_N(__fn_str, __err, __fmt, ...)		\
31 	pr_err_ratelimited("SEAMCALL " __fn_str " failed: 0x%llx, " __fmt,  __err,  __VA_ARGS__)
32 
33 #define pr_tdx_error_1(__fn, __err, __rcx)		\
34 	__pr_tdx_error_N(#__fn, __err, "rcx 0x%llx\n", __rcx)
35 
36 #define pr_tdx_error_2(__fn, __err, __rcx, __rdx)	\
37 	__pr_tdx_error_N(#__fn, __err, "rcx 0x%llx, rdx 0x%llx\n", __rcx, __rdx)
38 
39 #define pr_tdx_error_3(__fn, __err, __rcx, __rdx, __r8)	\
40 	__pr_tdx_error_N(#__fn, __err, "rcx 0x%llx, rdx 0x%llx, r8 0x%llx\n", __rcx, __rdx, __r8)
41 
42 bool enable_tdx __ro_after_init;
43 module_param_named(tdx, enable_tdx, bool, 0444);
44 
45 #define TDX_SHARED_BIT_PWL_5 gpa_to_gfn(BIT_ULL(51))
46 #define TDX_SHARED_BIT_PWL_4 gpa_to_gfn(BIT_ULL(47))
47 
48 static enum cpuhp_state tdx_cpuhp_state;
49 
50 static const struct tdx_sys_info *tdx_sysinfo;
51 
52 void tdh_vp_rd_failed(struct vcpu_tdx *tdx, char *uclass, u32 field, u64 err)
53 {
54 	KVM_BUG_ON(1, tdx->vcpu.kvm);
55 	pr_err("TDH_VP_RD[%s.0x%x] failed 0x%llx\n", uclass, field, err);
56 }
57 
58 void tdh_vp_wr_failed(struct vcpu_tdx *tdx, char *uclass, char *op, u32 field,
59 		      u64 val, u64 err)
60 {
61 	KVM_BUG_ON(1, tdx->vcpu.kvm);
62 	pr_err("TDH_VP_WR[%s.0x%x]%s0x%llx failed: 0x%llx\n", uclass, field, op, val, err);
63 }
64 
65 #define KVM_SUPPORTED_TD_ATTRS (TDX_TD_ATTR_SEPT_VE_DISABLE)
66 
67 static __always_inline struct kvm_tdx *to_kvm_tdx(struct kvm *kvm)
68 {
69 	return container_of(kvm, struct kvm_tdx, kvm);
70 }
71 
72 static __always_inline struct vcpu_tdx *to_tdx(struct kvm_vcpu *vcpu)
73 {
74 	return container_of(vcpu, struct vcpu_tdx, vcpu);
75 }
76 
77 static u64 tdx_get_supported_attrs(const struct tdx_sys_info_td_conf *td_conf)
78 {
79 	u64 val = KVM_SUPPORTED_TD_ATTRS;
80 
81 	if ((val & td_conf->attributes_fixed1) != td_conf->attributes_fixed1)
82 		return 0;
83 
84 	val &= td_conf->attributes_fixed0;
85 
86 	return val;
87 }
88 
89 static u64 tdx_get_supported_xfam(const struct tdx_sys_info_td_conf *td_conf)
90 {
91 	u64 val = kvm_caps.supported_xcr0 | kvm_caps.supported_xss;
92 
93 	if ((val & td_conf->xfam_fixed1) != td_conf->xfam_fixed1)
94 		return 0;
95 
96 	val &= td_conf->xfam_fixed0;
97 
98 	return val;
99 }
100 
101 static int tdx_get_guest_phys_addr_bits(const u32 eax)
102 {
103 	return (eax & GENMASK(23, 16)) >> 16;
104 }
105 
106 static u32 tdx_set_guest_phys_addr_bits(const u32 eax, int addr_bits)
107 {
108 	return (eax & ~GENMASK(23, 16)) | (addr_bits & 0xff) << 16;
109 }
110 
111 #define TDX_FEATURE_TSX (__feature_bit(X86_FEATURE_HLE) | __feature_bit(X86_FEATURE_RTM))
112 
113 static bool has_tsx(const struct kvm_cpuid_entry2 *entry)
114 {
115 	return entry->function == 7 && entry->index == 0 &&
116 	       (entry->ebx & TDX_FEATURE_TSX);
117 }
118 
119 static void clear_tsx(struct kvm_cpuid_entry2 *entry)
120 {
121 	entry->ebx &= ~TDX_FEATURE_TSX;
122 }
123 
124 static bool has_waitpkg(const struct kvm_cpuid_entry2 *entry)
125 {
126 	return entry->function == 7 && entry->index == 0 &&
127 	       (entry->ecx & __feature_bit(X86_FEATURE_WAITPKG));
128 }
129 
130 static void clear_waitpkg(struct kvm_cpuid_entry2 *entry)
131 {
132 	entry->ecx &= ~__feature_bit(X86_FEATURE_WAITPKG);
133 }
134 
135 static void tdx_clear_unsupported_cpuid(struct kvm_cpuid_entry2 *entry)
136 {
137 	if (has_tsx(entry))
138 		clear_tsx(entry);
139 
140 	if (has_waitpkg(entry))
141 		clear_waitpkg(entry);
142 }
143 
144 static bool tdx_unsupported_cpuid(const struct kvm_cpuid_entry2 *entry)
145 {
146 	return has_tsx(entry) || has_waitpkg(entry);
147 }
148 
149 #define KVM_TDX_CPUID_NO_SUBLEAF	((__u32)-1)
150 
151 static void td_init_cpuid_entry2(struct kvm_cpuid_entry2 *entry, unsigned char idx)
152 {
153 	const struct tdx_sys_info_td_conf *td_conf = &tdx_sysinfo->td_conf;
154 
155 	entry->function = (u32)td_conf->cpuid_config_leaves[idx];
156 	entry->index = td_conf->cpuid_config_leaves[idx] >> 32;
157 	entry->eax = (u32)td_conf->cpuid_config_values[idx][0];
158 	entry->ebx = td_conf->cpuid_config_values[idx][0] >> 32;
159 	entry->ecx = (u32)td_conf->cpuid_config_values[idx][1];
160 	entry->edx = td_conf->cpuid_config_values[idx][1] >> 32;
161 
162 	if (entry->index == KVM_TDX_CPUID_NO_SUBLEAF)
163 		entry->index = 0;
164 
165 	/*
166 	 * The TDX module doesn't allow configuring the guest phys addr bits
167 	 * (EAX[23:16]).  However, KVM uses it as an interface to the userspace
168 	 * to configure the GPAW.  Report these bits as configurable.
169 	 */
170 	if (entry->function == 0x80000008)
171 		entry->eax = tdx_set_guest_phys_addr_bits(entry->eax, 0xff);
172 
173 	tdx_clear_unsupported_cpuid(entry);
174 }
175 
176 static int init_kvm_tdx_caps(const struct tdx_sys_info_td_conf *td_conf,
177 			     struct kvm_tdx_capabilities *caps)
178 {
179 	int i;
180 
181 	caps->supported_attrs = tdx_get_supported_attrs(td_conf);
182 	if (!caps->supported_attrs)
183 		return -EIO;
184 
185 	caps->supported_xfam = tdx_get_supported_xfam(td_conf);
186 	if (!caps->supported_xfam)
187 		return -EIO;
188 
189 	caps->cpuid.nent = td_conf->num_cpuid_config;
190 
191 	for (i = 0; i < td_conf->num_cpuid_config; i++)
192 		td_init_cpuid_entry2(&caps->cpuid.entries[i], i);
193 
194 	return 0;
195 }
196 
197 /*
198  * Some SEAMCALLs acquire the TDX module globally, and can fail with
199  * TDX_OPERAND_BUSY.  Use a global mutex to serialize these SEAMCALLs.
200  */
201 static DEFINE_MUTEX(tdx_lock);
202 
203 static atomic_t nr_configured_hkid;
204 
205 static bool tdx_operand_busy(u64 err)
206 {
207 	return (err & TDX_SEAMCALL_STATUS_MASK) == TDX_OPERAND_BUSY;
208 }
209 
210 
211 /*
212  * A per-CPU list of TD vCPUs associated with a given CPU.
213  * Protected by interrupt mask. Only manipulated by the CPU owning this per-CPU
214  * list.
215  * - When a vCPU is loaded onto a CPU, it is removed from the per-CPU list of
216  *   the old CPU during the IPI callback running on the old CPU, and then added
217  *   to the per-CPU list of the new CPU.
218  * - When a TD is tearing down, all vCPUs are disassociated from their current
219  *   running CPUs and removed from the per-CPU list during the IPI callback
220  *   running on those CPUs.
221  * - When a CPU is brought down, traverse the per-CPU list to disassociate all
222  *   associated TD vCPUs and remove them from the per-CPU list.
223  */
224 static DEFINE_PER_CPU(struct list_head, associated_tdvcpus);
225 
226 static __always_inline unsigned long tdvmcall_exit_type(struct kvm_vcpu *vcpu)
227 {
228 	return to_tdx(vcpu)->vp_enter_args.r10;
229 }
230 
231 static __always_inline unsigned long tdvmcall_leaf(struct kvm_vcpu *vcpu)
232 {
233 	return to_tdx(vcpu)->vp_enter_args.r11;
234 }
235 
236 static __always_inline void tdvmcall_set_return_code(struct kvm_vcpu *vcpu,
237 						     long val)
238 {
239 	to_tdx(vcpu)->vp_enter_args.r10 = val;
240 }
241 
242 static __always_inline void tdvmcall_set_return_val(struct kvm_vcpu *vcpu,
243 						    unsigned long val)
244 {
245 	to_tdx(vcpu)->vp_enter_args.r11 = val;
246 }
247 
248 static inline void tdx_hkid_free(struct kvm_tdx *kvm_tdx)
249 {
250 	tdx_guest_keyid_free(kvm_tdx->hkid);
251 	kvm_tdx->hkid = -1;
252 	atomic_dec(&nr_configured_hkid);
253 	misc_cg_uncharge(MISC_CG_RES_TDX, kvm_tdx->misc_cg, 1);
254 	put_misc_cg(kvm_tdx->misc_cg);
255 	kvm_tdx->misc_cg = NULL;
256 }
257 
258 static inline bool is_hkid_assigned(struct kvm_tdx *kvm_tdx)
259 {
260 	return kvm_tdx->hkid > 0;
261 }
262 
263 static inline void tdx_disassociate_vp(struct kvm_vcpu *vcpu)
264 {
265 	lockdep_assert_irqs_disabled();
266 
267 	list_del(&to_tdx(vcpu)->cpu_list);
268 
269 	/*
270 	 * Ensure tdx->cpu_list is updated before setting vcpu->cpu to -1,
271 	 * otherwise, a different CPU can see vcpu->cpu = -1 and add the vCPU
272 	 * to its list before it's deleted from this CPU's list.
273 	 */
274 	smp_wmb();
275 
276 	vcpu->cpu = -1;
277 }
278 
279 static void tdx_clear_page(struct page *page)
280 {
281 	const void *zero_page = (const void *) page_to_virt(ZERO_PAGE(0));
282 	void *dest = page_to_virt(page);
283 	unsigned long i;
284 
285 	/*
286 	 * The page could have been poisoned.  MOVDIR64B also clears
287 	 * the poison bit so the kernel can safely use the page again.
288 	 */
289 	for (i = 0; i < PAGE_SIZE; i += 64)
290 		movdir64b(dest + i, zero_page);
291 	/*
292 	 * MOVDIR64B store uses WC buffer.  Prevent following memory reads
293 	 * from seeing potentially poisoned cache.
294 	 */
295 	__mb();
296 }
297 
298 static void tdx_no_vcpus_enter_start(struct kvm *kvm)
299 {
300 	struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
301 
302 	lockdep_assert_held_write(&kvm->mmu_lock);
303 
304 	WRITE_ONCE(kvm_tdx->wait_for_sept_zap, true);
305 
306 	kvm_make_all_cpus_request(kvm, KVM_REQ_OUTSIDE_GUEST_MODE);
307 }
308 
309 static void tdx_no_vcpus_enter_stop(struct kvm *kvm)
310 {
311 	struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
312 
313 	lockdep_assert_held_write(&kvm->mmu_lock);
314 
315 	WRITE_ONCE(kvm_tdx->wait_for_sept_zap, false);
316 }
317 
318 /* TDH.PHYMEM.PAGE.RECLAIM is allowed only when destroying the TD. */
319 static int __tdx_reclaim_page(struct page *page)
320 {
321 	u64 err, rcx, rdx, r8;
322 
323 	err = tdh_phymem_page_reclaim(page, &rcx, &rdx, &r8);
324 
325 	/*
326 	 * No need to check for TDX_OPERAND_BUSY; all TD pages are freed
327 	 * before the HKID is released and control pages have also been
328 	 * released at this point, so there is no possibility of contention.
329 	 */
330 	if (WARN_ON_ONCE(err)) {
331 		pr_tdx_error_3(TDH_PHYMEM_PAGE_RECLAIM, err, rcx, rdx, r8);
332 		return -EIO;
333 	}
334 	return 0;
335 }
336 
337 static int tdx_reclaim_page(struct page *page)
338 {
339 	int r;
340 
341 	r = __tdx_reclaim_page(page);
342 	if (!r)
343 		tdx_clear_page(page);
344 	return r;
345 }
346 
347 
348 /*
349  * Reclaim the TD control page(s) which are crypto-protected by TDX guest's
350  * private KeyID.  Assume the cache associated with the TDX private KeyID has
351  * been flushed.
352  */
353 static void tdx_reclaim_control_page(struct page *ctrl_page)
354 {
355 	/*
356 	 * Leak the page if the kernel failed to reclaim the page.
357 	 * The kernel cannot use it safely anymore.
358 	 */
359 	if (tdx_reclaim_page(ctrl_page))
360 		return;
361 
362 	__free_page(ctrl_page);
363 }
364 
365 struct tdx_flush_vp_arg {
366 	struct kvm_vcpu *vcpu;
367 	u64 err;
368 };
369 
370 static void tdx_flush_vp(void *_arg)
371 {
372 	struct tdx_flush_vp_arg *arg = _arg;
373 	struct kvm_vcpu *vcpu = arg->vcpu;
374 	u64 err;
375 
376 	arg->err = 0;
377 	lockdep_assert_irqs_disabled();
378 
379 	/* Task migration can race with CPU offlining. */
380 	if (unlikely(vcpu->cpu != raw_smp_processor_id()))
381 		return;
382 
383 	/*
384 	 * No need to do TDH_VP_FLUSH if the vCPU hasn't been initialized.  The
385 	 * list tracking still needs to be updated so that it's correct if/when
386 	 * the vCPU does get initialized.
387 	 */
388 	if (to_tdx(vcpu)->state != VCPU_TD_STATE_UNINITIALIZED) {
389 		/*
390 		 * No need to retry.  TDX Resources needed for TDH.VP.FLUSH are:
391 		 * TDVPR as exclusive, TDR as shared, and TDCS as shared.  This
392 		 * vp flush function is called when destructing vCPU/TD or vCPU
393 		 * migration.  No other thread uses TDVPR in those cases.
394 		 */
395 		err = tdh_vp_flush(&to_tdx(vcpu)->vp);
396 		if (unlikely(err && err != TDX_VCPU_NOT_ASSOCIATED)) {
397 			/*
398 			 * This function is called in IPI context. Do not use
399 			 * printk to avoid console semaphore.
400 			 * The caller prints out the error message, instead.
401 			 */
402 			if (err)
403 				arg->err = err;
404 		}
405 	}
406 
407 	tdx_disassociate_vp(vcpu);
408 }
409 
410 static void tdx_flush_vp_on_cpu(struct kvm_vcpu *vcpu)
411 {
412 	struct tdx_flush_vp_arg arg = {
413 		.vcpu = vcpu,
414 	};
415 	int cpu = vcpu->cpu;
416 
417 	if (unlikely(cpu == -1))
418 		return;
419 
420 	smp_call_function_single(cpu, tdx_flush_vp, &arg, 1);
421 	if (KVM_BUG_ON(arg.err, vcpu->kvm))
422 		pr_tdx_error(TDH_VP_FLUSH, arg.err);
423 }
424 
425 void tdx_disable_virtualization_cpu(void)
426 {
427 	int cpu = raw_smp_processor_id();
428 	struct list_head *tdvcpus = &per_cpu(associated_tdvcpus, cpu);
429 	struct tdx_flush_vp_arg arg;
430 	struct vcpu_tdx *tdx, *tmp;
431 	unsigned long flags;
432 
433 	local_irq_save(flags);
434 	/* Safe variant needed as tdx_disassociate_vp() deletes the entry. */
435 	list_for_each_entry_safe(tdx, tmp, tdvcpus, cpu_list) {
436 		arg.vcpu = &tdx->vcpu;
437 		tdx_flush_vp(&arg);
438 	}
439 	local_irq_restore(flags);
440 }
441 
442 #define TDX_SEAMCALL_RETRIES 10000
443 
444 static void smp_func_do_phymem_cache_wb(void *unused)
445 {
446 	u64 err = 0;
447 	bool resume;
448 	int i;
449 
450 	/*
451 	 * TDH.PHYMEM.CACHE.WB flushes caches associated with any TDX private
452 	 * KeyID on the package or core.  The TDX module may not finish the
453 	 * cache flush but return TDX_INTERRUPTED_RESUMEABLE instead.  The
454 	 * kernel should retry it until it returns success w/o rescheduling.
455 	 */
456 	for (i = TDX_SEAMCALL_RETRIES; i > 0; i--) {
457 		resume = !!err;
458 		err = tdh_phymem_cache_wb(resume);
459 		switch (err) {
460 		case TDX_INTERRUPTED_RESUMABLE:
461 			continue;
462 		case TDX_NO_HKID_READY_TO_WBCACHE:
463 			err = TDX_SUCCESS; /* Already done by other thread */
464 			fallthrough;
465 		default:
466 			goto out;
467 		}
468 	}
469 
470 out:
471 	if (WARN_ON_ONCE(err))
472 		pr_tdx_error(TDH_PHYMEM_CACHE_WB, err);
473 }
474 
475 void tdx_mmu_release_hkid(struct kvm *kvm)
476 {
477 	bool packages_allocated, targets_allocated;
478 	struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
479 	cpumask_var_t packages, targets;
480 	struct kvm_vcpu *vcpu;
481 	unsigned long j;
482 	int i;
483 	u64 err;
484 
485 	if (!is_hkid_assigned(kvm_tdx))
486 		return;
487 
488 	packages_allocated = zalloc_cpumask_var(&packages, GFP_KERNEL);
489 	targets_allocated = zalloc_cpumask_var(&targets, GFP_KERNEL);
490 	cpus_read_lock();
491 
492 	kvm_for_each_vcpu(j, vcpu, kvm)
493 		tdx_flush_vp_on_cpu(vcpu);
494 
495 	/*
496 	 * TDH.PHYMEM.CACHE.WB tries to acquire the TDX module global lock
497 	 * and can fail with TDX_OPERAND_BUSY when it fails to get the lock.
498 	 * Multiple TDX guests can be destroyed simultaneously. Take the
499 	 * mutex to prevent it from getting error.
500 	 */
501 	mutex_lock(&tdx_lock);
502 
503 	/*
504 	 * Releasing HKID is in vm_destroy().
505 	 * After the above flushing vps, there should be no more vCPU
506 	 * associations, as all vCPU fds have been released at this stage.
507 	 */
508 	err = tdh_mng_vpflushdone(&kvm_tdx->td);
509 	if (err == TDX_FLUSHVP_NOT_DONE)
510 		goto out;
511 	if (KVM_BUG_ON(err, kvm)) {
512 		pr_tdx_error(TDH_MNG_VPFLUSHDONE, err);
513 		pr_err("tdh_mng_vpflushdone() failed. HKID %d is leaked.\n",
514 		       kvm_tdx->hkid);
515 		goto out;
516 	}
517 
518 	for_each_online_cpu(i) {
519 		if (packages_allocated &&
520 		    cpumask_test_and_set_cpu(topology_physical_package_id(i),
521 					     packages))
522 			continue;
523 		if (targets_allocated)
524 			cpumask_set_cpu(i, targets);
525 	}
526 	if (targets_allocated)
527 		on_each_cpu_mask(targets, smp_func_do_phymem_cache_wb, NULL, true);
528 	else
529 		on_each_cpu(smp_func_do_phymem_cache_wb, NULL, true);
530 	/*
531 	 * In the case of error in smp_func_do_phymem_cache_wb(), the following
532 	 * tdh_mng_key_freeid() will fail.
533 	 */
534 	err = tdh_mng_key_freeid(&kvm_tdx->td);
535 	if (KVM_BUG_ON(err, kvm)) {
536 		pr_tdx_error(TDH_MNG_KEY_FREEID, err);
537 		pr_err("tdh_mng_key_freeid() failed. HKID %d is leaked.\n",
538 		       kvm_tdx->hkid);
539 	} else {
540 		tdx_hkid_free(kvm_tdx);
541 	}
542 
543 out:
544 	mutex_unlock(&tdx_lock);
545 	cpus_read_unlock();
546 	free_cpumask_var(targets);
547 	free_cpumask_var(packages);
548 }
549 
550 static void tdx_reclaim_td_control_pages(struct kvm *kvm)
551 {
552 	struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
553 	u64 err;
554 	int i;
555 
556 	/*
557 	 * tdx_mmu_release_hkid() failed to reclaim HKID.  Something went wrong
558 	 * heavily with TDX module.  Give up freeing TD pages.  As the function
559 	 * already warned, don't warn it again.
560 	 */
561 	if (is_hkid_assigned(kvm_tdx))
562 		return;
563 
564 	if (kvm_tdx->td.tdcs_pages) {
565 		for (i = 0; i < kvm_tdx->td.tdcs_nr_pages; i++) {
566 			if (!kvm_tdx->td.tdcs_pages[i])
567 				continue;
568 
569 			tdx_reclaim_control_page(kvm_tdx->td.tdcs_pages[i]);
570 		}
571 		kfree(kvm_tdx->td.tdcs_pages);
572 		kvm_tdx->td.tdcs_pages = NULL;
573 	}
574 
575 	if (!kvm_tdx->td.tdr_page)
576 		return;
577 
578 	if (__tdx_reclaim_page(kvm_tdx->td.tdr_page))
579 		return;
580 
581 	/*
582 	 * Use a SEAMCALL to ask the TDX module to flush the cache based on the
583 	 * KeyID. TDX module may access TDR while operating on TD (Especially
584 	 * when it is reclaiming TDCS).
585 	 */
586 	err = tdh_phymem_page_wbinvd_tdr(&kvm_tdx->td);
587 	if (KVM_BUG_ON(err, kvm)) {
588 		pr_tdx_error(TDH_PHYMEM_PAGE_WBINVD, err);
589 		return;
590 	}
591 	tdx_clear_page(kvm_tdx->td.tdr_page);
592 
593 	__free_page(kvm_tdx->td.tdr_page);
594 	kvm_tdx->td.tdr_page = NULL;
595 }
596 
597 void tdx_vm_destroy(struct kvm *kvm)
598 {
599 	struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
600 
601 	tdx_reclaim_td_control_pages(kvm);
602 
603 	kvm_tdx->state = TD_STATE_UNINITIALIZED;
604 }
605 
606 static int tdx_do_tdh_mng_key_config(void *param)
607 {
608 	struct kvm_tdx *kvm_tdx = param;
609 	u64 err;
610 
611 	/* TDX_RND_NO_ENTROPY related retries are handled by sc_retry() */
612 	err = tdh_mng_key_config(&kvm_tdx->td);
613 
614 	if (KVM_BUG_ON(err, &kvm_tdx->kvm)) {
615 		pr_tdx_error(TDH_MNG_KEY_CONFIG, err);
616 		return -EIO;
617 	}
618 
619 	return 0;
620 }
621 
622 int tdx_vm_init(struct kvm *kvm)
623 {
624 	struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
625 
626 	kvm->arch.has_protected_state = true;
627 	kvm->arch.has_private_mem = true;
628 	kvm->arch.disabled_quirks |= KVM_X86_QUIRK_IGNORE_GUEST_PAT;
629 
630 	/*
631 	 * Because guest TD is protected, VMM can't parse the instruction in TD.
632 	 * Instead, guest uses MMIO hypercall.  For unmodified device driver,
633 	 * #VE needs to be injected for MMIO and #VE handler in TD converts MMIO
634 	 * instruction into MMIO hypercall.
635 	 *
636 	 * SPTE value for MMIO needs to be setup so that #VE is injected into
637 	 * TD instead of triggering EPT MISCONFIG.
638 	 * - RWX=0 so that EPT violation is triggered.
639 	 * - suppress #VE bit is cleared to inject #VE.
640 	 */
641 	kvm_mmu_set_mmio_spte_value(kvm, 0);
642 
643 	/*
644 	 * TDX has its own limit of maximum vCPUs it can support for all
645 	 * TDX guests in addition to KVM_MAX_VCPUS.  TDX module reports
646 	 * such limit via the MAX_VCPU_PER_TD global metadata.  In
647 	 * practice, it reflects the number of logical CPUs that ALL
648 	 * platforms that the TDX module supports can possibly have.
649 	 *
650 	 * Limit TDX guest's maximum vCPUs to the number of logical CPUs
651 	 * the platform has.  Simply forwarding the MAX_VCPU_PER_TD to
652 	 * userspace would result in an unpredictable ABI.
653 	 */
654 	kvm->max_vcpus = min_t(int, kvm->max_vcpus, num_present_cpus());
655 
656 	kvm_tdx->state = TD_STATE_UNINITIALIZED;
657 
658 	return 0;
659 }
660 
661 int tdx_vcpu_create(struct kvm_vcpu *vcpu)
662 {
663 	struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
664 	struct vcpu_tdx *tdx = to_tdx(vcpu);
665 
666 	if (kvm_tdx->state != TD_STATE_INITIALIZED)
667 		return -EIO;
668 
669 	/*
670 	 * TDX module mandates APICv, which requires an in-kernel local APIC.
671 	 * Disallow an in-kernel I/O APIC, because level-triggered interrupts
672 	 * and thus the I/O APIC as a whole can't be faithfully emulated in KVM.
673 	 */
674 	if (!irqchip_split(vcpu->kvm))
675 		return -EINVAL;
676 
677 	fpstate_set_confidential(&vcpu->arch.guest_fpu);
678 	vcpu->arch.apic->guest_apic_protected = true;
679 	INIT_LIST_HEAD(&tdx->vt.pi_wakeup_list);
680 
681 	vcpu->arch.efer = EFER_SCE | EFER_LME | EFER_LMA | EFER_NX;
682 
683 	vcpu->arch.switch_db_regs = KVM_DEBUGREG_AUTO_SWITCH;
684 	vcpu->arch.cr0_guest_owned_bits = -1ul;
685 	vcpu->arch.cr4_guest_owned_bits = -1ul;
686 
687 	/* KVM can't change TSC offset/multiplier as TDX module manages them. */
688 	vcpu->arch.guest_tsc_protected = true;
689 	vcpu->arch.tsc_offset = kvm_tdx->tsc_offset;
690 	vcpu->arch.l1_tsc_offset = vcpu->arch.tsc_offset;
691 	vcpu->arch.tsc_scaling_ratio = kvm_tdx->tsc_multiplier;
692 	vcpu->arch.l1_tsc_scaling_ratio = kvm_tdx->tsc_multiplier;
693 
694 	vcpu->arch.guest_state_protected =
695 		!(to_kvm_tdx(vcpu->kvm)->attributes & TDX_TD_ATTR_DEBUG);
696 
697 	if ((kvm_tdx->xfam & XFEATURE_MASK_XTILE) == XFEATURE_MASK_XTILE)
698 		vcpu->arch.xfd_no_write_intercept = true;
699 
700 	tdx->vt.pi_desc.nv = POSTED_INTR_VECTOR;
701 	__pi_set_sn(&tdx->vt.pi_desc);
702 
703 	tdx->state = VCPU_TD_STATE_UNINITIALIZED;
704 
705 	return 0;
706 }
707 
708 void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
709 {
710 	struct vcpu_tdx *tdx = to_tdx(vcpu);
711 
712 	vmx_vcpu_pi_load(vcpu, cpu);
713 	if (vcpu->cpu == cpu || !is_hkid_assigned(to_kvm_tdx(vcpu->kvm)))
714 		return;
715 
716 	tdx_flush_vp_on_cpu(vcpu);
717 
718 	KVM_BUG_ON(cpu != raw_smp_processor_id(), vcpu->kvm);
719 	local_irq_disable();
720 	/*
721 	 * Pairs with the smp_wmb() in tdx_disassociate_vp() to ensure
722 	 * vcpu->cpu is read before tdx->cpu_list.
723 	 */
724 	smp_rmb();
725 
726 	list_add(&tdx->cpu_list, &per_cpu(associated_tdvcpus, cpu));
727 	local_irq_enable();
728 }
729 
730 bool tdx_interrupt_allowed(struct kvm_vcpu *vcpu)
731 {
732 	/*
733 	 * KVM can't get the interrupt status of TDX guest and it assumes
734 	 * interrupt is always allowed unless TDX guest calls TDVMCALL with HLT,
735 	 * which passes the interrupt blocked flag.
736 	 */
737 	return vmx_get_exit_reason(vcpu).basic != EXIT_REASON_HLT ||
738 	       !to_tdx(vcpu)->vp_enter_args.r12;
739 }
740 
741 bool tdx_protected_apic_has_interrupt(struct kvm_vcpu *vcpu)
742 {
743 	u64 vcpu_state_details;
744 
745 	if (pi_has_pending_interrupt(vcpu))
746 		return true;
747 
748 	/*
749 	 * Only check RVI pending for HALTED case with IRQ enabled.
750 	 * For non-HLT cases, KVM doesn't care about STI/SS shadows.  And if the
751 	 * interrupt was pending before TD exit, then it _must_ be blocked,
752 	 * otherwise the interrupt would have been serviced at the instruction
753 	 * boundary.
754 	 */
755 	if (vmx_get_exit_reason(vcpu).basic != EXIT_REASON_HLT ||
756 	    to_tdx(vcpu)->vp_enter_args.r12)
757 		return false;
758 
759 	vcpu_state_details =
760 		td_state_non_arch_read64(to_tdx(vcpu), TD_VCPU_STATE_DETAILS_NON_ARCH);
761 
762 	return tdx_vcpu_state_details_intr_pending(vcpu_state_details);
763 }
764 
765 /*
766  * Compared to vmx_prepare_switch_to_guest(), there is not much to do
767  * as SEAMCALL/SEAMRET calls take care of most of save and restore.
768  */
769 void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
770 {
771 	struct vcpu_vt *vt = to_vt(vcpu);
772 
773 	if (vt->guest_state_loaded)
774 		return;
775 
776 	if (likely(is_64bit_mm(current->mm)))
777 		vt->msr_host_kernel_gs_base = current->thread.gsbase;
778 	else
779 		vt->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
780 
781 	vt->host_debugctlmsr = get_debugctlmsr();
782 
783 	vt->guest_state_loaded = true;
784 }
785 
786 struct tdx_uret_msr {
787 	u32 msr;
788 	unsigned int slot;
789 	u64 defval;
790 };
791 
792 static struct tdx_uret_msr tdx_uret_msrs[] = {
793 	{.msr = MSR_SYSCALL_MASK, .defval = 0x20200 },
794 	{.msr = MSR_STAR,},
795 	{.msr = MSR_LSTAR,},
796 	{.msr = MSR_TSC_AUX,},
797 };
798 
799 static void tdx_user_return_msr_update_cache(void)
800 {
801 	int i;
802 
803 	for (i = 0; i < ARRAY_SIZE(tdx_uret_msrs); i++)
804 		kvm_user_return_msr_update_cache(tdx_uret_msrs[i].slot,
805 						 tdx_uret_msrs[i].defval);
806 }
807 
808 static void tdx_prepare_switch_to_host(struct kvm_vcpu *vcpu)
809 {
810 	struct vcpu_vt *vt = to_vt(vcpu);
811 	struct vcpu_tdx *tdx = to_tdx(vcpu);
812 
813 	if (!vt->guest_state_loaded)
814 		return;
815 
816 	++vcpu->stat.host_state_reload;
817 	wrmsrl(MSR_KERNEL_GS_BASE, vt->msr_host_kernel_gs_base);
818 
819 	if (tdx->guest_entered) {
820 		tdx_user_return_msr_update_cache();
821 		tdx->guest_entered = false;
822 	}
823 
824 	vt->guest_state_loaded = false;
825 }
826 
827 void tdx_vcpu_put(struct kvm_vcpu *vcpu)
828 {
829 	vmx_vcpu_pi_put(vcpu);
830 	tdx_prepare_switch_to_host(vcpu);
831 }
832 
833 void tdx_vcpu_free(struct kvm_vcpu *vcpu)
834 {
835 	struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
836 	struct vcpu_tdx *tdx = to_tdx(vcpu);
837 	int i;
838 
839 	/*
840 	 * It is not possible to reclaim pages while hkid is assigned. It might
841 	 * be assigned if:
842 	 * 1. the TD VM is being destroyed but freeing hkid failed, in which
843 	 * case the pages are leaked
844 	 * 2. TD VCPU creation failed and this on the error path, in which case
845 	 * there is nothing to do anyway
846 	 */
847 	if (is_hkid_assigned(kvm_tdx))
848 		return;
849 
850 	if (tdx->vp.tdcx_pages) {
851 		for (i = 0; i < kvm_tdx->td.tdcx_nr_pages; i++) {
852 			if (tdx->vp.tdcx_pages[i])
853 				tdx_reclaim_control_page(tdx->vp.tdcx_pages[i]);
854 		}
855 		kfree(tdx->vp.tdcx_pages);
856 		tdx->vp.tdcx_pages = NULL;
857 	}
858 	if (tdx->vp.tdvpr_page) {
859 		tdx_reclaim_control_page(tdx->vp.tdvpr_page);
860 		tdx->vp.tdvpr_page = 0;
861 	}
862 
863 	tdx->state = VCPU_TD_STATE_UNINITIALIZED;
864 }
865 
866 int tdx_vcpu_pre_run(struct kvm_vcpu *vcpu)
867 {
868 	if (unlikely(to_tdx(vcpu)->state != VCPU_TD_STATE_INITIALIZED ||
869 		     to_kvm_tdx(vcpu->kvm)->state != TD_STATE_RUNNABLE))
870 		return -EINVAL;
871 
872 	return 1;
873 }
874 
875 static __always_inline u32 tdcall_to_vmx_exit_reason(struct kvm_vcpu *vcpu)
876 {
877 	switch (tdvmcall_leaf(vcpu)) {
878 	case EXIT_REASON_CPUID:
879 	case EXIT_REASON_HLT:
880 	case EXIT_REASON_IO_INSTRUCTION:
881 	case EXIT_REASON_MSR_READ:
882 	case EXIT_REASON_MSR_WRITE:
883 		return tdvmcall_leaf(vcpu);
884 	case EXIT_REASON_EPT_VIOLATION:
885 		return EXIT_REASON_EPT_MISCONFIG;
886 	default:
887 		break;
888 	}
889 
890 	return EXIT_REASON_TDCALL;
891 }
892 
893 static __always_inline u32 tdx_to_vmx_exit_reason(struct kvm_vcpu *vcpu)
894 {
895 	struct vcpu_tdx *tdx = to_tdx(vcpu);
896 	u32 exit_reason;
897 
898 	switch (tdx->vp_enter_ret & TDX_SEAMCALL_STATUS_MASK) {
899 	case TDX_SUCCESS:
900 	case TDX_NON_RECOVERABLE_VCPU:
901 	case TDX_NON_RECOVERABLE_TD:
902 	case TDX_NON_RECOVERABLE_TD_NON_ACCESSIBLE:
903 	case TDX_NON_RECOVERABLE_TD_WRONG_APIC_MODE:
904 		break;
905 	default:
906 		return -1u;
907 	}
908 
909 	exit_reason = tdx->vp_enter_ret;
910 
911 	switch (exit_reason) {
912 	case EXIT_REASON_TDCALL:
913 		if (tdvmcall_exit_type(vcpu))
914 			return EXIT_REASON_VMCALL;
915 
916 		return tdcall_to_vmx_exit_reason(vcpu);
917 	case EXIT_REASON_EPT_MISCONFIG:
918 		/*
919 		 * Defer KVM_BUG_ON() until tdx_handle_exit() because this is in
920 		 * non-instrumentable code with interrupts disabled.
921 		 */
922 		return -1u;
923 	default:
924 		break;
925 	}
926 
927 	return exit_reason;
928 }
929 
930 static noinstr void tdx_vcpu_enter_exit(struct kvm_vcpu *vcpu)
931 {
932 	struct vcpu_tdx *tdx = to_tdx(vcpu);
933 	struct vcpu_vt *vt = to_vt(vcpu);
934 
935 	guest_state_enter_irqoff();
936 
937 	tdx->vp_enter_ret = tdh_vp_enter(&tdx->vp, &tdx->vp_enter_args);
938 
939 	vt->exit_reason.full = tdx_to_vmx_exit_reason(vcpu);
940 
941 	vt->exit_qualification = tdx->vp_enter_args.rcx;
942 	tdx->ext_exit_qualification = tdx->vp_enter_args.rdx;
943 	tdx->exit_gpa = tdx->vp_enter_args.r8;
944 	vt->exit_intr_info = tdx->vp_enter_args.r9;
945 
946 	vmx_handle_nmi(vcpu);
947 
948 	guest_state_exit_irqoff();
949 }
950 
951 static bool tdx_failed_vmentry(struct kvm_vcpu *vcpu)
952 {
953 	return vmx_get_exit_reason(vcpu).failed_vmentry &&
954 	       vmx_get_exit_reason(vcpu).full != -1u;
955 }
956 
957 static fastpath_t tdx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
958 {
959 	u64 vp_enter_ret = to_tdx(vcpu)->vp_enter_ret;
960 
961 	/*
962 	 * TDX_OPERAND_BUSY could be returned for SEPT due to 0-step mitigation
963 	 * or for TD EPOCH due to contention with TDH.MEM.TRACK on TDH.VP.ENTER.
964 	 *
965 	 * When KVM requests KVM_REQ_OUTSIDE_GUEST_MODE, which has both
966 	 * KVM_REQUEST_WAIT and KVM_REQUEST_NO_ACTION set, it requires target
967 	 * vCPUs leaving fastpath so that interrupt can be enabled to ensure the
968 	 * IPIs can be delivered. Return EXIT_FASTPATH_EXIT_HANDLED instead of
969 	 * EXIT_FASTPATH_REENTER_GUEST to exit fastpath, otherwise, the
970 	 * requester may be blocked endlessly.
971 	 */
972 	if (unlikely(tdx_operand_busy(vp_enter_ret)))
973 		return EXIT_FASTPATH_EXIT_HANDLED;
974 
975 	return EXIT_FASTPATH_NONE;
976 }
977 
978 #define TDX_REGS_AVAIL_SET	(BIT_ULL(VCPU_EXREG_EXIT_INFO_1) | \
979 				 BIT_ULL(VCPU_EXREG_EXIT_INFO_2) | \
980 				 BIT_ULL(VCPU_REGS_RAX) | \
981 				 BIT_ULL(VCPU_REGS_RBX) | \
982 				 BIT_ULL(VCPU_REGS_RCX) | \
983 				 BIT_ULL(VCPU_REGS_RDX) | \
984 				 BIT_ULL(VCPU_REGS_RBP) | \
985 				 BIT_ULL(VCPU_REGS_RSI) | \
986 				 BIT_ULL(VCPU_REGS_RDI) | \
987 				 BIT_ULL(VCPU_REGS_R8) | \
988 				 BIT_ULL(VCPU_REGS_R9) | \
989 				 BIT_ULL(VCPU_REGS_R10) | \
990 				 BIT_ULL(VCPU_REGS_R11) | \
991 				 BIT_ULL(VCPU_REGS_R12) | \
992 				 BIT_ULL(VCPU_REGS_R13) | \
993 				 BIT_ULL(VCPU_REGS_R14) | \
994 				 BIT_ULL(VCPU_REGS_R15))
995 
996 static void tdx_load_host_xsave_state(struct kvm_vcpu *vcpu)
997 {
998 	struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
999 
1000 	/*
1001 	 * All TDX hosts support PKRU; but even if they didn't,
1002 	 * vcpu->arch.host_pkru would be 0 and the wrpkru would be
1003 	 * skipped.
1004 	 */
1005 	if (vcpu->arch.host_pkru != 0)
1006 		wrpkru(vcpu->arch.host_pkru);
1007 
1008 	if (kvm_host.xcr0 != (kvm_tdx->xfam & kvm_caps.supported_xcr0))
1009 		xsetbv(XCR_XFEATURE_ENABLED_MASK, kvm_host.xcr0);
1010 
1011 	/*
1012 	 * Likewise, even if a TDX hosts didn't support XSS both arms of
1013 	 * the comparison would be 0 and the wrmsrl would be skipped.
1014 	 */
1015 	if (kvm_host.xss != (kvm_tdx->xfam & kvm_caps.supported_xss))
1016 		wrmsrl(MSR_IA32_XSS, kvm_host.xss);
1017 }
1018 
1019 #define TDX_DEBUGCTL_PRESERVED (DEBUGCTLMSR_BTF | \
1020 				DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI | \
1021 				DEBUGCTLMSR_FREEZE_IN_SMM)
1022 
1023 fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
1024 {
1025 	struct vcpu_tdx *tdx = to_tdx(vcpu);
1026 	struct vcpu_vt *vt = to_vt(vcpu);
1027 
1028 	/*
1029 	 * force_immediate_exit requires vCPU entering for events injection with
1030 	 * an immediately exit followed. But The TDX module doesn't guarantee
1031 	 * entry, it's already possible for KVM to _think_ it completely entry
1032 	 * to the guest without actually having done so.
1033 	 * Since KVM never needs to force an immediate exit for TDX, and can't
1034 	 * do direct injection, just warn on force_immediate_exit.
1035 	 */
1036 	WARN_ON_ONCE(force_immediate_exit);
1037 
1038 	/*
1039 	 * Wait until retry of SEPT-zap-related SEAMCALL completes before
1040 	 * allowing vCPU entry to avoid contention with tdh_vp_enter() and
1041 	 * TDCALLs.
1042 	 */
1043 	if (unlikely(READ_ONCE(to_kvm_tdx(vcpu->kvm)->wait_for_sept_zap)))
1044 		return EXIT_FASTPATH_EXIT_HANDLED;
1045 
1046 	trace_kvm_entry(vcpu, force_immediate_exit);
1047 
1048 	if (pi_test_on(&vt->pi_desc)) {
1049 		apic->send_IPI_self(POSTED_INTR_VECTOR);
1050 
1051 		if (pi_test_pir(kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVTT) &
1052 			       APIC_VECTOR_MASK, &vt->pi_desc))
1053 			kvm_wait_lapic_expire(vcpu);
1054 	}
1055 
1056 	tdx_vcpu_enter_exit(vcpu);
1057 
1058 	if (vt->host_debugctlmsr & ~TDX_DEBUGCTL_PRESERVED)
1059 		update_debugctlmsr(vt->host_debugctlmsr);
1060 
1061 	tdx_load_host_xsave_state(vcpu);
1062 	tdx->guest_entered = true;
1063 
1064 	vcpu->arch.regs_avail &= TDX_REGS_AVAIL_SET;
1065 
1066 	if (unlikely(tdx->vp_enter_ret == EXIT_REASON_EPT_MISCONFIG))
1067 		return EXIT_FASTPATH_NONE;
1068 
1069 	if (unlikely((tdx->vp_enter_ret & TDX_SW_ERROR) == TDX_SW_ERROR))
1070 		return EXIT_FASTPATH_NONE;
1071 
1072 	if (unlikely(vmx_get_exit_reason(vcpu).basic == EXIT_REASON_MCE_DURING_VMENTRY))
1073 		kvm_machine_check();
1074 
1075 	trace_kvm_exit(vcpu, KVM_ISA_VMX);
1076 
1077 	if (unlikely(tdx_failed_vmentry(vcpu)))
1078 		return EXIT_FASTPATH_NONE;
1079 
1080 	return tdx_exit_handlers_fastpath(vcpu);
1081 }
1082 
1083 void tdx_inject_nmi(struct kvm_vcpu *vcpu)
1084 {
1085 	++vcpu->stat.nmi_injections;
1086 	td_management_write8(to_tdx(vcpu), TD_VCPU_PEND_NMI, 1);
1087 	/*
1088 	 * From KVM's perspective, NMI injection is completed right after
1089 	 * writing to PEND_NMI.  KVM doesn't care whether an NMI is injected by
1090 	 * the TDX module or not.
1091 	 */
1092 	vcpu->arch.nmi_injected = false;
1093 	/*
1094 	 * TDX doesn't support KVM to request NMI window exit.  If there is
1095 	 * still a pending vNMI, KVM is not able to inject it along with the
1096 	 * one pending in TDX module in a back-to-back way.  Since the previous
1097 	 * vNMI is still pending in TDX module, i.e. it has not been delivered
1098 	 * to TDX guest yet, it's OK to collapse the pending vNMI into the
1099 	 * previous one.  The guest is expected to handle all the NMI sources
1100 	 * when handling the first vNMI.
1101 	 */
1102 	vcpu->arch.nmi_pending = 0;
1103 }
1104 
1105 static int tdx_handle_exception_nmi(struct kvm_vcpu *vcpu)
1106 {
1107 	u32 intr_info = vmx_get_intr_info(vcpu);
1108 
1109 	/*
1110 	 * Machine checks are handled by handle_exception_irqoff(), or by
1111 	 * tdx_handle_exit() with TDX_NON_RECOVERABLE set if a #MC occurs on
1112 	 * VM-Entry.  NMIs are handled by tdx_vcpu_enter_exit().
1113 	 */
1114 	if (is_nmi(intr_info) || is_machine_check(intr_info))
1115 		return 1;
1116 
1117 	vcpu->run->exit_reason = KVM_EXIT_EXCEPTION;
1118 	vcpu->run->ex.exception = intr_info & INTR_INFO_VECTOR_MASK;
1119 	vcpu->run->ex.error_code = 0;
1120 
1121 	return 0;
1122 }
1123 
1124 static int complete_hypercall_exit(struct kvm_vcpu *vcpu)
1125 {
1126 	tdvmcall_set_return_code(vcpu, vcpu->run->hypercall.ret);
1127 	return 1;
1128 }
1129 
1130 static int tdx_emulate_vmcall(struct kvm_vcpu *vcpu)
1131 {
1132 	kvm_rax_write(vcpu, to_tdx(vcpu)->vp_enter_args.r10);
1133 	kvm_rbx_write(vcpu, to_tdx(vcpu)->vp_enter_args.r11);
1134 	kvm_rcx_write(vcpu, to_tdx(vcpu)->vp_enter_args.r12);
1135 	kvm_rdx_write(vcpu, to_tdx(vcpu)->vp_enter_args.r13);
1136 	kvm_rsi_write(vcpu, to_tdx(vcpu)->vp_enter_args.r14);
1137 
1138 	return __kvm_emulate_hypercall(vcpu, 0, complete_hypercall_exit);
1139 }
1140 
1141 /*
1142  * Split into chunks and check interrupt pending between chunks.  This allows
1143  * for timely injection of interrupts to prevent issues with guest lockup
1144  * detection.
1145  */
1146 #define TDX_MAP_GPA_MAX_LEN (2 * 1024 * 1024)
1147 static void __tdx_map_gpa(struct vcpu_tdx *tdx);
1148 
1149 static int tdx_complete_vmcall_map_gpa(struct kvm_vcpu *vcpu)
1150 {
1151 	struct vcpu_tdx *tdx = to_tdx(vcpu);
1152 
1153 	if (vcpu->run->hypercall.ret) {
1154 		tdvmcall_set_return_code(vcpu, TDVMCALL_STATUS_INVALID_OPERAND);
1155 		tdx->vp_enter_args.r11 = tdx->map_gpa_next;
1156 		return 1;
1157 	}
1158 
1159 	tdx->map_gpa_next += TDX_MAP_GPA_MAX_LEN;
1160 	if (tdx->map_gpa_next >= tdx->map_gpa_end)
1161 		return 1;
1162 
1163 	/*
1164 	 * Stop processing the remaining part if there is a pending interrupt,
1165 	 * which could be qualified to deliver.  Skip checking pending RVI for
1166 	 * TDVMCALL_MAP_GPA, see comments in tdx_protected_apic_has_interrupt().
1167 	 */
1168 	if (kvm_vcpu_has_events(vcpu)) {
1169 		tdvmcall_set_return_code(vcpu, TDVMCALL_STATUS_RETRY);
1170 		tdx->vp_enter_args.r11 = tdx->map_gpa_next;
1171 		return 1;
1172 	}
1173 
1174 	__tdx_map_gpa(tdx);
1175 	return 0;
1176 }
1177 
1178 static void __tdx_map_gpa(struct vcpu_tdx *tdx)
1179 {
1180 	u64 gpa = tdx->map_gpa_next;
1181 	u64 size = tdx->map_gpa_end - tdx->map_gpa_next;
1182 
1183 	if (size > TDX_MAP_GPA_MAX_LEN)
1184 		size = TDX_MAP_GPA_MAX_LEN;
1185 
1186 	tdx->vcpu.run->exit_reason       = KVM_EXIT_HYPERCALL;
1187 	tdx->vcpu.run->hypercall.nr      = KVM_HC_MAP_GPA_RANGE;
1188 	/*
1189 	 * In principle this should have been -KVM_ENOSYS, but userspace (QEMU <=9.2)
1190 	 * assumed that vcpu->run->hypercall.ret is never changed by KVM and thus that
1191 	 * it was always zero on KVM_EXIT_HYPERCALL.  Since KVM is now overwriting
1192 	 * vcpu->run->hypercall.ret, ensuring that it is zero to not break QEMU.
1193 	 */
1194 	tdx->vcpu.run->hypercall.ret = 0;
1195 	tdx->vcpu.run->hypercall.args[0] = gpa & ~gfn_to_gpa(kvm_gfn_direct_bits(tdx->vcpu.kvm));
1196 	tdx->vcpu.run->hypercall.args[1] = size / PAGE_SIZE;
1197 	tdx->vcpu.run->hypercall.args[2] = vt_is_tdx_private_gpa(tdx->vcpu.kvm, gpa) ?
1198 					   KVM_MAP_GPA_RANGE_ENCRYPTED :
1199 					   KVM_MAP_GPA_RANGE_DECRYPTED;
1200 	tdx->vcpu.run->hypercall.flags   = KVM_EXIT_HYPERCALL_LONG_MODE;
1201 
1202 	tdx->vcpu.arch.complete_userspace_io = tdx_complete_vmcall_map_gpa;
1203 }
1204 
1205 static int tdx_map_gpa(struct kvm_vcpu *vcpu)
1206 {
1207 	struct vcpu_tdx *tdx = to_tdx(vcpu);
1208 	u64 gpa = tdx->vp_enter_args.r12;
1209 	u64 size = tdx->vp_enter_args.r13;
1210 	u64 ret;
1211 
1212 	/*
1213 	 * Converting TDVMCALL_MAP_GPA to KVM_HC_MAP_GPA_RANGE requires
1214 	 * userspace to enable KVM_CAP_EXIT_HYPERCALL with KVM_HC_MAP_GPA_RANGE
1215 	 * bit set.  If not, the error code is not defined in GHCI for TDX, use
1216 	 * TDVMCALL_STATUS_INVALID_OPERAND for this case.
1217 	 */
1218 	if (!user_exit_on_hypercall(vcpu->kvm, KVM_HC_MAP_GPA_RANGE)) {
1219 		ret = TDVMCALL_STATUS_INVALID_OPERAND;
1220 		goto error;
1221 	}
1222 
1223 	if (gpa + size <= gpa || !kvm_vcpu_is_legal_gpa(vcpu, gpa) ||
1224 	    !kvm_vcpu_is_legal_gpa(vcpu, gpa + size - 1) ||
1225 	    (vt_is_tdx_private_gpa(vcpu->kvm, gpa) !=
1226 	     vt_is_tdx_private_gpa(vcpu->kvm, gpa + size - 1))) {
1227 		ret = TDVMCALL_STATUS_INVALID_OPERAND;
1228 		goto error;
1229 	}
1230 
1231 	if (!PAGE_ALIGNED(gpa) || !PAGE_ALIGNED(size)) {
1232 		ret = TDVMCALL_STATUS_ALIGN_ERROR;
1233 		goto error;
1234 	}
1235 
1236 	tdx->map_gpa_end = gpa + size;
1237 	tdx->map_gpa_next = gpa;
1238 
1239 	__tdx_map_gpa(tdx);
1240 	return 0;
1241 
1242 error:
1243 	tdvmcall_set_return_code(vcpu, ret);
1244 	tdx->vp_enter_args.r11 = gpa;
1245 	return 1;
1246 }
1247 
1248 static int tdx_report_fatal_error(struct kvm_vcpu *vcpu)
1249 {
1250 	struct vcpu_tdx *tdx = to_tdx(vcpu);
1251 	u64 *regs = vcpu->run->system_event.data;
1252 	u64 *module_regs = &tdx->vp_enter_args.r8;
1253 	int index = VCPU_REGS_RAX;
1254 
1255 	vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
1256 	vcpu->run->system_event.type = KVM_SYSTEM_EVENT_TDX_FATAL;
1257 	vcpu->run->system_event.ndata = 16;
1258 
1259 	/* Dump 16 general-purpose registers to userspace in ascending order. */
1260 	regs[index++] = tdx->vp_enter_ret;
1261 	regs[index++] = tdx->vp_enter_args.rcx;
1262 	regs[index++] = tdx->vp_enter_args.rdx;
1263 	regs[index++] = tdx->vp_enter_args.rbx;
1264 	regs[index++] = 0;
1265 	regs[index++] = 0;
1266 	regs[index++] = tdx->vp_enter_args.rsi;
1267 	regs[index] = tdx->vp_enter_args.rdi;
1268 	for (index = 0; index < 8; index++)
1269 		regs[VCPU_REGS_R8 + index] = module_regs[index];
1270 
1271 	return 0;
1272 }
1273 
1274 static int tdx_emulate_cpuid(struct kvm_vcpu *vcpu)
1275 {
1276 	u32 eax, ebx, ecx, edx;
1277 	struct vcpu_tdx *tdx = to_tdx(vcpu);
1278 
1279 	/* EAX and ECX for cpuid is stored in R12 and R13. */
1280 	eax = tdx->vp_enter_args.r12;
1281 	ecx = tdx->vp_enter_args.r13;
1282 
1283 	kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, false);
1284 
1285 	tdx->vp_enter_args.r12 = eax;
1286 	tdx->vp_enter_args.r13 = ebx;
1287 	tdx->vp_enter_args.r14 = ecx;
1288 	tdx->vp_enter_args.r15 = edx;
1289 
1290 	return 1;
1291 }
1292 
1293 static int tdx_complete_pio_out(struct kvm_vcpu *vcpu)
1294 {
1295 	vcpu->arch.pio.count = 0;
1296 	return 1;
1297 }
1298 
1299 static int tdx_complete_pio_in(struct kvm_vcpu *vcpu)
1300 {
1301 	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
1302 	unsigned long val = 0;
1303 	int ret;
1304 
1305 	ret = ctxt->ops->pio_in_emulated(ctxt, vcpu->arch.pio.size,
1306 					 vcpu->arch.pio.port, &val, 1);
1307 
1308 	WARN_ON_ONCE(!ret);
1309 
1310 	tdvmcall_set_return_val(vcpu, val);
1311 
1312 	return 1;
1313 }
1314 
1315 static int tdx_emulate_io(struct kvm_vcpu *vcpu)
1316 {
1317 	struct vcpu_tdx *tdx = to_tdx(vcpu);
1318 	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
1319 	unsigned long val = 0;
1320 	unsigned int port;
1321 	u64 size, write;
1322 	int ret;
1323 
1324 	++vcpu->stat.io_exits;
1325 
1326 	size = tdx->vp_enter_args.r12;
1327 	write = tdx->vp_enter_args.r13;
1328 	port = tdx->vp_enter_args.r14;
1329 
1330 	if ((write != 0 && write != 1) || (size != 1 && size != 2 && size != 4)) {
1331 		tdvmcall_set_return_code(vcpu, TDVMCALL_STATUS_INVALID_OPERAND);
1332 		return 1;
1333 	}
1334 
1335 	if (write) {
1336 		val = tdx->vp_enter_args.r15;
1337 		ret = ctxt->ops->pio_out_emulated(ctxt, size, port, &val, 1);
1338 	} else {
1339 		ret = ctxt->ops->pio_in_emulated(ctxt, size, port, &val, 1);
1340 	}
1341 
1342 	if (!ret)
1343 		vcpu->arch.complete_userspace_io = write ? tdx_complete_pio_out :
1344 							   tdx_complete_pio_in;
1345 	else if (!write)
1346 		tdvmcall_set_return_val(vcpu, val);
1347 
1348 	return ret;
1349 }
1350 
1351 static int tdx_complete_mmio_read(struct kvm_vcpu *vcpu)
1352 {
1353 	unsigned long val = 0;
1354 	gpa_t gpa;
1355 	int size;
1356 
1357 	gpa = vcpu->mmio_fragments[0].gpa;
1358 	size = vcpu->mmio_fragments[0].len;
1359 
1360 	memcpy(&val, vcpu->run->mmio.data, size);
1361 	tdvmcall_set_return_val(vcpu, val);
1362 	trace_kvm_mmio(KVM_TRACE_MMIO_READ, size, gpa, &val);
1363 	return 1;
1364 }
1365 
1366 static inline int tdx_mmio_write(struct kvm_vcpu *vcpu, gpa_t gpa, int size,
1367 				 unsigned long val)
1368 {
1369 	if (!kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
1370 		trace_kvm_fast_mmio(gpa);
1371 		return 0;
1372 	}
1373 
1374 	trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, size, gpa, &val);
1375 	if (kvm_io_bus_write(vcpu, KVM_MMIO_BUS, gpa, size, &val))
1376 		return -EOPNOTSUPP;
1377 
1378 	return 0;
1379 }
1380 
1381 static inline int tdx_mmio_read(struct kvm_vcpu *vcpu, gpa_t gpa, int size)
1382 {
1383 	unsigned long val;
1384 
1385 	if (kvm_io_bus_read(vcpu, KVM_MMIO_BUS, gpa, size, &val))
1386 		return -EOPNOTSUPP;
1387 
1388 	tdvmcall_set_return_val(vcpu, val);
1389 	trace_kvm_mmio(KVM_TRACE_MMIO_READ, size, gpa, &val);
1390 	return 0;
1391 }
1392 
1393 static int tdx_emulate_mmio(struct kvm_vcpu *vcpu)
1394 {
1395 	struct vcpu_tdx *tdx = to_tdx(vcpu);
1396 	int size, write, r;
1397 	unsigned long val;
1398 	gpa_t gpa;
1399 
1400 	size = tdx->vp_enter_args.r12;
1401 	write = tdx->vp_enter_args.r13;
1402 	gpa = tdx->vp_enter_args.r14;
1403 	val = write ? tdx->vp_enter_args.r15 : 0;
1404 
1405 	if (size != 1 && size != 2 && size != 4 && size != 8)
1406 		goto error;
1407 	if (write != 0 && write != 1)
1408 		goto error;
1409 
1410 	/*
1411 	 * TDG.VP.VMCALL<MMIO> allows only shared GPA, it makes no sense to
1412 	 * do MMIO emulation for private GPA.
1413 	 */
1414 	if (vt_is_tdx_private_gpa(vcpu->kvm, gpa) ||
1415 	    vt_is_tdx_private_gpa(vcpu->kvm, gpa + size - 1))
1416 		goto error;
1417 
1418 	gpa = gpa & ~gfn_to_gpa(kvm_gfn_direct_bits(vcpu->kvm));
1419 
1420 	if (write)
1421 		r = tdx_mmio_write(vcpu, gpa, size, val);
1422 	else
1423 		r = tdx_mmio_read(vcpu, gpa, size);
1424 	if (!r)
1425 		/* Kernel completed device emulation. */
1426 		return 1;
1427 
1428 	/* Request the device emulation to userspace device model. */
1429 	vcpu->mmio_is_write = write;
1430 	if (!write)
1431 		vcpu->arch.complete_userspace_io = tdx_complete_mmio_read;
1432 
1433 	vcpu->run->mmio.phys_addr = gpa;
1434 	vcpu->run->mmio.len = size;
1435 	vcpu->run->mmio.is_write = write;
1436 	vcpu->run->exit_reason = KVM_EXIT_MMIO;
1437 
1438 	if (write) {
1439 		memcpy(vcpu->run->mmio.data, &val, size);
1440 	} else {
1441 		vcpu->mmio_fragments[0].gpa = gpa;
1442 		vcpu->mmio_fragments[0].len = size;
1443 		trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, size, gpa, NULL);
1444 	}
1445 	return 0;
1446 
1447 error:
1448 	tdvmcall_set_return_code(vcpu, TDVMCALL_STATUS_INVALID_OPERAND);
1449 	return 1;
1450 }
1451 
1452 static int tdx_get_td_vm_call_info(struct kvm_vcpu *vcpu)
1453 {
1454 	struct vcpu_tdx *tdx = to_tdx(vcpu);
1455 
1456 	if (tdx->vp_enter_args.r12)
1457 		tdvmcall_set_return_code(vcpu, TDVMCALL_STATUS_INVALID_OPERAND);
1458 	else {
1459 		tdx->vp_enter_args.r11 = 0;
1460 		tdx->vp_enter_args.r13 = 0;
1461 		tdx->vp_enter_args.r14 = 0;
1462 	}
1463 	return 1;
1464 }
1465 
1466 static int handle_tdvmcall(struct kvm_vcpu *vcpu)
1467 {
1468 	switch (tdvmcall_leaf(vcpu)) {
1469 	case TDVMCALL_MAP_GPA:
1470 		return tdx_map_gpa(vcpu);
1471 	case TDVMCALL_REPORT_FATAL_ERROR:
1472 		return tdx_report_fatal_error(vcpu);
1473 	case TDVMCALL_GET_TD_VM_CALL_INFO:
1474 		return tdx_get_td_vm_call_info(vcpu);
1475 	default:
1476 		break;
1477 	}
1478 
1479 	tdvmcall_set_return_code(vcpu, TDVMCALL_STATUS_INVALID_OPERAND);
1480 	return 1;
1481 }
1482 
1483 void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int pgd_level)
1484 {
1485 	u64 shared_bit = (pgd_level == 5) ? TDX_SHARED_BIT_PWL_5 :
1486 			  TDX_SHARED_BIT_PWL_4;
1487 
1488 	if (KVM_BUG_ON(shared_bit != kvm_gfn_direct_bits(vcpu->kvm), vcpu->kvm))
1489 		return;
1490 
1491 	td_vmcs_write64(to_tdx(vcpu), SHARED_EPT_POINTER, root_hpa);
1492 }
1493 
1494 static void tdx_unpin(struct kvm *kvm, struct page *page)
1495 {
1496 	put_page(page);
1497 }
1498 
1499 static int tdx_mem_page_aug(struct kvm *kvm, gfn_t gfn,
1500 			    enum pg_level level, struct page *page)
1501 {
1502 	int tdx_level = pg_level_to_tdx_sept_level(level);
1503 	struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
1504 	gpa_t gpa = gfn_to_gpa(gfn);
1505 	u64 entry, level_state;
1506 	u64 err;
1507 
1508 	err = tdh_mem_page_aug(&kvm_tdx->td, gpa, tdx_level, page, &entry, &level_state);
1509 	if (unlikely(tdx_operand_busy(err))) {
1510 		tdx_unpin(kvm, page);
1511 		return -EBUSY;
1512 	}
1513 
1514 	if (KVM_BUG_ON(err, kvm)) {
1515 		pr_tdx_error_2(TDH_MEM_PAGE_AUG, err, entry, level_state);
1516 		tdx_unpin(kvm, page);
1517 		return -EIO;
1518 	}
1519 
1520 	return 0;
1521 }
1522 
1523 /*
1524  * KVM_TDX_INIT_MEM_REGION calls kvm_gmem_populate() to map guest pages; the
1525  * callback tdx_gmem_post_populate() then maps pages into private memory.
1526  * through the a seamcall TDH.MEM.PAGE.ADD().  The SEAMCALL also requires the
1527  * private EPT structures for the page to have been built before, which is
1528  * done via kvm_tdp_map_page(). nr_premapped counts the number of pages that
1529  * were added to the EPT structures but not added with TDH.MEM.PAGE.ADD().
1530  * The counter has to be zero on KVM_TDX_FINALIZE_VM, to ensure that there
1531  * are no half-initialized shared EPT pages.
1532  */
1533 static int tdx_mem_page_record_premap_cnt(struct kvm *kvm, gfn_t gfn,
1534 					  enum pg_level level, kvm_pfn_t pfn)
1535 {
1536 	struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
1537 
1538 	if (KVM_BUG_ON(kvm->arch.pre_fault_allowed, kvm))
1539 		return -EINVAL;
1540 
1541 	/* nr_premapped will be decreased when tdh_mem_page_add() is called. */
1542 	atomic64_inc(&kvm_tdx->nr_premapped);
1543 	return 0;
1544 }
1545 
1546 int tdx_sept_set_private_spte(struct kvm *kvm, gfn_t gfn,
1547 			      enum pg_level level, kvm_pfn_t pfn)
1548 {
1549 	struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
1550 	struct page *page = pfn_to_page(pfn);
1551 
1552 	/* TODO: handle large pages. */
1553 	if (KVM_BUG_ON(level != PG_LEVEL_4K, kvm))
1554 		return -EINVAL;
1555 
1556 	/*
1557 	 * Because guest_memfd doesn't support page migration with
1558 	 * a_ops->migrate_folio (yet), no callback is triggered for KVM on page
1559 	 * migration.  Until guest_memfd supports page migration, prevent page
1560 	 * migration.
1561 	 * TODO: Once guest_memfd introduces callback on page migration,
1562 	 * implement it and remove get_page/put_page().
1563 	 */
1564 	get_page(page);
1565 
1566 	/*
1567 	 * Read 'pre_fault_allowed' before 'kvm_tdx->state'; see matching
1568 	 * barrier in tdx_td_finalize().
1569 	 */
1570 	smp_rmb();
1571 	if (likely(kvm_tdx->state == TD_STATE_RUNNABLE))
1572 		return tdx_mem_page_aug(kvm, gfn, level, page);
1573 
1574 	return tdx_mem_page_record_premap_cnt(kvm, gfn, level, pfn);
1575 }
1576 
1577 static int tdx_sept_drop_private_spte(struct kvm *kvm, gfn_t gfn,
1578 				      enum pg_level level, struct page *page)
1579 {
1580 	int tdx_level = pg_level_to_tdx_sept_level(level);
1581 	struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
1582 	gpa_t gpa = gfn_to_gpa(gfn);
1583 	u64 err, entry, level_state;
1584 
1585 	/* TODO: handle large pages. */
1586 	if (KVM_BUG_ON(level != PG_LEVEL_4K, kvm))
1587 		return -EINVAL;
1588 
1589 	if (KVM_BUG_ON(!is_hkid_assigned(kvm_tdx), kvm))
1590 		return -EINVAL;
1591 
1592 	/*
1593 	 * When zapping private page, write lock is held. So no race condition
1594 	 * with other vcpu sept operation.
1595 	 * Race with TDH.VP.ENTER due to (0-step mitigation) and Guest TDCALLs.
1596 	 */
1597 	err = tdh_mem_page_remove(&kvm_tdx->td, gpa, tdx_level, &entry,
1598 				  &level_state);
1599 
1600 	if (unlikely(tdx_operand_busy(err))) {
1601 		/*
1602 		 * The second retry is expected to succeed after kicking off all
1603 		 * other vCPUs and prevent them from invoking TDH.VP.ENTER.
1604 		 */
1605 		tdx_no_vcpus_enter_start(kvm);
1606 		err = tdh_mem_page_remove(&kvm_tdx->td, gpa, tdx_level, &entry,
1607 					  &level_state);
1608 		tdx_no_vcpus_enter_stop(kvm);
1609 	}
1610 
1611 	if (KVM_BUG_ON(err, kvm)) {
1612 		pr_tdx_error_2(TDH_MEM_PAGE_REMOVE, err, entry, level_state);
1613 		return -EIO;
1614 	}
1615 
1616 	err = tdh_phymem_page_wbinvd_hkid((u16)kvm_tdx->hkid, page);
1617 
1618 	if (KVM_BUG_ON(err, kvm)) {
1619 		pr_tdx_error(TDH_PHYMEM_PAGE_WBINVD, err);
1620 		return -EIO;
1621 	}
1622 	tdx_clear_page(page);
1623 	tdx_unpin(kvm, page);
1624 	return 0;
1625 }
1626 
1627 int tdx_sept_link_private_spt(struct kvm *kvm, gfn_t gfn,
1628 			      enum pg_level level, void *private_spt)
1629 {
1630 	int tdx_level = pg_level_to_tdx_sept_level(level);
1631 	gpa_t gpa = gfn_to_gpa(gfn);
1632 	struct page *page = virt_to_page(private_spt);
1633 	u64 err, entry, level_state;
1634 
1635 	err = tdh_mem_sept_add(&to_kvm_tdx(kvm)->td, gpa, tdx_level, page, &entry,
1636 			       &level_state);
1637 	if (unlikely(tdx_operand_busy(err)))
1638 		return -EBUSY;
1639 
1640 	if (KVM_BUG_ON(err, kvm)) {
1641 		pr_tdx_error_2(TDH_MEM_SEPT_ADD, err, entry, level_state);
1642 		return -EIO;
1643 	}
1644 
1645 	return 0;
1646 }
1647 
1648 /*
1649  * Check if the error returned from a SEPT zap SEAMCALL is due to that a page is
1650  * mapped by KVM_TDX_INIT_MEM_REGION without tdh_mem_page_add() being called
1651  * successfully.
1652  *
1653  * Since tdh_mem_sept_add() must have been invoked successfully before a
1654  * non-leaf entry present in the mirrored page table, the SEPT ZAP related
1655  * SEAMCALLs should not encounter err TDX_EPT_WALK_FAILED. They should instead
1656  * find TDX_EPT_ENTRY_STATE_INCORRECT due to an empty leaf entry found in the
1657  * SEPT.
1658  *
1659  * Further check if the returned entry from SEPT walking is with RWX permissions
1660  * to filter out anything unexpected.
1661  *
1662  * Note: @level is pg_level, not the tdx_level. The tdx_level extracted from
1663  * level_state returned from a SEAMCALL error is the same as that passed into
1664  * the SEAMCALL.
1665  */
1666 static int tdx_is_sept_zap_err_due_to_premap(struct kvm_tdx *kvm_tdx, u64 err,
1667 					     u64 entry, int level)
1668 {
1669 	if (!err || kvm_tdx->state == TD_STATE_RUNNABLE)
1670 		return false;
1671 
1672 	if (err != (TDX_EPT_ENTRY_STATE_INCORRECT | TDX_OPERAND_ID_RCX))
1673 		return false;
1674 
1675 	if ((is_last_spte(entry, level) && (entry & VMX_EPT_RWX_MASK)))
1676 		return false;
1677 
1678 	return true;
1679 }
1680 
1681 static int tdx_sept_zap_private_spte(struct kvm *kvm, gfn_t gfn,
1682 				     enum pg_level level, struct page *page)
1683 {
1684 	int tdx_level = pg_level_to_tdx_sept_level(level);
1685 	struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
1686 	gpa_t gpa = gfn_to_gpa(gfn) & KVM_HPAGE_MASK(level);
1687 	u64 err, entry, level_state;
1688 
1689 	/* For now large page isn't supported yet. */
1690 	WARN_ON_ONCE(level != PG_LEVEL_4K);
1691 
1692 	err = tdh_mem_range_block(&kvm_tdx->td, gpa, tdx_level, &entry, &level_state);
1693 
1694 	if (unlikely(tdx_operand_busy(err))) {
1695 		/* After no vCPUs enter, the second retry is expected to succeed */
1696 		tdx_no_vcpus_enter_start(kvm);
1697 		err = tdh_mem_range_block(&kvm_tdx->td, gpa, tdx_level, &entry, &level_state);
1698 		tdx_no_vcpus_enter_stop(kvm);
1699 	}
1700 	if (tdx_is_sept_zap_err_due_to_premap(kvm_tdx, err, entry, level) &&
1701 	    !KVM_BUG_ON(!atomic64_read(&kvm_tdx->nr_premapped), kvm)) {
1702 		atomic64_dec(&kvm_tdx->nr_premapped);
1703 		tdx_unpin(kvm, page);
1704 		return 0;
1705 	}
1706 
1707 	if (KVM_BUG_ON(err, kvm)) {
1708 		pr_tdx_error_2(TDH_MEM_RANGE_BLOCK, err, entry, level_state);
1709 		return -EIO;
1710 	}
1711 	return 1;
1712 }
1713 
1714 /*
1715  * Ensure shared and private EPTs to be flushed on all vCPUs.
1716  * tdh_mem_track() is the only caller that increases TD epoch. An increase in
1717  * the TD epoch (e.g., to value "N + 1") is successful only if no vCPUs are
1718  * running in guest mode with the value "N - 1".
1719  *
1720  * A successful execution of tdh_mem_track() ensures that vCPUs can only run in
1721  * guest mode with TD epoch value "N" if no TD exit occurs after the TD epoch
1722  * being increased to "N + 1".
1723  *
1724  * Kicking off all vCPUs after that further results in no vCPUs can run in guest
1725  * mode with TD epoch value "N", which unblocks the next tdh_mem_track() (e.g.
1726  * to increase TD epoch to "N + 2").
1727  *
1728  * TDX module will flush EPT on the next TD enter and make vCPUs to run in
1729  * guest mode with TD epoch value "N + 1".
1730  *
1731  * kvm_make_all_cpus_request() guarantees all vCPUs are out of guest mode by
1732  * waiting empty IPI handler ack_kick().
1733  *
1734  * No action is required to the vCPUs being kicked off since the kicking off
1735  * occurs certainly after TD epoch increment and before the next
1736  * tdh_mem_track().
1737  */
1738 static void tdx_track(struct kvm *kvm)
1739 {
1740 	struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
1741 	u64 err;
1742 
1743 	/* If TD isn't finalized, it's before any vcpu running. */
1744 	if (unlikely(kvm_tdx->state != TD_STATE_RUNNABLE))
1745 		return;
1746 
1747 	lockdep_assert_held_write(&kvm->mmu_lock);
1748 
1749 	err = tdh_mem_track(&kvm_tdx->td);
1750 	if (unlikely(tdx_operand_busy(err))) {
1751 		/* After no vCPUs enter, the second retry is expected to succeed */
1752 		tdx_no_vcpus_enter_start(kvm);
1753 		err = tdh_mem_track(&kvm_tdx->td);
1754 		tdx_no_vcpus_enter_stop(kvm);
1755 	}
1756 
1757 	if (KVM_BUG_ON(err, kvm))
1758 		pr_tdx_error(TDH_MEM_TRACK, err);
1759 
1760 	kvm_make_all_cpus_request(kvm, KVM_REQ_OUTSIDE_GUEST_MODE);
1761 }
1762 
1763 int tdx_sept_free_private_spt(struct kvm *kvm, gfn_t gfn,
1764 			      enum pg_level level, void *private_spt)
1765 {
1766 	struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
1767 
1768 	/*
1769 	 * free_external_spt() is only called after hkid is freed when TD is
1770 	 * tearing down.
1771 	 * KVM doesn't (yet) zap page table pages in mirror page table while
1772 	 * TD is active, though guest pages mapped in mirror page table could be
1773 	 * zapped during TD is active, e.g. for shared <-> private conversion
1774 	 * and slot move/deletion.
1775 	 */
1776 	if (KVM_BUG_ON(is_hkid_assigned(kvm_tdx), kvm))
1777 		return -EINVAL;
1778 
1779 	/*
1780 	 * The HKID assigned to this TD was already freed and cache was
1781 	 * already flushed. We don't have to flush again.
1782 	 */
1783 	return tdx_reclaim_page(virt_to_page(private_spt));
1784 }
1785 
1786 int tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
1787 				 enum pg_level level, kvm_pfn_t pfn)
1788 {
1789 	struct page *page = pfn_to_page(pfn);
1790 	int ret;
1791 
1792 	/*
1793 	 * HKID is released after all private pages have been removed, and set
1794 	 * before any might be populated. Warn if zapping is attempted when
1795 	 * there can't be anything populated in the private EPT.
1796 	 */
1797 	if (KVM_BUG_ON(!is_hkid_assigned(to_kvm_tdx(kvm)), kvm))
1798 		return -EINVAL;
1799 
1800 	ret = tdx_sept_zap_private_spte(kvm, gfn, level, page);
1801 	if (ret <= 0)
1802 		return ret;
1803 
1804 	/*
1805 	 * TDX requires TLB tracking before dropping private page.  Do
1806 	 * it here, although it is also done later.
1807 	 */
1808 	tdx_track(kvm);
1809 
1810 	return tdx_sept_drop_private_spte(kvm, gfn, level, page);
1811 }
1812 
1813 void tdx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
1814 			   int trig_mode, int vector)
1815 {
1816 	struct kvm_vcpu *vcpu = apic->vcpu;
1817 	struct vcpu_tdx *tdx = to_tdx(vcpu);
1818 
1819 	/* TDX supports only posted interrupt.  No lapic emulation. */
1820 	__vmx_deliver_posted_interrupt(vcpu, &tdx->vt.pi_desc, vector);
1821 
1822 	trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode, trig_mode, vector);
1823 }
1824 
1825 static inline bool tdx_is_sept_violation_unexpected_pending(struct kvm_vcpu *vcpu)
1826 {
1827 	u64 eeq_type = to_tdx(vcpu)->ext_exit_qualification & TDX_EXT_EXIT_QUAL_TYPE_MASK;
1828 	u64 eq = vmx_get_exit_qual(vcpu);
1829 
1830 	if (eeq_type != TDX_EXT_EXIT_QUAL_TYPE_PENDING_EPT_VIOLATION)
1831 		return false;
1832 
1833 	return !(eq & EPT_VIOLATION_PROT_MASK) && !(eq & EPT_VIOLATION_EXEC_FOR_RING3_LIN);
1834 }
1835 
1836 static int tdx_handle_ept_violation(struct kvm_vcpu *vcpu)
1837 {
1838 	unsigned long exit_qual;
1839 	gpa_t gpa = to_tdx(vcpu)->exit_gpa;
1840 	bool local_retry = false;
1841 	int ret;
1842 
1843 	if (vt_is_tdx_private_gpa(vcpu->kvm, gpa)) {
1844 		if (tdx_is_sept_violation_unexpected_pending(vcpu)) {
1845 			pr_warn("Guest access before accepting 0x%llx on vCPU %d\n",
1846 				gpa, vcpu->vcpu_id);
1847 			kvm_vm_dead(vcpu->kvm);
1848 			return -EIO;
1849 		}
1850 		/*
1851 		 * Always treat SEPT violations as write faults.  Ignore the
1852 		 * EXIT_QUALIFICATION reported by TDX-SEAM for SEPT violations.
1853 		 * TD private pages are always RWX in the SEPT tables,
1854 		 * i.e. they're always mapped writable.  Just as importantly,
1855 		 * treating SEPT violations as write faults is necessary to
1856 		 * avoid COW allocations, which will cause TDAUGPAGE failures
1857 		 * due to aliasing a single HPA to multiple GPAs.
1858 		 */
1859 		exit_qual = EPT_VIOLATION_ACC_WRITE;
1860 
1861 		/* Only private GPA triggers zero-step mitigation */
1862 		local_retry = true;
1863 	} else {
1864 		exit_qual = vmx_get_exit_qual(vcpu);
1865 		/*
1866 		 * EPT violation due to instruction fetch should never be
1867 		 * triggered from shared memory in TDX guest.  If such EPT
1868 		 * violation occurs, treat it as broken hardware.
1869 		 */
1870 		if (KVM_BUG_ON(exit_qual & EPT_VIOLATION_ACC_INSTR, vcpu->kvm))
1871 			return -EIO;
1872 	}
1873 
1874 	trace_kvm_page_fault(vcpu, gpa, exit_qual);
1875 
1876 	/*
1877 	 * To minimize TDH.VP.ENTER invocations, retry locally for private GPA
1878 	 * mapping in TDX.
1879 	 *
1880 	 * KVM may return RET_PF_RETRY for private GPA due to
1881 	 * - contentions when atomically updating SPTEs of the mirror page table
1882 	 * - in-progress GFN invalidation or memslot removal.
1883 	 * - TDX_OPERAND_BUSY error from TDH.MEM.PAGE.AUG or TDH.MEM.SEPT.ADD,
1884 	 *   caused by contentions with TDH.VP.ENTER (with zero-step mitigation)
1885 	 *   or certain TDCALLs.
1886 	 *
1887 	 * If TDH.VP.ENTER is invoked more times than the threshold set by the
1888 	 * TDX module before KVM resolves the private GPA mapping, the TDX
1889 	 * module will activate zero-step mitigation during TDH.VP.ENTER. This
1890 	 * process acquires an SEPT tree lock in the TDX module, leading to
1891 	 * further contentions with TDH.MEM.PAGE.AUG or TDH.MEM.SEPT.ADD
1892 	 * operations on other vCPUs.
1893 	 *
1894 	 * Breaking out of local retries for kvm_vcpu_has_events() is for
1895 	 * interrupt injection. kvm_vcpu_has_events() should not see pending
1896 	 * events for TDX. Since KVM can't determine if IRQs (or NMIs) are
1897 	 * blocked by TDs, false positives are inevitable i.e., KVM may re-enter
1898 	 * the guest even if the IRQ/NMI can't be delivered.
1899 	 *
1900 	 * Note: even without breaking out of local retries, zero-step
1901 	 * mitigation may still occur due to
1902 	 * - invoking of TDH.VP.ENTER after KVM_EXIT_MEMORY_FAULT,
1903 	 * - a single RIP causing EPT violations for more GFNs than the
1904 	 *   threshold count.
1905 	 * This is safe, as triggering zero-step mitigation only introduces
1906 	 * contentions to page installation SEAMCALLs on other vCPUs, which will
1907 	 * handle retries locally in their EPT violation handlers.
1908 	 */
1909 	while (1) {
1910 		ret = __vmx_handle_ept_violation(vcpu, gpa, exit_qual);
1911 
1912 		if (ret != RET_PF_RETRY || !local_retry)
1913 			break;
1914 
1915 		if (kvm_vcpu_has_events(vcpu) || signal_pending(current))
1916 			break;
1917 
1918 		if (kvm_check_request(KVM_REQ_VM_DEAD, vcpu)) {
1919 			ret = -EIO;
1920 			break;
1921 		}
1922 
1923 		cond_resched();
1924 	}
1925 	return ret;
1926 }
1927 
1928 int tdx_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
1929 {
1930 	if (err) {
1931 		tdvmcall_set_return_code(vcpu, TDVMCALL_STATUS_INVALID_OPERAND);
1932 		return 1;
1933 	}
1934 
1935 	if (vmx_get_exit_reason(vcpu).basic == EXIT_REASON_MSR_READ)
1936 		tdvmcall_set_return_val(vcpu, kvm_read_edx_eax(vcpu));
1937 
1938 	return 1;
1939 }
1940 
1941 
1942 int tdx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t fastpath)
1943 {
1944 	struct vcpu_tdx *tdx = to_tdx(vcpu);
1945 	u64 vp_enter_ret = tdx->vp_enter_ret;
1946 	union vmx_exit_reason exit_reason = vmx_get_exit_reason(vcpu);
1947 
1948 	if (fastpath != EXIT_FASTPATH_NONE)
1949 		return 1;
1950 
1951 	if (unlikely(vp_enter_ret == EXIT_REASON_EPT_MISCONFIG)) {
1952 		KVM_BUG_ON(1, vcpu->kvm);
1953 		return -EIO;
1954 	}
1955 
1956 	/*
1957 	 * Handle TDX SW errors, including TDX_SEAMCALL_UD, TDX_SEAMCALL_GP and
1958 	 * TDX_SEAMCALL_VMFAILINVALID.
1959 	 */
1960 	if (unlikely((vp_enter_ret & TDX_SW_ERROR) == TDX_SW_ERROR)) {
1961 		KVM_BUG_ON(!kvm_rebooting, vcpu->kvm);
1962 		goto unhandled_exit;
1963 	}
1964 
1965 	if (unlikely(tdx_failed_vmentry(vcpu))) {
1966 		/*
1967 		 * If the guest state is protected, that means off-TD debug is
1968 		 * not enabled, TDX_NON_RECOVERABLE must be set.
1969 		 */
1970 		WARN_ON_ONCE(vcpu->arch.guest_state_protected &&
1971 				!(vp_enter_ret & TDX_NON_RECOVERABLE));
1972 		vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
1973 		vcpu->run->fail_entry.hardware_entry_failure_reason = exit_reason.full;
1974 		vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
1975 		return 0;
1976 	}
1977 
1978 	if (unlikely(vp_enter_ret & (TDX_ERROR | TDX_NON_RECOVERABLE)) &&
1979 		exit_reason.basic != EXIT_REASON_TRIPLE_FAULT) {
1980 		kvm_pr_unimpl("TD vp_enter_ret 0x%llx\n", vp_enter_ret);
1981 		goto unhandled_exit;
1982 	}
1983 
1984 	WARN_ON_ONCE(exit_reason.basic != EXIT_REASON_TRIPLE_FAULT &&
1985 		     (vp_enter_ret & TDX_SEAMCALL_STATUS_MASK) != TDX_SUCCESS);
1986 
1987 	switch (exit_reason.basic) {
1988 	case EXIT_REASON_TRIPLE_FAULT:
1989 		vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
1990 		vcpu->mmio_needed = 0;
1991 		return 0;
1992 	case EXIT_REASON_EXCEPTION_NMI:
1993 		return tdx_handle_exception_nmi(vcpu);
1994 	case EXIT_REASON_EXTERNAL_INTERRUPT:
1995 		++vcpu->stat.irq_exits;
1996 		return 1;
1997 	case EXIT_REASON_CPUID:
1998 		return tdx_emulate_cpuid(vcpu);
1999 	case EXIT_REASON_HLT:
2000 		return kvm_emulate_halt_noskip(vcpu);
2001 	case EXIT_REASON_TDCALL:
2002 		return handle_tdvmcall(vcpu);
2003 	case EXIT_REASON_VMCALL:
2004 		return tdx_emulate_vmcall(vcpu);
2005 	case EXIT_REASON_IO_INSTRUCTION:
2006 		return tdx_emulate_io(vcpu);
2007 	case EXIT_REASON_MSR_READ:
2008 		kvm_rcx_write(vcpu, tdx->vp_enter_args.r12);
2009 		return kvm_emulate_rdmsr(vcpu);
2010 	case EXIT_REASON_MSR_WRITE:
2011 		kvm_rcx_write(vcpu, tdx->vp_enter_args.r12);
2012 		kvm_rax_write(vcpu, tdx->vp_enter_args.r13 & -1u);
2013 		kvm_rdx_write(vcpu, tdx->vp_enter_args.r13 >> 32);
2014 		return kvm_emulate_wrmsr(vcpu);
2015 	case EXIT_REASON_EPT_MISCONFIG:
2016 		return tdx_emulate_mmio(vcpu);
2017 	case EXIT_REASON_EPT_VIOLATION:
2018 		return tdx_handle_ept_violation(vcpu);
2019 	case EXIT_REASON_OTHER_SMI:
2020 		/*
2021 		 * Unlike VMX, SMI in SEAM non-root mode (i.e. when
2022 		 * TD guest vCPU is running) will cause VM exit to TDX module,
2023 		 * then SEAMRET to KVM.  Once it exits to KVM, SMI is delivered
2024 		 * and handled by kernel handler right away.
2025 		 *
2026 		 * The Other SMI exit can also be caused by the SEAM non-root
2027 		 * machine check delivered via Machine Check System Management
2028 		 * Interrupt (MSMI), but it has already been handled by the
2029 		 * kernel machine check handler, i.e., the memory page has been
2030 		 * marked as poisoned and it won't be freed to the free list
2031 		 * when the TDX guest is terminated (the TDX module marks the
2032 		 * guest as dead and prevent it from further running when
2033 		 * machine check happens in SEAM non-root).
2034 		 *
2035 		 * - A MSMI will not reach here, it's handled as non_recoverable
2036 		 *   case above.
2037 		 * - If it's not an MSMI, no need to do anything here.
2038 		 */
2039 		return 1;
2040 	default:
2041 		break;
2042 	}
2043 
2044 unhandled_exit:
2045 	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
2046 	vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
2047 	vcpu->run->internal.ndata = 2;
2048 	vcpu->run->internal.data[0] = vp_enter_ret;
2049 	vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
2050 	return 0;
2051 }
2052 
2053 void tdx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
2054 		u64 *info1, u64 *info2, u32 *intr_info, u32 *error_code)
2055 {
2056 	struct vcpu_tdx *tdx = to_tdx(vcpu);
2057 
2058 	*reason = tdx->vt.exit_reason.full;
2059 	if (*reason != -1u) {
2060 		*info1 = vmx_get_exit_qual(vcpu);
2061 		*info2 = tdx->ext_exit_qualification;
2062 		*intr_info = vmx_get_intr_info(vcpu);
2063 	} else {
2064 		*info1 = 0;
2065 		*info2 = 0;
2066 		*intr_info = 0;
2067 	}
2068 
2069 	*error_code = 0;
2070 }
2071 
2072 bool tdx_has_emulated_msr(u32 index)
2073 {
2074 	switch (index) {
2075 	case MSR_IA32_UCODE_REV:
2076 	case MSR_IA32_ARCH_CAPABILITIES:
2077 	case MSR_IA32_POWER_CTL:
2078 	case MSR_IA32_CR_PAT:
2079 	case MSR_MTRRcap:
2080 	case MTRRphysBase_MSR(0) ... MSR_MTRRfix4K_F8000:
2081 	case MSR_MTRRdefType:
2082 	case MSR_IA32_TSC_DEADLINE:
2083 	case MSR_IA32_MISC_ENABLE:
2084 	case MSR_PLATFORM_INFO:
2085 	case MSR_MISC_FEATURES_ENABLES:
2086 	case MSR_IA32_APICBASE:
2087 	case MSR_EFER:
2088 	case MSR_IA32_FEAT_CTL:
2089 	case MSR_IA32_MCG_CAP:
2090 	case MSR_IA32_MCG_STATUS:
2091 	case MSR_IA32_MCG_CTL:
2092 	case MSR_IA32_MCG_EXT_CTL:
2093 	case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
2094 	case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1:
2095 		/* MSR_IA32_MCx_{CTL, STATUS, ADDR, MISC, CTL2} */
2096 	case MSR_KVM_POLL_CONTROL:
2097 		return true;
2098 	case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
2099 		/*
2100 		 * x2APIC registers that are virtualized by the CPU can't be
2101 		 * emulated, KVM doesn't have access to the virtual APIC page.
2102 		 */
2103 		switch (index) {
2104 		case X2APIC_MSR(APIC_TASKPRI):
2105 		case X2APIC_MSR(APIC_PROCPRI):
2106 		case X2APIC_MSR(APIC_EOI):
2107 		case X2APIC_MSR(APIC_ISR) ... X2APIC_MSR(APIC_ISR + APIC_ISR_NR):
2108 		case X2APIC_MSR(APIC_TMR) ... X2APIC_MSR(APIC_TMR + APIC_ISR_NR):
2109 		case X2APIC_MSR(APIC_IRR) ... X2APIC_MSR(APIC_IRR + APIC_ISR_NR):
2110 			return false;
2111 		default:
2112 			return true;
2113 		}
2114 	default:
2115 		return false;
2116 	}
2117 }
2118 
2119 static bool tdx_is_read_only_msr(u32 index)
2120 {
2121 	return  index == MSR_IA32_APICBASE || index == MSR_EFER ||
2122 		index == MSR_IA32_FEAT_CTL;
2123 }
2124 
2125 int tdx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
2126 {
2127 	switch (msr->index) {
2128 	case MSR_IA32_FEAT_CTL:
2129 		/*
2130 		 * MCE and MCA are advertised via cpuid. Guest kernel could
2131 		 * check if LMCE is enabled or not.
2132 		 */
2133 		msr->data = FEAT_CTL_LOCKED;
2134 		if (vcpu->arch.mcg_cap & MCG_LMCE_P)
2135 			msr->data |= FEAT_CTL_LMCE_ENABLED;
2136 		return 0;
2137 	case MSR_IA32_MCG_EXT_CTL:
2138 		if (!msr->host_initiated && !(vcpu->arch.mcg_cap & MCG_LMCE_P))
2139 			return 1;
2140 		msr->data = vcpu->arch.mcg_ext_ctl;
2141 		return 0;
2142 	default:
2143 		if (!tdx_has_emulated_msr(msr->index))
2144 			return 1;
2145 
2146 		return kvm_get_msr_common(vcpu, msr);
2147 	}
2148 }
2149 
2150 int tdx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
2151 {
2152 	switch (msr->index) {
2153 	case MSR_IA32_MCG_EXT_CTL:
2154 		if ((!msr->host_initiated && !(vcpu->arch.mcg_cap & MCG_LMCE_P)) ||
2155 		    (msr->data & ~MCG_EXT_CTL_LMCE_EN))
2156 			return 1;
2157 		vcpu->arch.mcg_ext_ctl = msr->data;
2158 		return 0;
2159 	default:
2160 		if (tdx_is_read_only_msr(msr->index))
2161 			return 1;
2162 
2163 		if (!tdx_has_emulated_msr(msr->index))
2164 			return 1;
2165 
2166 		return kvm_set_msr_common(vcpu, msr);
2167 	}
2168 }
2169 
2170 static int tdx_get_capabilities(struct kvm_tdx_cmd *cmd)
2171 {
2172 	const struct tdx_sys_info_td_conf *td_conf = &tdx_sysinfo->td_conf;
2173 	struct kvm_tdx_capabilities __user *user_caps;
2174 	struct kvm_tdx_capabilities *caps = NULL;
2175 	int ret = 0;
2176 
2177 	/* flags is reserved for future use */
2178 	if (cmd->flags)
2179 		return -EINVAL;
2180 
2181 	caps = kmalloc(sizeof(*caps) +
2182 		       sizeof(struct kvm_cpuid_entry2) * td_conf->num_cpuid_config,
2183 		       GFP_KERNEL);
2184 	if (!caps)
2185 		return -ENOMEM;
2186 
2187 	user_caps = u64_to_user_ptr(cmd->data);
2188 	if (copy_from_user(caps, user_caps, sizeof(*caps))) {
2189 		ret = -EFAULT;
2190 		goto out;
2191 	}
2192 
2193 	if (caps->cpuid.nent < td_conf->num_cpuid_config) {
2194 		ret = -E2BIG;
2195 		goto out;
2196 	}
2197 
2198 	ret = init_kvm_tdx_caps(td_conf, caps);
2199 	if (ret)
2200 		goto out;
2201 
2202 	if (copy_to_user(user_caps, caps, sizeof(*caps))) {
2203 		ret = -EFAULT;
2204 		goto out;
2205 	}
2206 
2207 	if (copy_to_user(user_caps->cpuid.entries, caps->cpuid.entries,
2208 			 caps->cpuid.nent *
2209 			 sizeof(caps->cpuid.entries[0])))
2210 		ret = -EFAULT;
2211 
2212 out:
2213 	/* kfree() accepts NULL. */
2214 	kfree(caps);
2215 	return ret;
2216 }
2217 
2218 /*
2219  * KVM reports guest physical address in CPUID.0x800000008.EAX[23:16], which is
2220  * similar to TDX's GPAW. Use this field as the interface for userspace to
2221  * configure the GPAW and EPT level for TDs.
2222  *
2223  * Only values 48 and 52 are supported. Value 52 means GPAW-52 and EPT level
2224  * 5, Value 48 means GPAW-48 and EPT level 4. For value 48, GPAW-48 is always
2225  * supported. Value 52 is only supported when the platform supports 5 level
2226  * EPT.
2227  */
2228 static int setup_tdparams_eptp_controls(struct kvm_cpuid2 *cpuid,
2229 					struct td_params *td_params)
2230 {
2231 	const struct kvm_cpuid_entry2 *entry;
2232 	int guest_pa;
2233 
2234 	entry = kvm_find_cpuid_entry2(cpuid->entries, cpuid->nent, 0x80000008, 0);
2235 	if (!entry)
2236 		return -EINVAL;
2237 
2238 	guest_pa = tdx_get_guest_phys_addr_bits(entry->eax);
2239 
2240 	if (guest_pa != 48 && guest_pa != 52)
2241 		return -EINVAL;
2242 
2243 	if (guest_pa == 52 && !cpu_has_vmx_ept_5levels())
2244 		return -EINVAL;
2245 
2246 	td_params->eptp_controls = VMX_EPTP_MT_WB;
2247 	if (guest_pa == 52) {
2248 		td_params->eptp_controls |= VMX_EPTP_PWL_5;
2249 		td_params->config_flags |= TDX_CONFIG_FLAGS_MAX_GPAW;
2250 	} else {
2251 		td_params->eptp_controls |= VMX_EPTP_PWL_4;
2252 	}
2253 
2254 	return 0;
2255 }
2256 
2257 static int setup_tdparams_cpuids(struct kvm_cpuid2 *cpuid,
2258 				 struct td_params *td_params)
2259 {
2260 	const struct tdx_sys_info_td_conf *td_conf = &tdx_sysinfo->td_conf;
2261 	const struct kvm_cpuid_entry2 *entry;
2262 	struct tdx_cpuid_value *value;
2263 	int i, copy_cnt = 0;
2264 
2265 	/*
2266 	 * td_params.cpuid_values: The number and the order of cpuid_value must
2267 	 * be same to the one of struct tdsysinfo.{num_cpuid_config, cpuid_configs}
2268 	 * It's assumed that td_params was zeroed.
2269 	 */
2270 	for (i = 0; i < td_conf->num_cpuid_config; i++) {
2271 		struct kvm_cpuid_entry2 tmp;
2272 
2273 		td_init_cpuid_entry2(&tmp, i);
2274 
2275 		entry = kvm_find_cpuid_entry2(cpuid->entries, cpuid->nent,
2276 					      tmp.function, tmp.index);
2277 		if (!entry)
2278 			continue;
2279 
2280 		if (tdx_unsupported_cpuid(entry))
2281 			return -EINVAL;
2282 
2283 		copy_cnt++;
2284 
2285 		value = &td_params->cpuid_values[i];
2286 		value->eax = entry->eax;
2287 		value->ebx = entry->ebx;
2288 		value->ecx = entry->ecx;
2289 		value->edx = entry->edx;
2290 
2291 		/*
2292 		 * TDX module does not accept nonzero bits 16..23 for the
2293 		 * CPUID[0x80000008].EAX, see setup_tdparams_eptp_controls().
2294 		 */
2295 		if (tmp.function == 0x80000008)
2296 			value->eax = tdx_set_guest_phys_addr_bits(value->eax, 0);
2297 	}
2298 
2299 	/*
2300 	 * Rely on the TDX module to reject invalid configuration, but it can't
2301 	 * check of leafs that don't have a proper slot in td_params->cpuid_values
2302 	 * to stick then. So fail if there were entries that didn't get copied to
2303 	 * td_params.
2304 	 */
2305 	if (copy_cnt != cpuid->nent)
2306 		return -EINVAL;
2307 
2308 	return 0;
2309 }
2310 
2311 static int setup_tdparams(struct kvm *kvm, struct td_params *td_params,
2312 			struct kvm_tdx_init_vm *init_vm)
2313 {
2314 	const struct tdx_sys_info_td_conf *td_conf = &tdx_sysinfo->td_conf;
2315 	struct kvm_cpuid2 *cpuid = &init_vm->cpuid;
2316 	int ret;
2317 
2318 	if (kvm->created_vcpus)
2319 		return -EBUSY;
2320 
2321 	if (init_vm->attributes & ~tdx_get_supported_attrs(td_conf))
2322 		return -EINVAL;
2323 
2324 	if (init_vm->xfam & ~tdx_get_supported_xfam(td_conf))
2325 		return -EINVAL;
2326 
2327 	td_params->max_vcpus = kvm->max_vcpus;
2328 	td_params->attributes = init_vm->attributes | td_conf->attributes_fixed1;
2329 	td_params->xfam = init_vm->xfam | td_conf->xfam_fixed1;
2330 
2331 	td_params->config_flags = TDX_CONFIG_FLAGS_NO_RBP_MOD;
2332 	td_params->tsc_frequency = TDX_TSC_KHZ_TO_25MHZ(kvm->arch.default_tsc_khz);
2333 
2334 	ret = setup_tdparams_eptp_controls(cpuid, td_params);
2335 	if (ret)
2336 		return ret;
2337 
2338 	ret = setup_tdparams_cpuids(cpuid, td_params);
2339 	if (ret)
2340 		return ret;
2341 
2342 #define MEMCPY_SAME_SIZE(dst, src)				\
2343 	do {							\
2344 		BUILD_BUG_ON(sizeof(dst) != sizeof(src));	\
2345 		memcpy((dst), (src), sizeof(dst));		\
2346 	} while (0)
2347 
2348 	MEMCPY_SAME_SIZE(td_params->mrconfigid, init_vm->mrconfigid);
2349 	MEMCPY_SAME_SIZE(td_params->mrowner, init_vm->mrowner);
2350 	MEMCPY_SAME_SIZE(td_params->mrownerconfig, init_vm->mrownerconfig);
2351 
2352 	return 0;
2353 }
2354 
2355 static int __tdx_td_init(struct kvm *kvm, struct td_params *td_params,
2356 			 u64 *seamcall_err)
2357 {
2358 	struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
2359 	cpumask_var_t packages;
2360 	struct page **tdcs_pages = NULL;
2361 	struct page *tdr_page;
2362 	int ret, i;
2363 	u64 err, rcx;
2364 
2365 	*seamcall_err = 0;
2366 	ret = tdx_guest_keyid_alloc();
2367 	if (ret < 0)
2368 		return ret;
2369 	kvm_tdx->hkid = ret;
2370 	kvm_tdx->misc_cg = get_current_misc_cg();
2371 	ret = misc_cg_try_charge(MISC_CG_RES_TDX, kvm_tdx->misc_cg, 1);
2372 	if (ret)
2373 		goto free_hkid;
2374 
2375 	ret = -ENOMEM;
2376 
2377 	atomic_inc(&nr_configured_hkid);
2378 
2379 	tdr_page = alloc_page(GFP_KERNEL);
2380 	if (!tdr_page)
2381 		goto free_hkid;
2382 
2383 	kvm_tdx->td.tdcs_nr_pages = tdx_sysinfo->td_ctrl.tdcs_base_size / PAGE_SIZE;
2384 	/* TDVPS = TDVPR(4K page) + TDCX(multiple 4K pages), -1 for TDVPR. */
2385 	kvm_tdx->td.tdcx_nr_pages = tdx_sysinfo->td_ctrl.tdvps_base_size / PAGE_SIZE - 1;
2386 	tdcs_pages = kcalloc(kvm_tdx->td.tdcs_nr_pages, sizeof(*kvm_tdx->td.tdcs_pages),
2387 			     GFP_KERNEL | __GFP_ZERO);
2388 	if (!tdcs_pages)
2389 		goto free_tdr;
2390 
2391 	for (i = 0; i < kvm_tdx->td.tdcs_nr_pages; i++) {
2392 		tdcs_pages[i] = alloc_page(GFP_KERNEL);
2393 		if (!tdcs_pages[i])
2394 			goto free_tdcs;
2395 	}
2396 
2397 	if (!zalloc_cpumask_var(&packages, GFP_KERNEL))
2398 		goto free_tdcs;
2399 
2400 	cpus_read_lock();
2401 
2402 	/*
2403 	 * Need at least one CPU of the package to be online in order to
2404 	 * program all packages for host key id.  Check it.
2405 	 */
2406 	for_each_present_cpu(i)
2407 		cpumask_set_cpu(topology_physical_package_id(i), packages);
2408 	for_each_online_cpu(i)
2409 		cpumask_clear_cpu(topology_physical_package_id(i), packages);
2410 	if (!cpumask_empty(packages)) {
2411 		ret = -EIO;
2412 		/*
2413 		 * Because it's hard for human operator to figure out the
2414 		 * reason, warn it.
2415 		 */
2416 #define MSG_ALLPKG	"All packages need to have online CPU to create TD. Online CPU and retry.\n"
2417 		pr_warn_ratelimited(MSG_ALLPKG);
2418 		goto free_packages;
2419 	}
2420 
2421 	/*
2422 	 * TDH.MNG.CREATE tries to grab the global TDX module and fails
2423 	 * with TDX_OPERAND_BUSY when it fails to grab.  Take the global
2424 	 * lock to prevent it from failure.
2425 	 */
2426 	mutex_lock(&tdx_lock);
2427 	kvm_tdx->td.tdr_page = tdr_page;
2428 	err = tdh_mng_create(&kvm_tdx->td, kvm_tdx->hkid);
2429 	mutex_unlock(&tdx_lock);
2430 
2431 	if (err == TDX_RND_NO_ENTROPY) {
2432 		ret = -EAGAIN;
2433 		goto free_packages;
2434 	}
2435 
2436 	if (WARN_ON_ONCE(err)) {
2437 		pr_tdx_error(TDH_MNG_CREATE, err);
2438 		ret = -EIO;
2439 		goto free_packages;
2440 	}
2441 
2442 	for_each_online_cpu(i) {
2443 		int pkg = topology_physical_package_id(i);
2444 
2445 		if (cpumask_test_and_set_cpu(pkg, packages))
2446 			continue;
2447 
2448 		/*
2449 		 * Program the memory controller in the package with an
2450 		 * encryption key associated to a TDX private host key id
2451 		 * assigned to this TDR.  Concurrent operations on same memory
2452 		 * controller results in TDX_OPERAND_BUSY. No locking needed
2453 		 * beyond the cpus_read_lock() above as it serializes against
2454 		 * hotplug and the first online CPU of the package is always
2455 		 * used. We never have two CPUs in the same socket trying to
2456 		 * program the key.
2457 		 */
2458 		ret = smp_call_on_cpu(i, tdx_do_tdh_mng_key_config,
2459 				      kvm_tdx, true);
2460 		if (ret)
2461 			break;
2462 	}
2463 	cpus_read_unlock();
2464 	free_cpumask_var(packages);
2465 	if (ret) {
2466 		i = 0;
2467 		goto teardown;
2468 	}
2469 
2470 	kvm_tdx->td.tdcs_pages = tdcs_pages;
2471 	for (i = 0; i < kvm_tdx->td.tdcs_nr_pages; i++) {
2472 		err = tdh_mng_addcx(&kvm_tdx->td, tdcs_pages[i]);
2473 		if (err == TDX_RND_NO_ENTROPY) {
2474 			/* Here it's hard to allow userspace to retry. */
2475 			ret = -EAGAIN;
2476 			goto teardown;
2477 		}
2478 		if (WARN_ON_ONCE(err)) {
2479 			pr_tdx_error(TDH_MNG_ADDCX, err);
2480 			ret = -EIO;
2481 			goto teardown;
2482 		}
2483 	}
2484 
2485 	err = tdh_mng_init(&kvm_tdx->td, __pa(td_params), &rcx);
2486 	if ((err & TDX_SEAMCALL_STATUS_MASK) == TDX_OPERAND_INVALID) {
2487 		/*
2488 		 * Because a user gives operands, don't warn.
2489 		 * Return a hint to the user because it's sometimes hard for the
2490 		 * user to figure out which operand is invalid.  SEAMCALL status
2491 		 * code includes which operand caused invalid operand error.
2492 		 */
2493 		*seamcall_err = err;
2494 		ret = -EINVAL;
2495 		goto teardown;
2496 	} else if (WARN_ON_ONCE(err)) {
2497 		pr_tdx_error_1(TDH_MNG_INIT, err, rcx);
2498 		ret = -EIO;
2499 		goto teardown;
2500 	}
2501 
2502 	return 0;
2503 
2504 	/*
2505 	 * The sequence for freeing resources from a partially initialized TD
2506 	 * varies based on where in the initialization flow failure occurred.
2507 	 * Simply use the full teardown and destroy, which naturally play nice
2508 	 * with partial initialization.
2509 	 */
2510 teardown:
2511 	/* Only free pages not yet added, so start at 'i' */
2512 	for (; i < kvm_tdx->td.tdcs_nr_pages; i++) {
2513 		if (tdcs_pages[i]) {
2514 			__free_page(tdcs_pages[i]);
2515 			tdcs_pages[i] = NULL;
2516 		}
2517 	}
2518 	if (!kvm_tdx->td.tdcs_pages)
2519 		kfree(tdcs_pages);
2520 
2521 	tdx_mmu_release_hkid(kvm);
2522 	tdx_reclaim_td_control_pages(kvm);
2523 
2524 	return ret;
2525 
2526 free_packages:
2527 	cpus_read_unlock();
2528 	free_cpumask_var(packages);
2529 
2530 free_tdcs:
2531 	for (i = 0; i < kvm_tdx->td.tdcs_nr_pages; i++) {
2532 		if (tdcs_pages[i])
2533 			__free_page(tdcs_pages[i]);
2534 	}
2535 	kfree(tdcs_pages);
2536 	kvm_tdx->td.tdcs_pages = NULL;
2537 
2538 free_tdr:
2539 	if (tdr_page)
2540 		__free_page(tdr_page);
2541 	kvm_tdx->td.tdr_page = 0;
2542 
2543 free_hkid:
2544 	tdx_hkid_free(kvm_tdx);
2545 
2546 	return ret;
2547 }
2548 
2549 static u64 tdx_td_metadata_field_read(struct kvm_tdx *tdx, u64 field_id,
2550 				      u64 *data)
2551 {
2552 	u64 err;
2553 
2554 	err = tdh_mng_rd(&tdx->td, field_id, data);
2555 
2556 	return err;
2557 }
2558 
2559 #define TDX_MD_UNREADABLE_LEAF_MASK	GENMASK(30, 7)
2560 #define TDX_MD_UNREADABLE_SUBLEAF_MASK	GENMASK(31, 7)
2561 
2562 static int tdx_read_cpuid(struct kvm_vcpu *vcpu, u32 leaf, u32 sub_leaf,
2563 			  bool sub_leaf_set, int *entry_index,
2564 			  struct kvm_cpuid_entry2 *out)
2565 {
2566 	struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
2567 	u64 field_id = TD_MD_FIELD_ID_CPUID_VALUES;
2568 	u64 ebx_eax, edx_ecx;
2569 	u64 err = 0;
2570 
2571 	if (sub_leaf > 0b1111111)
2572 		return -EINVAL;
2573 
2574 	if (*entry_index >= KVM_MAX_CPUID_ENTRIES)
2575 		return -EINVAL;
2576 
2577 	if (leaf & TDX_MD_UNREADABLE_LEAF_MASK ||
2578 	    sub_leaf & TDX_MD_UNREADABLE_SUBLEAF_MASK)
2579 		return -EINVAL;
2580 
2581 	/*
2582 	 * bit 23:17, REVSERVED: reserved, must be 0;
2583 	 * bit 16,    LEAF_31: leaf number bit 31;
2584 	 * bit 15:9,  LEAF_6_0: leaf number bits 6:0, leaf bits 30:7 are
2585 	 *                      implicitly 0;
2586 	 * bit 8,     SUBLEAF_NA: sub-leaf not applicable flag;
2587 	 * bit 7:1,   SUBLEAF_6_0: sub-leaf number bits 6:0. If SUBLEAF_NA is 1,
2588 	 *                         the SUBLEAF_6_0 is all-1.
2589 	 *                         sub-leaf bits 31:7 are implicitly 0;
2590 	 * bit 0,     ELEMENT_I: Element index within field;
2591 	 */
2592 	field_id |= ((leaf & 0x80000000) ? 1 : 0) << 16;
2593 	field_id |= (leaf & 0x7f) << 9;
2594 	if (sub_leaf_set)
2595 		field_id |= (sub_leaf & 0x7f) << 1;
2596 	else
2597 		field_id |= 0x1fe;
2598 
2599 	err = tdx_td_metadata_field_read(kvm_tdx, field_id, &ebx_eax);
2600 	if (err) //TODO check for specific errors
2601 		goto err_out;
2602 
2603 	out->eax = (u32) ebx_eax;
2604 	out->ebx = (u32) (ebx_eax >> 32);
2605 
2606 	field_id++;
2607 	err = tdx_td_metadata_field_read(kvm_tdx, field_id, &edx_ecx);
2608 	/*
2609 	 * It's weird that reading edx_ecx fails while reading ebx_eax
2610 	 * succeeded.
2611 	 */
2612 	if (WARN_ON_ONCE(err))
2613 		goto err_out;
2614 
2615 	out->ecx = (u32) edx_ecx;
2616 	out->edx = (u32) (edx_ecx >> 32);
2617 
2618 	out->function = leaf;
2619 	out->index = sub_leaf;
2620 	out->flags |= sub_leaf_set ? KVM_CPUID_FLAG_SIGNIFCANT_INDEX : 0;
2621 
2622 	/*
2623 	 * Work around missing support on old TDX modules, fetch
2624 	 * guest maxpa from gfn_direct_bits.
2625 	 */
2626 	if (leaf == 0x80000008) {
2627 		gpa_t gpa_bits = gfn_to_gpa(kvm_gfn_direct_bits(vcpu->kvm));
2628 		unsigned int g_maxpa = __ffs(gpa_bits) + 1;
2629 
2630 		out->eax = tdx_set_guest_phys_addr_bits(out->eax, g_maxpa);
2631 	}
2632 
2633 	(*entry_index)++;
2634 
2635 	return 0;
2636 
2637 err_out:
2638 	out->eax = 0;
2639 	out->ebx = 0;
2640 	out->ecx = 0;
2641 	out->edx = 0;
2642 
2643 	return -EIO;
2644 }
2645 
2646 static int tdx_td_init(struct kvm *kvm, struct kvm_tdx_cmd *cmd)
2647 {
2648 	struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
2649 	struct kvm_tdx_init_vm *init_vm;
2650 	struct td_params *td_params = NULL;
2651 	int ret;
2652 
2653 	BUILD_BUG_ON(sizeof(*init_vm) != 256 + sizeof_field(struct kvm_tdx_init_vm, cpuid));
2654 	BUILD_BUG_ON(sizeof(struct td_params) != 1024);
2655 
2656 	if (kvm_tdx->state != TD_STATE_UNINITIALIZED)
2657 		return -EINVAL;
2658 
2659 	if (cmd->flags)
2660 		return -EINVAL;
2661 
2662 	init_vm = kmalloc(sizeof(*init_vm) +
2663 			  sizeof(init_vm->cpuid.entries[0]) * KVM_MAX_CPUID_ENTRIES,
2664 			  GFP_KERNEL);
2665 	if (!init_vm)
2666 		return -ENOMEM;
2667 
2668 	if (copy_from_user(init_vm, u64_to_user_ptr(cmd->data), sizeof(*init_vm))) {
2669 		ret = -EFAULT;
2670 		goto out;
2671 	}
2672 
2673 	if (init_vm->cpuid.nent > KVM_MAX_CPUID_ENTRIES) {
2674 		ret = -E2BIG;
2675 		goto out;
2676 	}
2677 
2678 	if (copy_from_user(init_vm->cpuid.entries,
2679 			   u64_to_user_ptr(cmd->data) + sizeof(*init_vm),
2680 			   flex_array_size(init_vm, cpuid.entries, init_vm->cpuid.nent))) {
2681 		ret = -EFAULT;
2682 		goto out;
2683 	}
2684 
2685 	if (memchr_inv(init_vm->reserved, 0, sizeof(init_vm->reserved))) {
2686 		ret = -EINVAL;
2687 		goto out;
2688 	}
2689 
2690 	if (init_vm->cpuid.padding) {
2691 		ret = -EINVAL;
2692 		goto out;
2693 	}
2694 
2695 	td_params = kzalloc(sizeof(struct td_params), GFP_KERNEL);
2696 	if (!td_params) {
2697 		ret = -ENOMEM;
2698 		goto out;
2699 	}
2700 
2701 	ret = setup_tdparams(kvm, td_params, init_vm);
2702 	if (ret)
2703 		goto out;
2704 
2705 	ret = __tdx_td_init(kvm, td_params, &cmd->hw_error);
2706 	if (ret)
2707 		goto out;
2708 
2709 	kvm_tdx->tsc_offset = td_tdcs_exec_read64(kvm_tdx, TD_TDCS_EXEC_TSC_OFFSET);
2710 	kvm_tdx->tsc_multiplier = td_tdcs_exec_read64(kvm_tdx, TD_TDCS_EXEC_TSC_MULTIPLIER);
2711 	kvm_tdx->attributes = td_params->attributes;
2712 	kvm_tdx->xfam = td_params->xfam;
2713 
2714 	if (td_params->config_flags & TDX_CONFIG_FLAGS_MAX_GPAW)
2715 		kvm->arch.gfn_direct_bits = TDX_SHARED_BIT_PWL_5;
2716 	else
2717 		kvm->arch.gfn_direct_bits = TDX_SHARED_BIT_PWL_4;
2718 
2719 	kvm_tdx->state = TD_STATE_INITIALIZED;
2720 out:
2721 	/* kfree() accepts NULL. */
2722 	kfree(init_vm);
2723 	kfree(td_params);
2724 
2725 	return ret;
2726 }
2727 
2728 void tdx_flush_tlb_current(struct kvm_vcpu *vcpu)
2729 {
2730 	/*
2731 	 * flush_tlb_current() is invoked when the first time for the vcpu to
2732 	 * run or when root of shared EPT is invalidated.
2733 	 * KVM only needs to flush shared EPT because the TDX module handles TLB
2734 	 * invalidation for private EPT in tdh_vp_enter();
2735 	 *
2736 	 * A single context invalidation for shared EPT can be performed here.
2737 	 * However, this single context invalidation requires the private EPTP
2738 	 * rather than the shared EPTP to flush shared EPT, as shared EPT uses
2739 	 * private EPTP as its ASID for TLB invalidation.
2740 	 *
2741 	 * To avoid reading back private EPTP, perform a global invalidation for
2742 	 * shared EPT instead to keep this function simple.
2743 	 */
2744 	ept_sync_global();
2745 }
2746 
2747 void tdx_flush_tlb_all(struct kvm_vcpu *vcpu)
2748 {
2749 	/*
2750 	 * TDX has called tdx_track() in tdx_sept_remove_private_spte() to
2751 	 * ensure that private EPT will be flushed on the next TD enter. No need
2752 	 * to call tdx_track() here again even when this callback is a result of
2753 	 * zapping private EPT.
2754 	 *
2755 	 * Due to the lack of the context to determine which EPT has been
2756 	 * affected by zapping, invoke invept() directly here for both shared
2757 	 * EPT and private EPT for simplicity, though it's not necessary for
2758 	 * private EPT.
2759 	 */
2760 	ept_sync_global();
2761 }
2762 
2763 static int tdx_td_finalize(struct kvm *kvm, struct kvm_tdx_cmd *cmd)
2764 {
2765 	struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
2766 
2767 	guard(mutex)(&kvm->slots_lock);
2768 
2769 	if (!is_hkid_assigned(kvm_tdx) || kvm_tdx->state == TD_STATE_RUNNABLE)
2770 		return -EINVAL;
2771 	/*
2772 	 * Pages are pending for KVM_TDX_INIT_MEM_REGION to issue
2773 	 * TDH.MEM.PAGE.ADD().
2774 	 */
2775 	if (atomic64_read(&kvm_tdx->nr_premapped))
2776 		return -EINVAL;
2777 
2778 	cmd->hw_error = tdh_mr_finalize(&kvm_tdx->td);
2779 	if (tdx_operand_busy(cmd->hw_error))
2780 		return -EBUSY;
2781 	if (KVM_BUG_ON(cmd->hw_error, kvm)) {
2782 		pr_tdx_error(TDH_MR_FINALIZE, cmd->hw_error);
2783 		return -EIO;
2784 	}
2785 
2786 	kvm_tdx->state = TD_STATE_RUNNABLE;
2787 	/* TD_STATE_RUNNABLE must be set before 'pre_fault_allowed' */
2788 	smp_wmb();
2789 	kvm->arch.pre_fault_allowed = true;
2790 	return 0;
2791 }
2792 
2793 int tdx_vm_ioctl(struct kvm *kvm, void __user *argp)
2794 {
2795 	struct kvm_tdx_cmd tdx_cmd;
2796 	int r;
2797 
2798 	if (copy_from_user(&tdx_cmd, argp, sizeof(struct kvm_tdx_cmd)))
2799 		return -EFAULT;
2800 
2801 	/*
2802 	 * Userspace should never set hw_error. It is used to fill
2803 	 * hardware-defined error by the kernel.
2804 	 */
2805 	if (tdx_cmd.hw_error)
2806 		return -EINVAL;
2807 
2808 	mutex_lock(&kvm->lock);
2809 
2810 	switch (tdx_cmd.id) {
2811 	case KVM_TDX_CAPABILITIES:
2812 		r = tdx_get_capabilities(&tdx_cmd);
2813 		break;
2814 	case KVM_TDX_INIT_VM:
2815 		r = tdx_td_init(kvm, &tdx_cmd);
2816 		break;
2817 	case KVM_TDX_FINALIZE_VM:
2818 		r = tdx_td_finalize(kvm, &tdx_cmd);
2819 		break;
2820 	default:
2821 		r = -EINVAL;
2822 		goto out;
2823 	}
2824 
2825 	if (copy_to_user(argp, &tdx_cmd, sizeof(struct kvm_tdx_cmd)))
2826 		r = -EFAULT;
2827 
2828 out:
2829 	mutex_unlock(&kvm->lock);
2830 	return r;
2831 }
2832 
2833 /* VMM can pass one 64bit auxiliary data to vcpu via RCX for guest BIOS. */
2834 static int tdx_td_vcpu_init(struct kvm_vcpu *vcpu, u64 vcpu_rcx)
2835 {
2836 	struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
2837 	struct vcpu_tdx *tdx = to_tdx(vcpu);
2838 	struct page *page;
2839 	int ret, i;
2840 	u64 err;
2841 
2842 	page = alloc_page(GFP_KERNEL);
2843 	if (!page)
2844 		return -ENOMEM;
2845 	tdx->vp.tdvpr_page = page;
2846 
2847 	tdx->vp.tdcx_pages = kcalloc(kvm_tdx->td.tdcx_nr_pages, sizeof(*tdx->vp.tdcx_pages),
2848 			       	     GFP_KERNEL);
2849 	if (!tdx->vp.tdcx_pages) {
2850 		ret = -ENOMEM;
2851 		goto free_tdvpr;
2852 	}
2853 
2854 	for (i = 0; i < kvm_tdx->td.tdcx_nr_pages; i++) {
2855 		page = alloc_page(GFP_KERNEL);
2856 		if (!page) {
2857 			ret = -ENOMEM;
2858 			goto free_tdcx;
2859 		}
2860 		tdx->vp.tdcx_pages[i] = page;
2861 	}
2862 
2863 	err = tdh_vp_create(&kvm_tdx->td, &tdx->vp);
2864 	if (KVM_BUG_ON(err, vcpu->kvm)) {
2865 		ret = -EIO;
2866 		pr_tdx_error(TDH_VP_CREATE, err);
2867 		goto free_tdcx;
2868 	}
2869 
2870 	for (i = 0; i < kvm_tdx->td.tdcx_nr_pages; i++) {
2871 		err = tdh_vp_addcx(&tdx->vp, tdx->vp.tdcx_pages[i]);
2872 		if (KVM_BUG_ON(err, vcpu->kvm)) {
2873 			pr_tdx_error(TDH_VP_ADDCX, err);
2874 			/*
2875 			 * Pages already added are reclaimed by the vcpu_free
2876 			 * method, but the rest are freed here.
2877 			 */
2878 			for (; i < kvm_tdx->td.tdcx_nr_pages; i++) {
2879 				__free_page(tdx->vp.tdcx_pages[i]);
2880 				tdx->vp.tdcx_pages[i] = NULL;
2881 			}
2882 			return -EIO;
2883 		}
2884 	}
2885 
2886 	err = tdh_vp_init(&tdx->vp, vcpu_rcx, vcpu->vcpu_id);
2887 	if (KVM_BUG_ON(err, vcpu->kvm)) {
2888 		pr_tdx_error(TDH_VP_INIT, err);
2889 		return -EIO;
2890 	}
2891 
2892 	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
2893 
2894 	return 0;
2895 
2896 free_tdcx:
2897 	for (i = 0; i < kvm_tdx->td.tdcx_nr_pages; i++) {
2898 		if (tdx->vp.tdcx_pages[i])
2899 			__free_page(tdx->vp.tdcx_pages[i]);
2900 		tdx->vp.tdcx_pages[i] = NULL;
2901 	}
2902 	kfree(tdx->vp.tdcx_pages);
2903 	tdx->vp.tdcx_pages = NULL;
2904 
2905 free_tdvpr:
2906 	if (tdx->vp.tdvpr_page)
2907 		__free_page(tdx->vp.tdvpr_page);
2908 	tdx->vp.tdvpr_page = 0;
2909 
2910 	return ret;
2911 }
2912 
2913 /* Sometimes reads multipple subleafs. Return how many enties were written. */
2914 static int tdx_vcpu_get_cpuid_leaf(struct kvm_vcpu *vcpu, u32 leaf, int *entry_index,
2915 				   struct kvm_cpuid_entry2 *output_e)
2916 {
2917 	int sub_leaf = 0;
2918 	int ret;
2919 
2920 	/* First try without a subleaf */
2921 	ret = tdx_read_cpuid(vcpu, leaf, 0, false, entry_index, output_e);
2922 
2923 	/* If success, or invalid leaf, just give up */
2924 	if (ret != -EIO)
2925 		return ret;
2926 
2927 	/*
2928 	 * If the try without a subleaf failed, try reading subleafs until
2929 	 * failure. The TDX module only supports 6 bits of subleaf index.
2930 	 */
2931 	while (1) {
2932 		/* Keep reading subleafs until there is a failure. */
2933 		if (tdx_read_cpuid(vcpu, leaf, sub_leaf, true, entry_index, output_e))
2934 			return !sub_leaf;
2935 
2936 		sub_leaf++;
2937 		output_e++;
2938 	}
2939 
2940 	return 0;
2941 }
2942 
2943 static int tdx_vcpu_get_cpuid(struct kvm_vcpu *vcpu, struct kvm_tdx_cmd *cmd)
2944 {
2945 	struct kvm_cpuid2 __user *output, *td_cpuid;
2946 	int r = 0, i = 0, leaf;
2947 	u32 level;
2948 
2949 	output = u64_to_user_ptr(cmd->data);
2950 	td_cpuid = kzalloc(sizeof(*td_cpuid) +
2951 			sizeof(output->entries[0]) * KVM_MAX_CPUID_ENTRIES,
2952 			GFP_KERNEL);
2953 	if (!td_cpuid)
2954 		return -ENOMEM;
2955 
2956 	if (copy_from_user(td_cpuid, output, sizeof(*output))) {
2957 		r = -EFAULT;
2958 		goto out;
2959 	}
2960 
2961 	/* Read max CPUID for normal range */
2962 	if (tdx_vcpu_get_cpuid_leaf(vcpu, 0, &i, &td_cpuid->entries[i])) {
2963 		r = -EIO;
2964 		goto out;
2965 	}
2966 	level = td_cpuid->entries[0].eax;
2967 
2968 	for (leaf = 1; leaf <= level; leaf++)
2969 		tdx_vcpu_get_cpuid_leaf(vcpu, leaf, &i, &td_cpuid->entries[i]);
2970 
2971 	/* Read max CPUID for extended range */
2972 	if (tdx_vcpu_get_cpuid_leaf(vcpu, 0x80000000, &i, &td_cpuid->entries[i])) {
2973 		r = -EIO;
2974 		goto out;
2975 	}
2976 	level = td_cpuid->entries[i - 1].eax;
2977 
2978 	for (leaf = 0x80000001; leaf <= level; leaf++)
2979 		tdx_vcpu_get_cpuid_leaf(vcpu, leaf, &i, &td_cpuid->entries[i]);
2980 
2981 	if (td_cpuid->nent < i)
2982 		r = -E2BIG;
2983 	td_cpuid->nent = i;
2984 
2985 	if (copy_to_user(output, td_cpuid, sizeof(*output))) {
2986 		r = -EFAULT;
2987 		goto out;
2988 	}
2989 
2990 	if (r == -E2BIG)
2991 		goto out;
2992 
2993 	if (copy_to_user(output->entries, td_cpuid->entries,
2994 			 td_cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
2995 		r = -EFAULT;
2996 
2997 out:
2998 	kfree(td_cpuid);
2999 
3000 	return r;
3001 }
3002 
3003 static int tdx_vcpu_init(struct kvm_vcpu *vcpu, struct kvm_tdx_cmd *cmd)
3004 {
3005 	u64 apic_base;
3006 	struct vcpu_tdx *tdx = to_tdx(vcpu);
3007 	int ret;
3008 
3009 	if (cmd->flags)
3010 		return -EINVAL;
3011 
3012 	if (tdx->state != VCPU_TD_STATE_UNINITIALIZED)
3013 		return -EINVAL;
3014 
3015 	/*
3016 	 * TDX requires X2APIC, userspace is responsible for configuring guest
3017 	 * CPUID accordingly.
3018 	 */
3019 	apic_base = APIC_DEFAULT_PHYS_BASE | LAPIC_MODE_X2APIC |
3020 		(kvm_vcpu_is_reset_bsp(vcpu) ? MSR_IA32_APICBASE_BSP : 0);
3021 	if (kvm_apic_set_base(vcpu, apic_base, true))
3022 		return -EINVAL;
3023 
3024 	ret = tdx_td_vcpu_init(vcpu, (u64)cmd->data);
3025 	if (ret)
3026 		return ret;
3027 
3028 	td_vmcs_write16(tdx, POSTED_INTR_NV, POSTED_INTR_VECTOR);
3029 	td_vmcs_write64(tdx, POSTED_INTR_DESC_ADDR, __pa(&tdx->vt.pi_desc));
3030 	td_vmcs_setbit32(tdx, PIN_BASED_VM_EXEC_CONTROL, PIN_BASED_POSTED_INTR);
3031 
3032 	tdx->state = VCPU_TD_STATE_INITIALIZED;
3033 
3034 	return 0;
3035 }
3036 
3037 void tdx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
3038 {
3039 	/*
3040 	 * Yell on INIT, as TDX doesn't support INIT, i.e. KVM should drop all
3041 	 * INIT events.
3042 	 *
3043 	 * Defer initializing vCPU for RESET state until KVM_TDX_INIT_VCPU, as
3044 	 * userspace needs to define the vCPU model before KVM can initialize
3045 	 * vCPU state, e.g. to enable x2APIC.
3046 	 */
3047 	WARN_ON_ONCE(init_event);
3048 }
3049 
3050 struct tdx_gmem_post_populate_arg {
3051 	struct kvm_vcpu *vcpu;
3052 	__u32 flags;
3053 };
3054 
3055 static int tdx_gmem_post_populate(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
3056 				  void __user *src, int order, void *_arg)
3057 {
3058 	u64 error_code = PFERR_GUEST_FINAL_MASK | PFERR_PRIVATE_ACCESS;
3059 	struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
3060 	struct tdx_gmem_post_populate_arg *arg = _arg;
3061 	struct kvm_vcpu *vcpu = arg->vcpu;
3062 	gpa_t gpa = gfn_to_gpa(gfn);
3063 	u8 level = PG_LEVEL_4K;
3064 	struct page *src_page;
3065 	int ret, i;
3066 	u64 err, entry, level_state;
3067 
3068 	/*
3069 	 * Get the source page if it has been faulted in. Return failure if the
3070 	 * source page has been swapped out or unmapped in primary memory.
3071 	 */
3072 	ret = get_user_pages_fast((unsigned long)src, 1, 0, &src_page);
3073 	if (ret < 0)
3074 		return ret;
3075 	if (ret != 1)
3076 		return -ENOMEM;
3077 
3078 	ret = kvm_tdp_map_page(vcpu, gpa, error_code, &level);
3079 	if (ret < 0)
3080 		goto out;
3081 
3082 	/*
3083 	 * The private mem cannot be zapped after kvm_tdp_map_page()
3084 	 * because all paths are covered by slots_lock and the
3085 	 * filemap invalidate lock.  Check that they are indeed enough.
3086 	 */
3087 	if (IS_ENABLED(CONFIG_KVM_PROVE_MMU)) {
3088 		scoped_guard(read_lock, &kvm->mmu_lock) {
3089 			if (KVM_BUG_ON(!kvm_tdp_mmu_gpa_is_mapped(vcpu, gpa), kvm)) {
3090 				ret = -EIO;
3091 				goto out;
3092 			}
3093 		}
3094 	}
3095 
3096 	ret = 0;
3097 	err = tdh_mem_page_add(&kvm_tdx->td, gpa, pfn_to_page(pfn),
3098 			       src_page, &entry, &level_state);
3099 	if (err) {
3100 		ret = unlikely(tdx_operand_busy(err)) ? -EBUSY : -EIO;
3101 		goto out;
3102 	}
3103 
3104 	if (!KVM_BUG_ON(!atomic64_read(&kvm_tdx->nr_premapped), kvm))
3105 		atomic64_dec(&kvm_tdx->nr_premapped);
3106 
3107 	if (arg->flags & KVM_TDX_MEASURE_MEMORY_REGION) {
3108 		for (i = 0; i < PAGE_SIZE; i += TDX_EXTENDMR_CHUNKSIZE) {
3109 			err = tdh_mr_extend(&kvm_tdx->td, gpa + i, &entry,
3110 					    &level_state);
3111 			if (err) {
3112 				ret = -EIO;
3113 				break;
3114 			}
3115 		}
3116 	}
3117 
3118 out:
3119 	put_page(src_page);
3120 	return ret;
3121 }
3122 
3123 static int tdx_vcpu_init_mem_region(struct kvm_vcpu *vcpu, struct kvm_tdx_cmd *cmd)
3124 {
3125 	struct vcpu_tdx *tdx = to_tdx(vcpu);
3126 	struct kvm *kvm = vcpu->kvm;
3127 	struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
3128 	struct kvm_tdx_init_mem_region region;
3129 	struct tdx_gmem_post_populate_arg arg;
3130 	long gmem_ret;
3131 	int ret;
3132 
3133 	if (tdx->state != VCPU_TD_STATE_INITIALIZED)
3134 		return -EINVAL;
3135 
3136 	guard(mutex)(&kvm->slots_lock);
3137 
3138 	/* Once TD is finalized, the initial guest memory is fixed. */
3139 	if (kvm_tdx->state == TD_STATE_RUNNABLE)
3140 		return -EINVAL;
3141 
3142 	if (cmd->flags & ~KVM_TDX_MEASURE_MEMORY_REGION)
3143 		return -EINVAL;
3144 
3145 	if (copy_from_user(&region, u64_to_user_ptr(cmd->data), sizeof(region)))
3146 		return -EFAULT;
3147 
3148 	if (!PAGE_ALIGNED(region.source_addr) || !PAGE_ALIGNED(region.gpa) ||
3149 	    !region.nr_pages ||
3150 	    region.gpa + (region.nr_pages << PAGE_SHIFT) <= region.gpa ||
3151 	    !vt_is_tdx_private_gpa(kvm, region.gpa) ||
3152 	    !vt_is_tdx_private_gpa(kvm, region.gpa + (region.nr_pages << PAGE_SHIFT) - 1))
3153 		return -EINVAL;
3154 
3155 	kvm_mmu_reload(vcpu);
3156 	ret = 0;
3157 	while (region.nr_pages) {
3158 		if (signal_pending(current)) {
3159 			ret = -EINTR;
3160 			break;
3161 		}
3162 
3163 		arg = (struct tdx_gmem_post_populate_arg) {
3164 			.vcpu = vcpu,
3165 			.flags = cmd->flags,
3166 		};
3167 		gmem_ret = kvm_gmem_populate(kvm, gpa_to_gfn(region.gpa),
3168 					     u64_to_user_ptr(region.source_addr),
3169 					     1, tdx_gmem_post_populate, &arg);
3170 		if (gmem_ret < 0) {
3171 			ret = gmem_ret;
3172 			break;
3173 		}
3174 
3175 		if (gmem_ret != 1) {
3176 			ret = -EIO;
3177 			break;
3178 		}
3179 
3180 		region.source_addr += PAGE_SIZE;
3181 		region.gpa += PAGE_SIZE;
3182 		region.nr_pages--;
3183 
3184 		cond_resched();
3185 	}
3186 
3187 	if (copy_to_user(u64_to_user_ptr(cmd->data), &region, sizeof(region)))
3188 		ret = -EFAULT;
3189 	return ret;
3190 }
3191 
3192 int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp)
3193 {
3194 	struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
3195 	struct kvm_tdx_cmd cmd;
3196 	int ret;
3197 
3198 	if (!is_hkid_assigned(kvm_tdx) || kvm_tdx->state == TD_STATE_RUNNABLE)
3199 		return -EINVAL;
3200 
3201 	if (copy_from_user(&cmd, argp, sizeof(cmd)))
3202 		return -EFAULT;
3203 
3204 	if (cmd.hw_error)
3205 		return -EINVAL;
3206 
3207 	switch (cmd.id) {
3208 	case KVM_TDX_INIT_VCPU:
3209 		ret = tdx_vcpu_init(vcpu, &cmd);
3210 		break;
3211 	case KVM_TDX_INIT_MEM_REGION:
3212 		ret = tdx_vcpu_init_mem_region(vcpu, &cmd);
3213 		break;
3214 	case KVM_TDX_GET_CPUID:
3215 		ret = tdx_vcpu_get_cpuid(vcpu, &cmd);
3216 		break;
3217 	default:
3218 		ret = -EINVAL;
3219 		break;
3220 	}
3221 
3222 	return ret;
3223 }
3224 
3225 int tdx_gmem_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
3226 {
3227 	return PG_LEVEL_4K;
3228 }
3229 
3230 static int tdx_online_cpu(unsigned int cpu)
3231 {
3232 	unsigned long flags;
3233 	int r;
3234 
3235 	/* Sanity check CPU is already in post-VMXON */
3236 	WARN_ON_ONCE(!(cr4_read_shadow() & X86_CR4_VMXE));
3237 
3238 	local_irq_save(flags);
3239 	r = tdx_cpu_enable();
3240 	local_irq_restore(flags);
3241 
3242 	return r;
3243 }
3244 
3245 static int tdx_offline_cpu(unsigned int cpu)
3246 {
3247 	int i;
3248 
3249 	/* No TD is running.  Allow any cpu to be offline. */
3250 	if (!atomic_read(&nr_configured_hkid))
3251 		return 0;
3252 
3253 	/*
3254 	 * In order to reclaim TDX HKID, (i.e. when deleting guest TD), need to
3255 	 * call TDH.PHYMEM.PAGE.WBINVD on all packages to program all memory
3256 	 * controller with pconfig.  If we have active TDX HKID, refuse to
3257 	 * offline the last online cpu.
3258 	 */
3259 	for_each_online_cpu(i) {
3260 		/*
3261 		 * Found another online cpu on the same package.
3262 		 * Allow to offline.
3263 		 */
3264 		if (i != cpu && topology_physical_package_id(i) ==
3265 				topology_physical_package_id(cpu))
3266 			return 0;
3267 	}
3268 
3269 	/*
3270 	 * This is the last cpu of this package.  Don't offline it.
3271 	 *
3272 	 * Because it's hard for human operator to understand the
3273 	 * reason, warn it.
3274 	 */
3275 #define MSG_ALLPKG_ONLINE \
3276 	"TDX requires all packages to have an online CPU. Delete all TDs in order to offline all CPUs of a package.\n"
3277 	pr_warn_ratelimited(MSG_ALLPKG_ONLINE);
3278 	return -EBUSY;
3279 }
3280 
3281 static void __do_tdx_cleanup(void)
3282 {
3283 	/*
3284 	 * Once TDX module is initialized, it cannot be disabled and
3285 	 * re-initialized again w/o runtime update (which isn't
3286 	 * supported by kernel).  Only need to remove the cpuhp here.
3287 	 * The TDX host core code tracks TDX status and can handle
3288 	 * 'multiple enabling' scenario.
3289 	 */
3290 	WARN_ON_ONCE(!tdx_cpuhp_state);
3291 	cpuhp_remove_state_nocalls_cpuslocked(tdx_cpuhp_state);
3292 	tdx_cpuhp_state = 0;
3293 }
3294 
3295 static void __tdx_cleanup(void)
3296 {
3297 	cpus_read_lock();
3298 	__do_tdx_cleanup();
3299 	cpus_read_unlock();
3300 }
3301 
3302 static int __init __do_tdx_bringup(void)
3303 {
3304 	int r;
3305 
3306 	/*
3307 	 * TDX-specific cpuhp callback to call tdx_cpu_enable() on all
3308 	 * online CPUs before calling tdx_enable(), and on any new
3309 	 * going-online CPU to make sure it is ready for TDX guest.
3310 	 */
3311 	r = cpuhp_setup_state_cpuslocked(CPUHP_AP_ONLINE_DYN,
3312 					 "kvm/cpu/tdx:online",
3313 					 tdx_online_cpu, tdx_offline_cpu);
3314 	if (r < 0)
3315 		return r;
3316 
3317 	tdx_cpuhp_state = r;
3318 
3319 	r = tdx_enable();
3320 	if (r)
3321 		__do_tdx_cleanup();
3322 
3323 	return r;
3324 }
3325 
3326 static int __init __tdx_bringup(void)
3327 {
3328 	const struct tdx_sys_info_td_conf *td_conf;
3329 	int r, i;
3330 
3331 	for (i = 0; i < ARRAY_SIZE(tdx_uret_msrs); i++) {
3332 		/*
3333 		 * Check if MSRs (tdx_uret_msrs) can be saved/restored
3334 		 * before returning to user space.
3335 		 *
3336 		 * this_cpu_ptr(user_return_msrs)->registered isn't checked
3337 		 * because the registration is done at vcpu runtime by
3338 		 * tdx_user_return_msr_update_cache().
3339 		 */
3340 		tdx_uret_msrs[i].slot = kvm_find_user_return_msr(tdx_uret_msrs[i].msr);
3341 		if (tdx_uret_msrs[i].slot == -1) {
3342 			/* If any MSR isn't supported, it is a KVM bug */
3343 			pr_err("MSR %x isn't included by kvm_find_user_return_msr\n",
3344 				tdx_uret_msrs[i].msr);
3345 			return -EIO;
3346 		}
3347 	}
3348 
3349 	/*
3350 	 * Enabling TDX requires enabling hardware virtualization first,
3351 	 * as making SEAMCALLs requires CPU being in post-VMXON state.
3352 	 */
3353 	r = kvm_enable_virtualization();
3354 	if (r)
3355 		return r;
3356 
3357 	cpus_read_lock();
3358 	r = __do_tdx_bringup();
3359 	cpus_read_unlock();
3360 
3361 	if (r)
3362 		goto tdx_bringup_err;
3363 
3364 	/* Get TDX global information for later use */
3365 	tdx_sysinfo = tdx_get_sysinfo();
3366 	if (WARN_ON_ONCE(!tdx_sysinfo)) {
3367 		r = -EINVAL;
3368 		goto get_sysinfo_err;
3369 	}
3370 
3371 	/* Check TDX module and KVM capabilities */
3372 	if (!tdx_get_supported_attrs(&tdx_sysinfo->td_conf) ||
3373 	    !tdx_get_supported_xfam(&tdx_sysinfo->td_conf))
3374 		goto get_sysinfo_err;
3375 
3376 	if (!(tdx_sysinfo->features.tdx_features0 & MD_FIELD_ID_FEATURES0_TOPOLOGY_ENUM))
3377 		goto get_sysinfo_err;
3378 
3379 	/*
3380 	 * TDX has its own limit of maximum vCPUs it can support for all
3381 	 * TDX guests in addition to KVM_MAX_VCPUS.  Userspace needs to
3382 	 * query TDX guest's maximum vCPUs by checking KVM_CAP_MAX_VCPU
3383 	 * extension on per-VM basis.
3384 	 *
3385 	 * TDX module reports such limit via the MAX_VCPU_PER_TD global
3386 	 * metadata.  Different modules may report different values.
3387 	 * Some old module may also not support this metadata (in which
3388 	 * case this limit is U16_MAX).
3389 	 *
3390 	 * In practice, the reported value reflects the maximum logical
3391 	 * CPUs that ALL the platforms that the module supports can
3392 	 * possibly have.
3393 	 *
3394 	 * Simply forwarding the MAX_VCPU_PER_TD to userspace could
3395 	 * result in an unpredictable ABI.  KVM instead always advertise
3396 	 * the number of logical CPUs the platform has as the maximum
3397 	 * vCPUs for TDX guests.
3398 	 *
3399 	 * Make sure MAX_VCPU_PER_TD reported by TDX module is not
3400 	 * smaller than the number of logical CPUs, otherwise KVM will
3401 	 * report an unsupported value to userspace.
3402 	 *
3403 	 * Note, a platform with TDX enabled in the BIOS cannot support
3404 	 * physical CPU hotplug, and TDX requires the BIOS has marked
3405 	 * all logical CPUs in MADT table as enabled.  Just use
3406 	 * num_present_cpus() for the number of logical CPUs.
3407 	 */
3408 	td_conf = &tdx_sysinfo->td_conf;
3409 	if (td_conf->max_vcpus_per_td < num_present_cpus()) {
3410 		pr_err("Disable TDX: MAX_VCPU_PER_TD (%u) smaller than number of logical CPUs (%u).\n",
3411 				td_conf->max_vcpus_per_td, num_present_cpus());
3412 		r = -EINVAL;
3413 		goto get_sysinfo_err;
3414 	}
3415 
3416 	if (misc_cg_set_capacity(MISC_CG_RES_TDX, tdx_get_nr_guest_keyids())) {
3417 		r = -EINVAL;
3418 		goto get_sysinfo_err;
3419 	}
3420 
3421 	/*
3422 	 * Leave hardware virtualization enabled after TDX is enabled
3423 	 * successfully.  TDX CPU hotplug depends on this.
3424 	 */
3425 	return 0;
3426 
3427 get_sysinfo_err:
3428 	__tdx_cleanup();
3429 tdx_bringup_err:
3430 	kvm_disable_virtualization();
3431 	return r;
3432 }
3433 
3434 void tdx_cleanup(void)
3435 {
3436 	if (enable_tdx) {
3437 		misc_cg_set_capacity(MISC_CG_RES_TDX, 0);
3438 		__tdx_cleanup();
3439 		kvm_disable_virtualization();
3440 	}
3441 }
3442 
3443 int __init tdx_bringup(void)
3444 {
3445 	int r, i;
3446 
3447 	/* tdx_disable_virtualization_cpu() uses associated_tdvcpus. */
3448 	for_each_possible_cpu(i)
3449 		INIT_LIST_HEAD(&per_cpu(associated_tdvcpus, i));
3450 
3451 	if (!enable_tdx)
3452 		return 0;
3453 
3454 	if (!enable_ept) {
3455 		pr_err("EPT is required for TDX\n");
3456 		goto success_disable_tdx;
3457 	}
3458 
3459 	if (!tdp_mmu_enabled || !enable_mmio_caching || !enable_ept_ad_bits) {
3460 		pr_err("TDP MMU and MMIO caching and EPT A/D bit is required for TDX\n");
3461 		goto success_disable_tdx;
3462 	}
3463 
3464 	if (!enable_apicv) {
3465 		pr_err("APICv is required for TDX\n");
3466 		goto success_disable_tdx;
3467 	}
3468 
3469 	if (!cpu_feature_enabled(X86_FEATURE_OSXSAVE)) {
3470 		pr_err("tdx: OSXSAVE is required for TDX\n");
3471 		goto success_disable_tdx;
3472 	}
3473 
3474 	if (!cpu_feature_enabled(X86_FEATURE_MOVDIR64B)) {
3475 		pr_err("tdx: MOVDIR64B is required for TDX\n");
3476 		goto success_disable_tdx;
3477 	}
3478 
3479 	if (!cpu_feature_enabled(X86_FEATURE_SELFSNOOP)) {
3480 		pr_err("Self-snoop is required for TDX\n");
3481 		goto success_disable_tdx;
3482 	}
3483 
3484 	if (!cpu_feature_enabled(X86_FEATURE_TDX_HOST_PLATFORM)) {
3485 		pr_err("tdx: no TDX private KeyIDs available\n");
3486 		goto success_disable_tdx;
3487 	}
3488 
3489 	if (!enable_virt_at_load) {
3490 		pr_err("tdx: tdx requires kvm.enable_virt_at_load=1\n");
3491 		goto success_disable_tdx;
3492 	}
3493 
3494 	/*
3495 	 * Ideally KVM should probe whether TDX module has been loaded
3496 	 * first and then try to bring it up.  But TDX needs to use SEAMCALL
3497 	 * to probe whether the module is loaded (there is no CPUID or MSR
3498 	 * for that), and making SEAMCALL requires enabling virtualization
3499 	 * first, just like the rest steps of bringing up TDX module.
3500 	 *
3501 	 * So, for simplicity do everything in __tdx_bringup(); the first
3502 	 * SEAMCALL will return -ENODEV when the module is not loaded.  The
3503 	 * only complication is having to make sure that initialization
3504 	 * SEAMCALLs don't return TDX_SEAMCALL_VMFAILINVALID in other
3505 	 * cases.
3506 	 */
3507 	r = __tdx_bringup();
3508 	if (r) {
3509 		/*
3510 		 * Disable TDX only but don't fail to load module if
3511 		 * the TDX module could not be loaded.  No need to print
3512 		 * message saying "module is not loaded" because it was
3513 		 * printed when the first SEAMCALL failed.
3514 		 */
3515 		if (r == -ENODEV)
3516 			goto success_disable_tdx;
3517 
3518 		enable_tdx = 0;
3519 	}
3520 
3521 	return r;
3522 
3523 success_disable_tdx:
3524 	enable_tdx = 0;
3525 	return 0;
3526 }
3527