xref: /linux/tools/testing/selftests/kvm/x86/evmcs_smm_controls_test.c (revision 11e8c7e9471cf8e6ae6ec7324a3174191cd965e3)
1*3e745694SPaolo Bonzini // SPDX-License-Identifier: GPL-2.0
2*3e745694SPaolo Bonzini /*
3*3e745694SPaolo Bonzini  * Copyright (C) 2026, Red Hat, Inc.
4*3e745694SPaolo Bonzini  *
5*3e745694SPaolo Bonzini  * Test that vmx_leave_smm() validates vmcs12 controls before re-entering
6*3e745694SPaolo Bonzini  * nested guest mode on RSM.
7*3e745694SPaolo Bonzini  */
8*3e745694SPaolo Bonzini #include <fcntl.h>
9*3e745694SPaolo Bonzini #include <stdio.h>
10*3e745694SPaolo Bonzini #include <stdlib.h>
11*3e745694SPaolo Bonzini #include <string.h>
12*3e745694SPaolo Bonzini #include <sys/ioctl.h>
13*3e745694SPaolo Bonzini 
14*3e745694SPaolo Bonzini #include "test_util.h"
15*3e745694SPaolo Bonzini #include "kvm_util.h"
16*3e745694SPaolo Bonzini #include "smm.h"
17*3e745694SPaolo Bonzini #include "hyperv.h"
18*3e745694SPaolo Bonzini #include "vmx.h"
19*3e745694SPaolo Bonzini 
20*3e745694SPaolo Bonzini #define SMRAM_GPA	0x1000000
21*3e745694SPaolo Bonzini #define SMRAM_STAGE	0xfe
22*3e745694SPaolo Bonzini 
23*3e745694SPaolo Bonzini #define SYNC_PORT	0xe
24*3e745694SPaolo Bonzini 
25*3e745694SPaolo Bonzini #define STR(x) #x
26*3e745694SPaolo Bonzini #define XSTR(s) STR(s)
27*3e745694SPaolo Bonzini 
/*
 * SMI handler: runs in real-address mode.
 * Reports SMRAM_STAGE via port IO, then does RSM.
 *
 * The "in" instruction causes a KVM_EXIT_IO; at that point %al still holds
 * SMRAM_STAGE from the preceding mov, which is how the host-side code below
 * observes that the vCPU is inside the SMI handler (the IN's own result is
 * presumably filled in on completion and is ignored — the port read is used
 * purely as a host sync point).
 */
static uint8_t smi_handler[] = {
	0xb0, SMRAM_STAGE,    /* mov $SMRAM_STAGE, %al */
	0xe4, SYNC_PORT,      /* in $SYNC_PORT, %al */
	0x0f, 0xaa,           /* rsm */
};
37*3e745694SPaolo Bonzini 
/*
 * Synchronize with the host by executing an IN on SYNC_PORT with "phase"
 * loaded into %rax (via the "+a" constraint).  The IN triggers a
 * KVM_EXIT_IO, and the host reads the phase back out of regs.rax.
 * The value actually read from the port is irrelevant and discarded.
 */
static inline void sync_with_host(uint64_t phase)
{
	asm volatile("in $" XSTR(SYNC_PORT) ", %%al \n"
		     : "+a" (phase));
}
43*3e745694SPaolo Bonzini 
/*
 * Code run as the L2 (nested) guest.  Reports phase 1 to the host, then
 * parks on a VMCALL that must never be reached if KVM correctly rejects
 * the corrupted vmcs12 controls on RSM.
 */
static void l2_guest_code(void)
{
	/* Tell the host L2 is up and running (phase 1). */
	sync_with_host(1);

	/* After SMI+RSM with invalid controls, we should not reach here. */
	vmcall();
}
51*3e745694SPaolo Bonzini 
/*
 * L1 guest entry point: enables Hyper-V enlightenments, switches to an
 * enlightened VMCS, and launches l2_guest_code as a nested (L2) guest.
 *
 * @vmx_pages: guest-virtual pointer to the VMX scaffolding pages
 *             (allocated host-side by vcpu_alloc_vmx()).
 * @hv_pages:  guest-virtual pointer to the Hyper-V test pages
 *             (VP assist page + eVMCS).
 */
static void guest_code(struct vmx_pages *vmx_pages,
		       struct hyperv_test_pages *hv_pages)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

	/*
	 * Set up Hyper-V enlightenments and eVMCS.  The GUEST_OS_ID MSR must
	 * be written before other Hyper-V facilities are used; enabling the
	 * VP assist page is a prerequisite for eVMCS operation.
	 */
	wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
	enable_vp_assist(hv_pages->vp_assist_gpa, hv_pages->vp_assist);
	evmcs_enable();

	/* Enter VMX operation, make the eVMCS current, and configure L2. */
	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_ASSERT(load_evmcs(hv_pages));
	prepare_vmcs(vmx_pages, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	/* Launch L2; control returns here only on a VM-exit back to L1. */
	GUEST_ASSERT(!vmlaunch());

	/* L2 exits via vmcall if test fails */
	sync_with_host(2);
}
73*3e745694SPaolo Bonzini 
main(int argc,char * argv[])74*3e745694SPaolo Bonzini int main(int argc, char *argv[])
75*3e745694SPaolo Bonzini {
76*3e745694SPaolo Bonzini 	vm_vaddr_t vmx_pages_gva = 0, hv_pages_gva = 0;
77*3e745694SPaolo Bonzini 	struct hyperv_test_pages *hv;
78*3e745694SPaolo Bonzini 	struct hv_enlightened_vmcs *evmcs;
79*3e745694SPaolo Bonzini 	struct kvm_vcpu *vcpu;
80*3e745694SPaolo Bonzini 	struct kvm_vm *vm;
81*3e745694SPaolo Bonzini 	struct kvm_regs regs;
82*3e745694SPaolo Bonzini 	int stage_reported;
83*3e745694SPaolo Bonzini 
84*3e745694SPaolo Bonzini 	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
85*3e745694SPaolo Bonzini 	TEST_REQUIRE(kvm_has_cap(KVM_CAP_NESTED_STATE));
86*3e745694SPaolo Bonzini 	TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS));
87*3e745694SPaolo Bonzini 	TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_SMM));
88*3e745694SPaolo Bonzini 
89*3e745694SPaolo Bonzini 	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
90*3e745694SPaolo Bonzini 
91*3e745694SPaolo Bonzini 	setup_smram(vm, vcpu, SMRAM_GPA, smi_handler, sizeof(smi_handler));
92*3e745694SPaolo Bonzini 
93*3e745694SPaolo Bonzini 	vcpu_set_hv_cpuid(vcpu);
94*3e745694SPaolo Bonzini 	vcpu_enable_evmcs(vcpu);
95*3e745694SPaolo Bonzini 	vcpu_alloc_vmx(vm, &vmx_pages_gva);
96*3e745694SPaolo Bonzini 	hv = vcpu_alloc_hyperv_test_pages(vm, &hv_pages_gva);
97*3e745694SPaolo Bonzini 	vcpu_args_set(vcpu, 2, vmx_pages_gva, hv_pages_gva);
98*3e745694SPaolo Bonzini 
99*3e745694SPaolo Bonzini 	vcpu_run(vcpu);
100*3e745694SPaolo Bonzini 
101*3e745694SPaolo Bonzini 	/* L2 is running and syncs with host.  */
102*3e745694SPaolo Bonzini 	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
103*3e745694SPaolo Bonzini 	vcpu_regs_get(vcpu, &regs);
104*3e745694SPaolo Bonzini 	stage_reported = regs.rax & 0xff;
105*3e745694SPaolo Bonzini 	TEST_ASSERT(stage_reported == 1,
106*3e745694SPaolo Bonzini 		    "Expected stage 1, got %d", stage_reported);
107*3e745694SPaolo Bonzini 
108*3e745694SPaolo Bonzini 	/* Inject SMI while L2 is running.  */
109*3e745694SPaolo Bonzini 	inject_smi(vcpu);
110*3e745694SPaolo Bonzini 	vcpu_run(vcpu);
111*3e745694SPaolo Bonzini 	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
112*3e745694SPaolo Bonzini 	vcpu_regs_get(vcpu, &regs);
113*3e745694SPaolo Bonzini 	stage_reported = regs.rax & 0xff;
114*3e745694SPaolo Bonzini 	TEST_ASSERT(stage_reported == SMRAM_STAGE,
115*3e745694SPaolo Bonzini 		    "Expected SMM handler stage %#x, got %#x",
116*3e745694SPaolo Bonzini 		    SMRAM_STAGE, stage_reported);
117*3e745694SPaolo Bonzini 
118*3e745694SPaolo Bonzini 	/*
119*3e745694SPaolo Bonzini 	 * Guest is now paused in the SMI handler, about to execute RSM.
120*3e745694SPaolo Bonzini 	 * Hack the eVMCS page to set-up invalid pin-based execution
121*3e745694SPaolo Bonzini 	 * control (PIN_BASED_VIRTUAL_NMIS without PIN_BASED_NMI_EXITING).
122*3e745694SPaolo Bonzini 	 */
123*3e745694SPaolo Bonzini 	evmcs = hv->enlightened_vmcs_hva;
124*3e745694SPaolo Bonzini 	evmcs->pin_based_vm_exec_control |= PIN_BASED_VIRTUAL_NMIS;
125*3e745694SPaolo Bonzini 	evmcs->hv_clean_fields = 0;
126*3e745694SPaolo Bonzini 
127*3e745694SPaolo Bonzini 	/*
128*3e745694SPaolo Bonzini 	 * Trigger copy_enlightened_to_vmcs12() via KVM_GET_NESTED_STATE,
129*3e745694SPaolo Bonzini 	 * copying the invalid pin_based_vm_exec_control into cached_vmcs12.
130*3e745694SPaolo Bonzini 	 */
131*3e745694SPaolo Bonzini 	union {
132*3e745694SPaolo Bonzini 		struct kvm_nested_state state;
133*3e745694SPaolo Bonzini 		char state_[16384];
134*3e745694SPaolo Bonzini 	} nested_state_buf;
135*3e745694SPaolo Bonzini 
136*3e745694SPaolo Bonzini 	memset(&nested_state_buf, 0, sizeof(nested_state_buf));
137*3e745694SPaolo Bonzini 	nested_state_buf.state.size = sizeof(nested_state_buf);
138*3e745694SPaolo Bonzini 	vcpu_nested_state_get(vcpu, &nested_state_buf.state);
139*3e745694SPaolo Bonzini 
140*3e745694SPaolo Bonzini 	/*
141*3e745694SPaolo Bonzini 	 * Resume the guest.  The SMI handler executes RSM, which calls
142*3e745694SPaolo Bonzini 	 * vmx_leave_smm().  nested_vmx_check_controls() should detect
143*3e745694SPaolo Bonzini 	 * VIRTUAL_NMIS without NMI_EXITING and cause a triple fault.
144*3e745694SPaolo Bonzini 	 */
145*3e745694SPaolo Bonzini 	vcpu_run(vcpu);
146*3e745694SPaolo Bonzini 	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_SHUTDOWN);
147*3e745694SPaolo Bonzini 
148*3e745694SPaolo Bonzini 	kvm_vm_free(vm);
149*3e745694SPaolo Bonzini 	return 0;
150*3e745694SPaolo Bonzini }
151