1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2026, Red Hat, Inc.
4 *
5 * Test that vmx_leave_smm() validates vmcs12 controls before re-entering
6 * nested guest mode on RSM.
7 */
8 #include <fcntl.h>
9 #include <stdio.h>
10 #include <stdlib.h>
11 #include <string.h>
12 #include <sys/ioctl.h>
13
14 #include "test_util.h"
15 #include "kvm_util.h"
16 #include "smm.h"
17 #include "hyperv.h"
18 #include "vmx.h"
19
20 #define SMRAM_GPA 0x1000000
21 #define SMRAM_STAGE 0xfe
22
23 #define SYNC_PORT 0xe
24
25 #define STR(x) #x
26 #define XSTR(s) STR(s)
27
/*
 * SMI handler: runs in real-address mode.
 * Reports SMRAM_STAGE via port IO, then does RSM.
 *
 * Hand-assembled 16-bit code.  The "in" exits to the host with
 * SMRAM_STAGE still in %al (the mov immediately precedes it), which is
 * how the host observes that the vCPU reached the SMI handler.
 */
static uint8_t smi_handler[] = {
	0xb0, SMRAM_STAGE,	/* mov $SMRAM_STAGE, %al */
	0xe4, SYNC_PORT,	/* in $SYNC_PORT, %al */
	0x0f, 0xaa,		/* rsm */
};
37
/*
 * Sync point with the host: execute "in" on SYNC_PORT with @phase in
 * %al/%rax.  The port access causes a KVM_EXIT_IO, at which point the
 * host reads the stage number back out of the vCPU's RAX.
 */
static inline void sync_with_host(uint64_t phase)
{
	asm volatile("in $" XSTR(SYNC_PORT) ", %%al \n"
		     : "+a" (phase));
}
43
/*
 * L2 guest: announce stage 1 to the host, then sit on a vmcall.
 *
 * The host injects an SMI while L2 is here.  If RSM were (incorrectly)
 * allowed to re-enter L2 despite the corrupted vmcs12 controls, the
 * vmcall below would execute; the test expects a shutdown instead.
 */
static void l2_guest_code(void)
{
	sync_with_host(1);

	/* After SMI+RSM with invalid controls, we should not reach here. */
	vmcall();
}
51
/*
 * L1 guest: enable Hyper-V enlightenments, load an eVMCS, and launch
 * l2_guest_code().  Stage 2 is only reported if L2 exits back to L1 via
 * vmcall, i.e. if the (expected) RSM failure did not happen.
 */
static void guest_code(struct vmx_pages *vmx_pages,
		       struct hyperv_test_pages *hv_pages)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

	/* Set up Hyper-V enlightenments and eVMCS */
	wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
	enable_vp_assist(hv_pages->vp_assist_gpa, hv_pages->vp_assist);
	evmcs_enable();

	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_ASSERT(load_evmcs(hv_pages));
	/* Stack grows down: pass one-past-the-end of the array. */
	prepare_vmcs(vmx_pages, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	GUEST_ASSERT(!vmlaunch());

	/* L2 exits via vmcall if test fails */
	sync_with_host(2);
}
73
main(int argc,char * argv[])74 int main(int argc, char *argv[])
75 {
76 vm_vaddr_t vmx_pages_gva = 0, hv_pages_gva = 0;
77 struct hyperv_test_pages *hv;
78 struct hv_enlightened_vmcs *evmcs;
79 struct kvm_vcpu *vcpu;
80 struct kvm_vm *vm;
81 struct kvm_regs regs;
82 int stage_reported;
83
84 TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
85 TEST_REQUIRE(kvm_has_cap(KVM_CAP_NESTED_STATE));
86 TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS));
87 TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_SMM));
88
89 vm = vm_create_with_one_vcpu(&vcpu, guest_code);
90
91 setup_smram(vm, vcpu, SMRAM_GPA, smi_handler, sizeof(smi_handler));
92
93 vcpu_set_hv_cpuid(vcpu);
94 vcpu_enable_evmcs(vcpu);
95 vcpu_alloc_vmx(vm, &vmx_pages_gva);
96 hv = vcpu_alloc_hyperv_test_pages(vm, &hv_pages_gva);
97 vcpu_args_set(vcpu, 2, vmx_pages_gva, hv_pages_gva);
98
99 vcpu_run(vcpu);
100
101 /* L2 is running and syncs with host. */
102 TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
103 vcpu_regs_get(vcpu, ®s);
104 stage_reported = regs.rax & 0xff;
105 TEST_ASSERT(stage_reported == 1,
106 "Expected stage 1, got %d", stage_reported);
107
108 /* Inject SMI while L2 is running. */
109 inject_smi(vcpu);
110 vcpu_run(vcpu);
111 TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
112 vcpu_regs_get(vcpu, ®s);
113 stage_reported = regs.rax & 0xff;
114 TEST_ASSERT(stage_reported == SMRAM_STAGE,
115 "Expected SMM handler stage %#x, got %#x",
116 SMRAM_STAGE, stage_reported);
117
118 /*
119 * Guest is now paused in the SMI handler, about to execute RSM.
120 * Hack the eVMCS page to set-up invalid pin-based execution
121 * control (PIN_BASED_VIRTUAL_NMIS without PIN_BASED_NMI_EXITING).
122 */
123 evmcs = hv->enlightened_vmcs_hva;
124 evmcs->pin_based_vm_exec_control |= PIN_BASED_VIRTUAL_NMIS;
125 evmcs->hv_clean_fields = 0;
126
127 /*
128 * Trigger copy_enlightened_to_vmcs12() via KVM_GET_NESTED_STATE,
129 * copying the invalid pin_based_vm_exec_control into cached_vmcs12.
130 */
131 union {
132 struct kvm_nested_state state;
133 char state_[16384];
134 } nested_state_buf;
135
136 memset(&nested_state_buf, 0, sizeof(nested_state_buf));
137 nested_state_buf.state.size = sizeof(nested_state_buf);
138 vcpu_nested_state_get(vcpu, &nested_state_buf.state);
139
140 /*
141 * Resume the guest. The SMI handler executes RSM, which calls
142 * vmx_leave_smm(). nested_vmx_check_controls() should detect
143 * VIRTUAL_NMIS without NMI_EXITING and cause a triple fault.
144 */
145 vcpu_run(vcpu);
146 TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_SHUTDOWN);
147
148 kvm_vm_free(vm);
149 return 0;
150 }
151