// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018, Red Hat, Inc.
 *
 * Tests for SMM.
 */
7 #include <fcntl.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <stdint.h>
11 #include <string.h>
12 #include <sys/ioctl.h>
13
14 #include "test_util.h"
15
16 #include "kvm_util.h"
17 #include "smm.h"
18
19 #include "vmx.h"
20 #include "svm_util.h"
21
22 #define SMRAM_GPA 0x1000000
23 #define SMRAM_STAGE 0xfe
24
25 #define STR(x) #x
26 #define XSTR(s) STR(s)
27
28 #define SYNC_PORT 0xe
29 #define DONE 0xff
30
/*
 * This is compiled as normal 64-bit code, however, the SMI handler is
 * executed in real-address mode.  To stay simple we're limiting ourselves
 * to a mode-independent subset of asm here (hand-assembled below).
 * The SMI handler always reports back the fixed stage SMRAM_STAGE via
 * SYNC_PORT and then resumes normal execution with RSM.
 */
uint8_t smi_handler[] = {
	0xb0, SMRAM_STAGE,    /* mov $SMRAM_STAGE, %al */
	0xe4, SYNC_PORT,      /* in $SYNC_PORT, %al */
	0x0f, 0xaa,           /* rsm */
};
42
/*
 * Report the current test stage to the host: the "+a" constraint loads
 * 'phase' into RAX, and the port access on SYNC_PORT triggers a VM-exit
 * (KVM_EXIT_IO) after which the host reads the stage out of regs.rax
 * (see the loop in main()).
 */
static inline void sync_with_host(uint64_t phase)
{
	asm volatile("in $" XSTR(SYNC_PORT)", %%al \n"
		     : "+a" (phase));
}
48
/* Send an SMI to the vCPU itself via an x2APIC self-directed ICR write. */
static void self_smi(void)
{
	x2apic_write_reg(APIC_ICR,
			 APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_SMI);
}
54
/*
 * Nested (L2) guest.  Reports stages 8 and 10; the host injects an SMI at
 * each of those stages (see main()), so what actually reaches the host is
 * the SMI handler's SMRAM_STAGE instead.  Finishes with a hypercall back
 * to L1.
 */
static void l2_guest_code(void)
{
	sync_with_host(8);

	sync_with_host(10);

	vmcall();
}
63
/*
 * L1 guest.  Walks the host through numbered sync stages; stages that
 * coincide with an SMM entry (3, 6, and 8-11) are reported as SMRAM_STAGE
 * by the SMI handler instead.  A non-NULL 'arg' points at pre-allocated
 * nested state (SVM or VMX -- both interpretations of the pointer are set
 * up below and the right one is chosen at run time), in which case the
 * nested leg of the test is executed as well.
 */
static void guest_code(void *arg)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	uint64_t apicbase = rdmsr(MSR_IA32_APICBASE);
	struct svm_test_data *svm = arg;
	struct vmx_pages *vmx_pages = arg;

	sync_with_host(1);

	/* Enable x2APIC so self_smi() can use x2apic_write_reg(). */
	wrmsr(MSR_IA32_APICBASE, apicbase | X2APIC_ENABLE);

	sync_with_host(2);

	/* Stage 3 is consumed by SMM (SMRAM_STAGE reported instead). */
	self_smi();

	sync_with_host(4);

	if (arg) {
		if (this_cpu_has(X86_FEATURE_SVM)) {
			generic_svm_setup(svm, l2_guest_code,
					  &l2_guest_stack[L2_GUEST_STACK_SIZE]);
		} else {
			GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
			GUEST_ASSERT(load_vmcs(vmx_pages));
			prepare_vmcs(vmx_pages, l2_guest_code,
				     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
		}

		sync_with_host(5);

		/* Stage 6 is consumed by SMM (SMRAM_STAGE reported instead). */
		self_smi();

		sync_with_host(7);

		/*
		 * Two back-to-back runs of L2: the host injects SMIs while L2
		 * is executing (stages 8 and 10), presumably cutting the first
		 * run short -- NOTE(review): confirm exact exit cause.
		 */
		if (this_cpu_has(X86_FEATURE_SVM)) {
			run_guest(svm->vmcb, svm->vmcb_gpa);
			run_guest(svm->vmcb, svm->vmcb_gpa);
		} else {
			vmlaunch();
			vmresume();
		}

		/* Stages 8-11 are eaten by SMM (SMRAM_STAGE reported instead) */
		sync_with_host(12);
	}

	sync_with_host(DONE);
}
113
main(int argc,char * argv[])114 int main(int argc, char *argv[])
115 {
116 vm_vaddr_t nested_gva = 0;
117
118 struct kvm_vcpu *vcpu;
119 struct kvm_regs regs;
120 struct kvm_vm *vm;
121 struct kvm_x86_state *state;
122 int stage, stage_reported;
123
124 TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_SMM));
125
126 /* Create VM */
127 vm = vm_create_with_one_vcpu(&vcpu, guest_code);
128
129 setup_smram(vm, vcpu, SMRAM_GPA, smi_handler, sizeof(smi_handler));
130
131 if (kvm_has_cap(KVM_CAP_NESTED_STATE)) {
132 if (kvm_cpu_has(X86_FEATURE_SVM))
133 vcpu_alloc_svm(vm, &nested_gva);
134 else if (kvm_cpu_has(X86_FEATURE_VMX))
135 vcpu_alloc_vmx(vm, &nested_gva);
136 }
137
138 if (!nested_gva)
139 pr_info("will skip SMM test with VMX enabled\n");
140
141 vcpu_args_set(vcpu, 1, nested_gva);
142
143 for (stage = 1;; stage++) {
144 vcpu_run(vcpu);
145 TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
146
147 memset(®s, 0, sizeof(regs));
148 vcpu_regs_get(vcpu, ®s);
149
150 stage_reported = regs.rax & 0xff;
151
152 if (stage_reported == DONE)
153 goto done;
154
155 TEST_ASSERT(stage_reported == stage ||
156 stage_reported == SMRAM_STAGE,
157 "Unexpected stage: #%x, got %x",
158 stage, stage_reported);
159
160 /*
161 * Enter SMM during L2 execution and check that we correctly
162 * return from it. Do not perform save/restore while in SMM yet.
163 */
164 if (stage == 8) {
165 inject_smi(vcpu);
166 continue;
167 }
168
169 /*
170 * Perform save/restore while the guest is in SMM triggered
171 * during L2 execution.
172 */
173 if (stage == 10)
174 inject_smi(vcpu);
175
176 state = vcpu_save_state(vcpu);
177 kvm_vm_release(vm);
178
179 vcpu = vm_recreate_with_one_vcpu(vm);
180 vcpu_load_state(vcpu, state);
181 kvm_x86_state_cleanup(state);
182 }
183
184 done:
185 kvm_vm_free(vm);
186 }
187