xref: /linux/tools/testing/selftests/kvm/x86_64/smm_test.c (revision 46e6acfe3501fa938af9c5bd730f0020235b08a2)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018, Red Hat, Inc.
 *
 * Tests for SMM.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

#include "test_util.h"

#include "kvm_util.h"

#include "vmx.h"
#include "svm_util.h"

#define SMRAM_SIZE 65536
#define SMRAM_MEMSLOT ((1 << 16) | 1)
#define SMRAM_PAGES (SMRAM_SIZE / PAGE_SIZE)
#define SMRAM_GPA 0x1000000
#define SMRAM_STAGE 0xfe

#define STR(x) #x
#define XSTR(s) STR(s)

#define SYNC_PORT 0xe
#define DONE 0xff

/*
 * This is compiled as normal 64-bit code, however, the SMI handler is
 * executed in real-address mode. To stay simple we're limiting ourselves
 * to a mode-independent subset of asm here.
 * The SMI handler always reports back the fixed stage SMRAM_STAGE.
 */
uint8_t smi_handler[] = {
	0xb0, SMRAM_STAGE,    /* mov $SMRAM_STAGE, %al */
	0xe4, SYNC_PORT,      /* in $SYNC_PORT, %al */
	0x0f, 0xaa,           /* rsm */
};

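/*
 * Report the current stage to the host: IN on SYNC_PORT triggers a
 * KVM_EXIT_IO to userspace, which reads the stage out of the low byte
 * of RAX.
 */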
static inline void sync_with_host(uint64_t phase)
{
	asm volatile("in $" XSTR(SYNC_PORT)", %%al \n"
		     : "+a" (phase));
}

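/* Send an SMI to the calling vCPU via the x2APIC ICR ("self" shorthand). */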
static void self_smi(void)
{
	x2apic_write_reg(APIC_ICR,
			 APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_SMI);
}

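/*
 * L2 merely reports stages 8 and 10 (the host injects an SMI at each of
 * them, see main()) and then exits back to L1 with vmcall.
 */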
static void l2_guest_code(void)
{
	sync_with_host(8);

	sync_with_host(10);

	vmcall();
}

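/*
 * L1 guest: walk through numbered stages, reporting each to the host.
 * When 'arg' is non-NULL, also set up a nested L2 guest (SVM or VMX)
 * so that SMIs can be exercised while L2 is running.
 */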
static void guest_code(void *arg)
{
	#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	uint64_t apicbase = rdmsr(MSR_IA32_APICBASE);
	struct svm_test_data *svm = arg;
	struct vmx_pages *vmx_pages = arg;

	sync_with_host(1);

	wrmsr(MSR_IA32_APICBASE, apicbase | X2APIC_ENABLE);

	sync_with_host(2);

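	/* Stage 3: the SMI handler reports SMRAM_STAGE, then RSM returns control here. */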
	self_smi();

	sync_with_host(4);

	if (arg) {
		if (this_cpu_has(X86_FEATURE_SVM)) {
			generic_svm_setup(svm, l2_guest_code,
					  &l2_guest_stack[L2_GUEST_STACK_SIZE]);
		} else {
			GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
			GUEST_ASSERT(load_vmcs(vmx_pages));
			prepare_vmcs(vmx_pages, l2_guest_code,
				     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
		}

		sync_with_host(5);

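		/* Stage 6: again reported by the SMI handler as SMRAM_STAGE. */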
		self_smi();

		sync_with_host(7);

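		/*
		 * Enter L2. While L2 runs, the host injects SMIs (at stages
		 * 8 and 10) to exercise SMM entry/exit during nested guest
		 * execution.
		 */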
		if (this_cpu_has(X86_FEATURE_SVM)) {
			run_guest(svm->vmcb, svm->vmcb_gpa);
			run_guest(svm->vmcb, svm->vmcb_gpa);
		} else {
			vmlaunch();
			vmresume();
		}

		/* Stages 8-11 are eaten by SMM (SMRAM_STAGE reported instead) */
		sync_with_host(12);
	}

	sync_with_host(DONE);
}

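/*
 * Mark an SMI pending from the host side via KVM_SET_VCPU_EVENTS;
 * KVM_VCPUEVENT_VALID_SMM must be set for the SMM fields to be honored.
 */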
void inject_smi(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_events events;

	vcpu_events_get(vcpu, &events);

	events.smi.pending = 1;
	events.flags |= KVM_VCPUEVENT_VALID_SMM;

	vcpu_events_set(vcpu, &events);
}

int main(int argc, char *argv[])
{
	vm_vaddr_t nested_gva = 0;

	struct kvm_vcpu *vcpu;
	struct kvm_regs regs;
	struct kvm_vm *vm;
	struct kvm_x86_state *state;
	int stage, stage_reported;

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_SMM));

	/* Create VM */
	vm = vm_create_with_one_vcpu(&vcpu, guest_code);

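	/*
	 * Back SMRAM with its own memslot and copy the SMI handler to
	 * SMRAM_GPA + 0x8000, the architectural SMI entry point relative
	 * to SMBASE.
	 */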
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, SMRAM_GPA,
				    SMRAM_MEMSLOT, SMRAM_PAGES, 0);
	TEST_ASSERT(vm_phy_pages_alloc(vm, SMRAM_PAGES, SMRAM_GPA, SMRAM_MEMSLOT)
		    == SMRAM_GPA, "could not allocate guest physical addresses?");

	memset(addr_gpa2hva(vm, SMRAM_GPA), 0x0, SMRAM_SIZE);
	memcpy(addr_gpa2hva(vm, SMRAM_GPA) + 0x8000, smi_handler,
	       sizeof(smi_handler));

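	/* Relocate SMBASE so SMIs vector into the handler copied above. */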
	vcpu_set_msr(vcpu, MSR_IA32_SMBASE, SMRAM_GPA);

	if (kvm_has_cap(KVM_CAP_NESTED_STATE)) {
		if (kvm_cpu_has(X86_FEATURE_SVM))
			vcpu_alloc_svm(vm, &nested_gva);
		else if (kvm_cpu_has(X86_FEATURE_VMX))
			vcpu_alloc_vmx(vm, &nested_gva);
	}

	if (!nested_gva)
		pr_info("will skip SMM test with VMX enabled\n");

	vcpu_args_set(vcpu, 1, nested_gva);

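	/*
	 * Every vcpu_run() is expected to end on the KVM_EXIT_IO from the
	 * sync port; the low byte of RAX carries the stage reported by the
	 * guest (or by the SMI handler). vCPU state is saved and restored
	 * into a recreated VM on each iteration.
	 */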
	for (stage = 1;; stage++) {
		vcpu_run(vcpu);
		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

		memset(&regs, 0, sizeof(regs));
		vcpu_regs_get(vcpu, &regs);

		stage_reported = regs.rax & 0xff;

		if (stage_reported == DONE)
			goto done;

		TEST_ASSERT(stage_reported == stage ||
			    stage_reported == SMRAM_STAGE,
			    "Unexpected stage: #%x, got %x",
			    stage, stage_reported);

		/*
		 * Enter SMM during L2 execution and check that we correctly
		 * return from it. Do not perform save/restore while in SMM yet.
		 */
		if (stage == 8) {
			inject_smi(vcpu);
			continue;
		}

		/*
		 * Perform save/restore while the guest is in SMM triggered
		 * during L2 execution.
		 */
		if (stage == 10)
			inject_smi(vcpu);

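		/* Migrate: save all vCPU state, rebuild the VM and restore. */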
		state = vcpu_save_state(vcpu);
		kvm_vm_release(vm);

		vcpu = vm_recreate_with_one_vcpu(vm);
		vcpu_load_state(vcpu, state);
		kvm_x86_state_cleanup(state);
	}

done:
	kvm_vm_free(vm);
}