// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018, Red Hat, Inc.
 *
 * Tests for SMM.
 */
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

#include "test_util.h"

#include "kvm_util.h"

#include "vmx.h"
#include "svm_util.h"

#define VCPU_ID	      1

#define SMRAM_SIZE 65536
#define SMRAM_MEMSLOT ((1 << 16) | 1)
#define SMRAM_PAGES (SMRAM_SIZE / PAGE_SIZE)
#define SMRAM_GPA 0x1000000
#define SMRAM_STAGE 0xfe

#define STR(x) #x
#define XSTR(s) STR(s)

#define SYNC_PORT 0xe
#define DONE 0xff
/*
 * This file is compiled as normal 64-bit code; the SMI handler, however,
 * runs in real-address mode.  To keep things simple we limit ourselves to
 * a mode-independent subset of asm here.
 * The SMI handler always reports back the fixed stage SMRAM_STAGE.
 */
uint8_t smi_handler[] = {
	0xb0, SMRAM_STAGE,    /* mov $SMRAM_STAGE, %al */
	0xe4, SYNC_PORT,      /* in $SYNC_PORT, %al */
	0x0f, 0xaa,           /* rsm */
};

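/*
 * Report the current test stage to the host: the "in" from SYNC_PORT forces
 * a KVM_EXIT_IO, and the host reads the stage back from the low byte of RAX
 * with KVM_GET_REGS.
 */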
static inline void sync_with_host(uint64_t phase)
{
	asm volatile("in $" XSTR(SYNC_PORT)", %%al \n"
		     : "+a" (phase));
}

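/*
 * Send an SMI to the vCPU itself: write the x2APIC ICR with the
 * self-destination shorthand and SMI delivery mode.
 */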
static void self_smi(void)
{
	x2apic_write_reg(APIC_ICR,
			 APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_SMI);
}

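/*
 * L2 reports stages 8 and 10.  The host injects an SMI at each of them, so
 * the next exit after either stage comes from the SMI handler and reports
 * SMRAM_STAGE before L2 resumes.
 */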
static void l2_guest_code(void)
{
	sync_with_host(8);

	sync_with_host(10);

	vmcall();
}

static void guest_code(void *arg)
{
	#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	uint64_t apicbase = rdmsr(MSR_IA32_APICBASE);
	struct svm_test_data *svm = arg;
	struct vmx_pages *vmx_pages = arg;

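	/*
	 * arg is either a struct svm_test_data * or a struct vmx_pages *
	 * (or NULL when nested virtualization is unavailable); cpu_has_svm()
	 * below picks the right interpretation at run time.
	 */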
	sync_with_host(1);

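	/* Enable x2APIC; self_smi() relies on the x2APIC ICR to raise the SMI. */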
	wrmsr(MSR_IA32_APICBASE, apicbase | X2APIC_ENABLE);

	sync_with_host(2);

	self_smi();

	sync_with_host(4);

	if (arg) {
		if (cpu_has_svm()) {
			generic_svm_setup(svm, l2_guest_code,
					  &l2_guest_stack[L2_GUEST_STACK_SIZE]);
		} else {
			GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
			GUEST_ASSERT(load_vmcs(vmx_pages));
			prepare_vmcs(vmx_pages, l2_guest_code,
				     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
		}

		sync_with_host(5);

		self_smi();

		sync_with_host(7);

		if (cpu_has_svm()) {
			run_guest(svm->vmcb, svm->vmcb_gpa);
			run_guest(svm->vmcb, svm->vmcb_gpa);
		} else {
			vmlaunch();
			vmresume();
		}

		/* Stages 8-11 are eaten by SMM (SMRAM_STAGE reported instead) */
		sync_with_host(12);
	}

	sync_with_host(DONE);
}

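/*
 * Make an SMI pending from the host side.  vcpu_events_set() wraps
 * KVM_SET_VCPU_EVENTS; KVM_VCPUEVENT_VALID_SMM marks the smi fields as
 * valid so KVM queues the SMI, mirroring the guest's self_smi().
 */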
void inject_smi(struct kvm_vm *vm)
{
	struct kvm_vcpu_events events;

	vcpu_events_get(vm, VCPU_ID, &events);

	events.smi.pending = 1;
	events.flags |= KVM_VCPUEVENT_VALID_SMM;

	vcpu_events_set(vm, VCPU_ID, &events);
}

int main(int argc, char *argv[])
{
	vm_vaddr_t nested_gva = 0;

	struct kvm_regs regs;
	struct kvm_vm *vm;
	struct kvm_run *run;
	struct kvm_x86_state *state;
	int stage, stage_reported;

	/* Create VM */
	vm = vm_create_default(VCPU_ID, 0, guest_code);

	run = vcpu_state(vm, VCPU_ID);

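	/*
	 * Give SMRAM its own memslot and allocate its guest-physical pages,
	 * then install the SMI handler at offset 0x8000, the entry point
	 * relative to SMBASE.
	 */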
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, SMRAM_GPA,
				    SMRAM_MEMSLOT, SMRAM_PAGES, 0);
	TEST_ASSERT(vm_phy_pages_alloc(vm, SMRAM_PAGES, SMRAM_GPA, SMRAM_MEMSLOT)
		    == SMRAM_GPA, "could not allocate guest physical addresses?");

	memset(addr_gpa2hva(vm, SMRAM_GPA), 0x0, SMRAM_SIZE);
	memcpy(addr_gpa2hva(vm, SMRAM_GPA) + 0x8000, smi_handler,
	       sizeof(smi_handler));

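	/*
	 * Relocate SMBASE to the test's SMRAM region; the SMI entry point
	 * becomes SMRAM_GPA + 0x8000, where smi_handler was copied above.
	 */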
	vcpu_set_msr(vm, VCPU_ID, MSR_IA32_SMBASE, SMRAM_GPA);

	if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
		if (nested_svm_supported())
			vcpu_alloc_svm(vm, &nested_gva);
		else if (nested_vmx_supported())
			vcpu_alloc_vmx(vm, &nested_gva);
	}

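	/*
	 * nested_gva stays 0 if nested state or nested SVM/VMX support is
	 * missing; the guest then sees a NULL arg and skips the L2 stages.
	 */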
	if (!nested_gva)
		pr_info("will skip SMM test with VMX enabled\n");

	vcpu_args_set(vm, VCPU_ID, 1, nested_gva);

	for (stage = 1;; stage++) {
		_vcpu_run(vm, VCPU_ID);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Stage %d: unexpected exit reason: %u (%s)\n",
			    stage, run->exit_reason,
			    exit_reason_str(run->exit_reason));

		memset(&regs, 0, sizeof(regs));
		vcpu_regs_get(vm, VCPU_ID, &regs);

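		/*
		 * The guest leaves the stage in %al before the port access,
		 * so the low byte of RAX at the I/O exit is the stage number.
		 */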
		stage_reported = regs.rax & 0xff;

		if (stage_reported == DONE)
			goto done;

		TEST_ASSERT(stage_reported == stage ||
			    stage_reported == SMRAM_STAGE,
			    "Unexpected stage: #%x, got %x",
			    stage, stage_reported);

		/*
		 * Enter SMM during L2 execution and check that we correctly
		 * return from it. Do not perform save/restore while in SMM yet.
		 */
		if (stage == 8) {
			inject_smi(vm);
			continue;
		}

		/*
		 * Perform save/restore while the guest is in SMM triggered
		 * during L2 execution.
		 */
		if (stage == 10)
			inject_smi(vm);

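		/*
		 * Do a save/restore round trip at every stage: save the vCPU
		 * state, release and restart the VM, re-add the vCPU and
		 * restore the state, including with an SMI pending and while
		 * the vCPU is in SMM.
		 */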
		state = vcpu_save_state(vm, VCPU_ID);
		kvm_vm_release(vm);
		kvm_vm_restart(vm, O_RDWR);
		vm_vcpu_add(vm, VCPU_ID);
		vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
		vcpu_load_state(vm, VCPU_ID, state);
		run = vcpu_state(vm, VCPU_ID);
		kvm_x86_state_cleanup(state);
	}

done:
	kvm_vm_free(vm);
}