// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020, Google LLC.
 *
 * Tests for KVM paravirtual feature disablement
 */
#include <asm/kvm_para.h>
#include <linux/kvm_para.h>
#include <stdint.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

struct msr_data {
	uint32_t idx;
	const char *name;
};

#define TEST_MSR(msr) { .idx = msr, .name = #msr }
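/*
 * Arbitrary, test-private ucall command: the guest uses PR_MSR() to ask the
 * host to print which MSR is about to be tested.
 */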
#define UCALL_PR_MSR 0xdeadbeef
#define PR_MSR(msr) ucall(UCALL_PR_MSR, 1, msr)

/*
 * KVM paravirtual MSRs to test. Expect a #GP if any of these MSRs are read or
 * written, as the KVM_CPUID_FEATURES leaf is cleared.
 */
static struct msr_data msrs_to_test[] = {
	TEST_MSR(MSR_KVM_SYSTEM_TIME),
	TEST_MSR(MSR_KVM_SYSTEM_TIME_NEW),
	TEST_MSR(MSR_KVM_WALL_CLOCK),
	TEST_MSR(MSR_KVM_WALL_CLOCK_NEW),
	TEST_MSR(MSR_KVM_ASYNC_PF_EN),
	TEST_MSR(MSR_KVM_STEAL_TIME),
	TEST_MSR(MSR_KVM_PV_EOI_EN),
	TEST_MSR(MSR_KVM_POLL_CONTROL),
	TEST_MSR(MSR_KVM_ASYNC_PF_INT),
	TEST_MSR(MSR_KVM_ASYNC_PF_ACK),
};

static void test_msr(struct msr_data *msr)
{
	uint64_t ignored;
	uint8_t vector;

	PR_MSR(msr);

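	/*
	 * rdmsr_safe()/wrmsr_safe() return the vector of the exception taken
	 * on the access, or 0 if the access succeeded.  With the PV feature
	 * CPUID cleared and enforcement enabled, both accesses should #GP.
	 */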
	vector = rdmsr_safe(msr->idx, &ignored);
	GUEST_ASSERT_EQ(vector, GP_VECTOR);

	vector = wrmsr_safe(msr->idx, 0);
	GUEST_ASSERT_EQ(vector, GP_VECTOR);
}

struct hcall_data {
	uint64_t nr;
	const char *name;
};

#define TEST_HCALL(hc) { .nr = hc, .name = #hc }
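/*
 * As above, an arbitrary test-private ucall command, used by the guest to ask
 * the host to print which hypercall is about to be tested.
 */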
#define UCALL_PR_HCALL 0xdeadc0de
#define PR_HCALL(hc) ucall(UCALL_PR_HCALL, 1, hc)

/*
 * KVM hypercalls to test. Expect -KVM_ENOSYS when called, as the corresponding
 * features have been cleared in KVM_CPUID_FEATURES.
 */
static struct hcall_data hcalls_to_test[] = {
	TEST_HCALL(KVM_HC_KICK_CPU),
	TEST_HCALL(KVM_HC_SEND_IPI),
	TEST_HCALL(KVM_HC_SCHED_YIELD),
};

static void test_hcall(struct hcall_data *hc)
{
	uint64_t r;

	PR_HCALL(hc);
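	/*
	 * kvm_hypercall() issues a hypercall (VMCALL/VMMCALL) from the guest.
	 * The corresponding PV feature is hidden from the guest, so KVM should
	 * fail the hypercall with -KVM_ENOSYS.
	 */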
	r = kvm_hypercall(hc->nr, 0, 0, 0, 0);
	GUEST_ASSERT_EQ(r, -KVM_ENOSYS);
}

static void guest_main(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(msrs_to_test); i++) {
		test_msr(&msrs_to_test[i]);
	}

	for (i = 0; i < ARRAY_SIZE(hcalls_to_test); i++) {
		test_hcall(&hcalls_to_test[i]);
	}

	GUEST_DONE();
}

static void pr_msr(struct ucall *uc)
{
	struct msr_data *msr = (struct msr_data *)uc->args[0];

	pr_info("testing msr: %s (%#x)\n", msr->name, msr->idx);
}

static void pr_hcall(struct ucall *uc)
{
	struct hcall_data *hc = (struct hcall_data *)uc->args[0];

	pr_info("testing hcall: %s (%lu)\n", hc->name, hc->nr);
}

static void enter_guest(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

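	/*
	 * Host-side run loop: re-enter the guest until it signals completion,
	 * servicing its print ucalls and reporting any guest assertion failure.
	 */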
	while (true) {
		vcpu_run(vcpu);
		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_PR_MSR:
			pr_msr(&uc);
			break;
		case UCALL_PR_HCALL:
			pr_hcall(&uc);
			break;
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			return;
		case UCALL_DONE:
			return;
		}
	}
}

static void test_pv_unhalt(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct kvm_cpuid_entry2 *ent;
	u32 kvm_sig_old;

	pr_info("testing KVM_FEATURE_PV_UNHALT\n");

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_DISABLE_EXITS));

	/* KVM_PV_UNHALT test */
	vm = vm_create_with_one_vcpu(&vcpu, guest_main);
	vcpu_set_cpuid_feature(vcpu, X86_FEATURE_KVM_PV_UNHALT);

	TEST_ASSERT(vcpu_cpuid_has(vcpu, X86_FEATURE_KVM_PV_UNHALT),
		    "Enabling X86_FEATURE_KVM_PV_UNHALT had no effect");

	/* Make sure KVM clears vcpu->arch.kvm_cpuid */
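	/*
	 * Clobber the hypervisor signature so that KVM no longer recognizes
	 * its CPUID base leaf, disable HLT exits, then restore the signature.
	 * KVM is expected to strip KVM_FEATURE_PV_UNHALT once HLT exits are
	 * disabled, and must not act on a stale cached CPUID entry.
	 */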
	ent = vcpu_get_cpuid_entry(vcpu, KVM_CPUID_SIGNATURE);
	kvm_sig_old = ent->ebx;
	ent->ebx = 0xdeadbeef;
	vcpu_set_cpuid(vcpu);

	vm_enable_cap(vm, KVM_CAP_X86_DISABLE_EXITS, KVM_X86_DISABLE_EXITS_HLT);
	ent = vcpu_get_cpuid_entry(vcpu, KVM_CPUID_SIGNATURE);
	ent->ebx = kvm_sig_old;
	vcpu_set_cpuid(vcpu);

	TEST_ASSERT(!vcpu_cpuid_has(vcpu, X86_FEATURE_KVM_PV_UNHALT),
		    "KVM_FEATURE_PV_UNHALT is set with KVM_CAP_X86_DISABLE_EXITS");

	/* FIXME: actually test KVM_FEATURE_PV_UNHALT feature */

	kvm_vm_free(vm);
}

int main(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_ENFORCE_PV_FEATURE_CPUID));

	vm = vm_create_with_one_vcpu(&vcpu, guest_main);

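	/*
	 * With KVM_CAP_ENFORCE_PV_FEATURE_CPUID enabled, KVM honors the
	 * guest's PV feature CPUID bits when servicing PV MSRs and hypercalls.
	 * Clearing KVM_CPUID_FEATURES therefore disables every PV feature,
	 * which is what the guest code asserts.
	 */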
	vcpu_enable_cap(vcpu, KVM_CAP_ENFORCE_PV_FEATURE_CPUID, 1);

	vcpu_clear_cpuid_entry(vcpu, KVM_CPUID_FEATURES);

	enter_guest(vcpu);
	kvm_vm_free(vm);

	test_pv_unhalt();
}