// SPDX-License-Identifier: GPL-2.0-only
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

#include "apic.h"
#include "kvm_util.h"
#include "processor.h"
#include "test_util.h"

struct xapic_vcpu {
	struct kvm_vcpu *vcpu;
	bool is_x2apic;
	bool has_xavic_errata;
};

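/*
 * Guest code for the xAPIC ICR test: read back the 64-bit value the host
 * stuffed into the first two IRR registers, echo it through ICR2:ICR, and
 * report it to the host via GUEST_SYNC.
 */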
static void xapic_guest_code(void)
{
	asm volatile("cli");

	xapic_enable();

	while (1) {
		uint64_t val = (u64)xapic_read_reg(APIC_IRR) |
			       (u64)xapic_read_reg(APIC_IRR + 0x10) << 32;

		xapic_write_reg(APIC_ICR2, val >> 32);
		xapic_write_reg(APIC_ICR, val);
		GUEST_SYNC(val);
	}
}

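/*
 * Reserved bits in the low half of the x2APIC ICR (the BUSY bit is omitted
 * on purpose, see __test_icr()).  The guest expects a #GP when writing an
 * ICR value with any of these bits set.
 */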
#define X2APIC_RSVD_BITS_MASK	(GENMASK_ULL(31, 20) |	\
				 GENMASK_ULL(17, 16) |	\
				 GENMASK_ULL(13, 13))

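/*
 * Same echo loop as the xAPIC variant, but through the 64-bit ICR MSR; values
 * with reserved bits set must fault instead of being reflected back.
 */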
static void x2apic_guest_code(void)
{
	asm volatile("cli");

	x2apic_enable();

	do {
		uint64_t val = x2apic_read_reg(APIC_IRR) |
			       x2apic_read_reg(APIC_IRR + 0x10) << 32;

		if (val & X2APIC_RSVD_BITS_MASK) {
			x2apic_write_reg_fault(APIC_ICR, val);
		} else {
			x2apic_write_reg(APIC_ICR, val);
			GUEST_ASSERT_EQ(x2apic_read_reg(APIC_ICR), val);
		}
		GUEST_SYNC(val);
	} while (1);
}

static void ____test_icr(struct xapic_vcpu *x, uint64_t val)
{
	struct kvm_vcpu *vcpu = x->vcpu;
	struct kvm_lapic_state xapic;
	struct ucall uc;
	uint64_t icr;

	/*
	 * Tell the guest what ICR value to write.  Use the IRR to pass the
	 * value, as all of its bits are valid and should not be modified by
	 * KVM (ignoring the fact that vectors 0-15 are technically illegal).
	 */
	vcpu_ioctl(vcpu, KVM_GET_LAPIC, &xapic);
	*((u32 *)&xapic.regs[APIC_IRR]) = val;
	*((u32 *)&xapic.regs[APIC_IRR + 0x10]) = val >> 32;
	vcpu_ioctl(vcpu, KVM_SET_LAPIC, &xapic);

	vcpu_run(vcpu);
	TEST_ASSERT_EQ(get_ucall(vcpu, &uc), UCALL_SYNC);
	TEST_ASSERT_EQ(uc.args[1], val);

	vcpu_ioctl(vcpu, KVM_GET_LAPIC, &xapic);
	icr = (u64)(*((u32 *)&xapic.regs[APIC_ICR])) |
	      (u64)(*((u32 *)&xapic.regs[APIC_ICR2])) << 32;
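	/*
	 * For xAPIC, only the destination in ICR2[31:24] (bits 63:56 of the
	 * combined value) is preserved; KVM drops writes to ICR2[23:0], so
	 * clear those bits from the expected value, unless the AVIC errata
	 * applies and the full value is kept (see main()).
	 */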
	if (!x->is_x2apic) {
		if (!x->has_xavic_errata)
			val &= (-1u | (0xffull << (32 + 24)));
	} else if (val & X2APIC_RSVD_BITS_MASK) {
		return;
	}

	if (x->has_xavic_errata)
		TEST_ASSERT_EQ(icr & ~APIC_ICR_BUSY, val & ~APIC_ICR_BUSY);
	else
		TEST_ASSERT_EQ(icr, val & ~APIC_ICR_BUSY);
}

static void __test_icr(struct xapic_vcpu *x, uint64_t val)
{
	/*
	 * The BUSY bit is reserved on both AMD and Intel, but only AMD treats
	 * it as _must_ be zero.  Intel simply ignores the bit.  Don't test
	 * the BUSY bit for x2APIC, as there is no single correct behavior.
	 */
	if (!x->is_x2apic)
		____test_icr(x, val | APIC_ICR_BUSY);

	____test_icr(x, val & ~(u64)APIC_ICR_BUSY);
}

static void test_icr(struct xapic_vcpu *x)
{
	struct kvm_vcpu *vcpu = x->vcpu;
	uint64_t icr, i, j;

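	/*
	 * Fixed-delivery IPIs for every possible vector, first using the SELF
	 * shorthand and then with no destination shorthand.
	 */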
	icr = APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_FIXED;
	for (i = 0; i <= 0xff; i++)
		__test_icr(x, icr | i);

	icr = APIC_INT_ASSERT | APIC_DM_FIXED;
	for (i = 0; i <= 0xff; i++)
		__test_icr(x, icr | i);

	/*
	 * Send all flavors of IPIs to non-existent vCPUs.  TODO: use number of
	 * vCPUs, not vcpu.id + 1.  Arbitrarily use vector 0xff.
	 */
	icr = APIC_INT_ASSERT | 0xff;
	for (i = 0; i < 0xff; i++) {
		if (i == vcpu->id)
			continue;
		for (j = 0; j < 8; j++)
			__test_icr(x, i << (32 + 24) | icr | (j << 8));
	}

	/* And again with a shorthand destination for all types of IPIs. */
	icr = APIC_DEST_ALLBUT | APIC_INT_ASSERT;
	for (i = 0; i < 8; i++)
		__test_icr(x, icr | (i << 8));

	/* And a few garbage values, just to make sure it's an IRQ (blocked). */
	__test_icr(x, 0xa5a5a5a5a5a5a5a5 & ~APIC_DM_FIXED_MASK);
	__test_icr(x, 0x5a5a5a5a5a5a5a5a & ~APIC_DM_FIXED_MASK);
	__test_icr(x, -1ull & ~APIC_DM_FIXED_MASK);
}

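/*
 * Stuff the given value into MSR_IA32_APICBASE and verify that the APIC_ID
 * KVM reports via KVM_GET_LAPIC matches the format of the resulting mode.
 */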
static void __test_apic_id(struct kvm_vcpu *vcpu, uint64_t apic_base)
{
	uint32_t apic_id, expected;
	struct kvm_lapic_state xapic;

	vcpu_set_msr(vcpu, MSR_IA32_APICBASE, apic_base);

	vcpu_ioctl(vcpu, KVM_GET_LAPIC, &xapic);

	expected = apic_base & X2APIC_ENABLE ? vcpu->id : vcpu->id << 24;
	apic_id = *((u32 *)&xapic.regs[APIC_ID]);

	TEST_ASSERT(apic_id == expected,
		    "APIC_ID not set back to %s format; wanted = %x, got = %x",
		    (apic_base & X2APIC_ENABLE) ? "x2APIC" : "xAPIC",
		    expected, apic_id);
}

/*
 * Verify that KVM switches the APIC_ID between xAPIC and x2APIC when userspace
 * stuffs MSR_IA32_APICBASE.  Setting the APIC_ID when x2APIC is enabled and
 * when the APIC transitions from DISABLED to ENABLED is architectural behavior
 * (on Intel), whereas the x2APIC => xAPIC transition behavior is KVM ABI, as
 * attempting to transition from x2APIC to xAPIC without disabling the APIC is
 * architecturally disallowed.
 */
static void test_apic_id(void)
{
	const uint32_t NR_VCPUS = 3;
	struct kvm_vcpu *vcpus[NR_VCPUS];
	uint64_t apic_base;
	struct kvm_vm *vm;
	int i;

	vm = vm_create_with_vcpus(NR_VCPUS, NULL, vcpus);
	vm_enable_cap(vm, KVM_CAP_X2APIC_API, KVM_X2APIC_API_USE_32BIT_IDS);

	for (i = 0; i < NR_VCPUS; i++) {
		apic_base = vcpu_get_msr(vcpus[i], MSR_IA32_APICBASE);

		TEST_ASSERT(apic_base & MSR_IA32_APICBASE_ENABLE,
			    "APIC not in ENABLED state at vCPU RESET");
		TEST_ASSERT(!(apic_base & X2APIC_ENABLE),
			    "APIC not in xAPIC mode at vCPU RESET");

		__test_apic_id(vcpus[i], apic_base);
		__test_apic_id(vcpus[i], apic_base | X2APIC_ENABLE);
		__test_apic_id(vcpus[i], apic_base);
	}

	kvm_vm_free(vm);
}

static void test_x2apic_id(void)
{
	struct kvm_lapic_state lapic = {};
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	int i;

	vm = vm_create_with_one_vcpu(&vcpu, NULL);
	vcpu_set_msr(vcpu, MSR_IA32_APICBASE, MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);

	/*
	 * Try stuffing a modified x2APIC ID; KVM should ignore the value and
	 * always return the vCPU's default/readonly x2APIC ID.
	 */
	for (i = 0; i <= 0xff; i++) {
		*(u32 *)(lapic.regs + APIC_ID) = i << 24;
		*(u32 *)(lapic.regs + APIC_SPIV) = APIC_SPIV_APIC_ENABLED;
		vcpu_ioctl(vcpu, KVM_SET_LAPIC, &lapic);

		vcpu_ioctl(vcpu, KVM_GET_LAPIC, &lapic);
		TEST_ASSERT(*((u32 *)&lapic.regs[APIC_ID]) == vcpu->id << 24,
			    "x2APIC ID should be fully readonly");
	}

	kvm_vm_free(vm);
}

int main(int argc, char *argv[])
{
	struct xapic_vcpu x = {
		.vcpu = NULL,
		.is_x2apic = true,
	};
	struct kvm_vm *vm;

	vm = vm_create_with_one_vcpu(&x.vcpu, x2apic_guest_code);
	test_icr(&x);
	kvm_vm_free(vm);

	/*
	 * Use a second VM for the xAPIC test so that x2APIC can be hidden from
	 * the guest in order to test AVIC.  KVM disallows changing CPUID after
	 * KVM_RUN and AVIC is disabled if _any_ vCPU is allowed to use x2APIC.
	 */
	vm = vm_create_with_one_vcpu(&x.vcpu, xapic_guest_code);
	x.is_x2apic = false;

	/*
	 * AMD's AVIC implementation is buggy (fails to clear the ICR BUSY bit),
	 * and also diverges from KVM with respect to ICR2[23:0] (KVM and Intel
	 * drop writes, AMD does not).  Account for the errata when checking
	 * that KVM reads back what was written.
	 */
	x.has_xavic_errata = host_cpu_is_amd &&
			     get_kvm_amd_param_bool("avic");

	vcpu_clear_cpuid_feature(x.vcpu, X86_FEATURE_X2APIC);

	virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);
	test_icr(&x);
	kvm_vm_free(vm);

	test_apic_id();
	test_x2apic_id();
}