// SPDX-License-Identifier: GPL-2.0
/*
 * Hyper-V HvCallSendSyntheticClusterIpi{,Ex} tests
 *
 * Copyright (C) 2022, Red Hat, Inc.
 *
 */
#include <pthread.h>
#include <inttypes.h>

#include "kvm_util.h"
#include "hyperv.h"
#include "test_util.h"
#include "vmx.h"

#define RECEIVER_VCPU_ID_1 2
#define RECEIVER_VCPU_ID_2 65

#define IPI_VECTOR 0xfe

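/*
 * Per-vCPU IPI counters, indexed by Hyper-V VP index. Receivers store
 * (u64)-1 here to signal they are up; the sender resets and then counts.
 */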
static volatile uint64_t ipis_rcvd[RECEIVER_VCPU_ID_2 + 1];

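/*
 * Local copy of the VP set layout from the TLFS; two 64-VP banks are
 * enough to address RECEIVER_VCPU_ID_2 (VP index 65).
 */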
struct hv_vpset {
	u64 format;
	u64 valid_bank_mask;
	u64 bank_contents[2];
};

enum HV_GENERIC_SET_FORMAT {
	HV_GENERIC_SET_SPARSE_4K,
	HV_GENERIC_SET_ALL,
};

/* HvCallSendSyntheticClusterIpi hypercall */
struct hv_send_ipi {
	u32 vector;
	u32 reserved;
	u64 cpu_mask;
};

/* HvCallSendSyntheticClusterIpiEx hypercall */
struct hv_send_ipi_ex {
	u32 vector;
	u32 reserved;
	struct hv_vpset vp_set;
};

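/*
 * Minimal Hyper-V enlightenment setup: set the guest OS ID and point the
 * hypervisor at the hypercall page, both required before issuing hypercalls.
 */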
static inline void hv_init(vm_vaddr_t pgs_gpa)
{
	wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
	wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa);
}

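/*
 * Receiver vCPUs enable x2APIC, publish their VP index as 'ready' and then
 * halt; each incoming IPI wakes them just long enough to run the handler.
 */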
static void receiver_code(void *hcall_page, vm_vaddr_t pgs_gpa)
{
	u32 vcpu_id;

	x2apic_enable();
	hv_init(pgs_gpa);

	vcpu_id = rdmsr(HV_X64_MSR_VP_INDEX);

	/* Signal sender vCPU we're ready */
	ipis_rcvd[vcpu_id] = (u64)-1;

	for (;;)
		asm volatile("sti; hlt; cli");
}

static void guest_ipi_handler(struct ex_regs *regs)
{
	u32 vcpu_id = rdmsr(HV_X64_MSR_VP_INDEX);

	ipis_rcvd[vcpu_id]++;
	wrmsr(HV_X64_MSR_EOI, 1);
}

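/*
 * Crude delay: there is no completion signal from the receivers, so the
 * sender busy-waits before checking the IPI counters.
 */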
static inline void nop_loop(void)
{
	int i;

	for (i = 0; i < 100000000; i++)
		asm volatile("nop");
}

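/*
 * Exercises HvCallSendSyntheticClusterIpi{,Ex} in every supported flavor:
 * 'slow' (input in memory), 'fast' (input in registers/XMM), sparse VP
 * sets hitting either 64-VP bank, and HV_GENERIC_SET_ALL. After each
 * hypercall the per-vCPU counters must match the expected values exactly.
 */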
static void sender_guest_code(void *hcall_page, vm_vaddr_t pgs_gpa)
{
	struct hv_send_ipi *ipi = (struct hv_send_ipi *)hcall_page;
	struct hv_send_ipi_ex *ipi_ex = (struct hv_send_ipi_ex *)hcall_page;
	int stage = 1, ipis_expected[2] = {0};

	hv_init(pgs_gpa);
	GUEST_SYNC(stage++);

	/* Wait for receiver vCPUs to come up */
	while (!ipis_rcvd[RECEIVER_VCPU_ID_1] || !ipis_rcvd[RECEIVER_VCPU_ID_2])
		nop_loop();
	ipis_rcvd[RECEIVER_VCPU_ID_1] = ipis_rcvd[RECEIVER_VCPU_ID_2] = 0;

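	/*
	 * A 'slow' hypercall reads its input from guest memory (the hypercall
	 * page at pgs_gpa); HV_HYPERCALL_FAST_BIT switches to register-based
	 * input instead.
	 */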
	/* 'Slow' HvCallSendSyntheticClusterIpi to RECEIVER_VCPU_ID_1 */
	ipi->vector = IPI_VECTOR;
	ipi->cpu_mask = 1 << RECEIVER_VCPU_ID_1;
	hyperv_hypercall(HVCALL_SEND_IPI, pgs_gpa, pgs_gpa + 4096);
	nop_loop();
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ipis_expected[1]);
	GUEST_SYNC(stage++);
	/* 'Fast' HvCallSendSyntheticClusterIpi to RECEIVER_VCPU_ID_1 */
	hyperv_hypercall(HVCALL_SEND_IPI | HV_HYPERCALL_FAST_BIT,
			 IPI_VECTOR, 1 << RECEIVER_VCPU_ID_1);
	nop_loop();
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ipis_expected[1]);
	GUEST_SYNC(stage++);

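	/*
	 * The 'Ex' variant carries a variable-size header: the count encoded
	 * at HV_HYPERCALL_VARHEAD_OFFSET is the number of 8-byte
	 * bank_contents entries that follow the fixed input.
	 */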
	/* 'Slow' HvCallSendSyntheticClusterIpiEx to RECEIVER_VCPU_ID_1 */
	memset(hcall_page, 0, 4096);
	ipi_ex->vector = IPI_VECTOR;
	ipi_ex->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
	ipi_ex->vp_set.valid_bank_mask = 1 << 0;
	ipi_ex->vp_set.bank_contents[0] = BIT(RECEIVER_VCPU_ID_1);
	hyperv_hypercall(HVCALL_SEND_IPI_EX | (1 << HV_HYPERCALL_VARHEAD_OFFSET),
			 pgs_gpa, pgs_gpa + 4096);
	nop_loop();
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ipis_expected[1]);
	GUEST_SYNC(stage++);
	/* 'XMM Fast' HvCallSendSyntheticClusterIpiEx to RECEIVER_VCPU_ID_1 */
	hyperv_write_xmm_input(&ipi_ex->vp_set.valid_bank_mask, 1);
	hyperv_hypercall(HVCALL_SEND_IPI_EX | HV_HYPERCALL_FAST_BIT |
			 (1 << HV_HYPERCALL_VARHEAD_OFFSET),
			 IPI_VECTOR, HV_GENERIC_SET_SPARSE_4K);
	nop_loop();
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ipis_expected[1]);
	GUEST_SYNC(stage++);

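	/*
	 * VP 65 sits in the second 64-VP bank: select bank 1 in
	 * valid_bank_mask and set bit (VP index - 64) in its contents.
	 */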
	/* 'Slow' HvCallSendSyntheticClusterIpiEx to RECEIVER_VCPU_ID_2 */
	memset(hcall_page, 0, 4096);
	ipi_ex->vector = IPI_VECTOR;
	ipi_ex->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
	ipi_ex->vp_set.valid_bank_mask = 1 << 1;
	ipi_ex->vp_set.bank_contents[0] = BIT(RECEIVER_VCPU_ID_2 - 64);
	hyperv_hypercall(HVCALL_SEND_IPI_EX | (1 << HV_HYPERCALL_VARHEAD_OFFSET),
			 pgs_gpa, pgs_gpa + 4096);
	nop_loop();
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ipis_expected[0]);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ++ipis_expected[1]);
	GUEST_SYNC(stage++);
	/* 'XMM Fast' HvCallSendSyntheticClusterIpiEx to RECEIVER_VCPU_ID_2 */
	hyperv_write_xmm_input(&ipi_ex->vp_set.valid_bank_mask, 1);
	hyperv_hypercall(HVCALL_SEND_IPI_EX | HV_HYPERCALL_FAST_BIT |
			 (1 << HV_HYPERCALL_VARHEAD_OFFSET),
			 IPI_VECTOR, HV_GENERIC_SET_SPARSE_4K);
	nop_loop();
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ipis_expected[0]);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ++ipis_expected[1]);
	GUEST_SYNC(stage++);

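	/*
	 * Target both receivers at once: both banks are valid, so the
	 * variable header is two 8-byte entries.
	 */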
	/* 'Slow' HvCallSendSyntheticClusterIpiEx to both RECEIVER_VCPU_ID_{1,2} */
	memset(hcall_page, 0, 4096);
	ipi_ex->vector = IPI_VECTOR;
	ipi_ex->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
	ipi_ex->vp_set.valid_bank_mask = 1 << 1 | 1;
	ipi_ex->vp_set.bank_contents[0] = BIT(RECEIVER_VCPU_ID_1);
	ipi_ex->vp_set.bank_contents[1] = BIT(RECEIVER_VCPU_ID_2 - 64);
	hyperv_hypercall(HVCALL_SEND_IPI_EX | (2 << HV_HYPERCALL_VARHEAD_OFFSET),
			 pgs_gpa, pgs_gpa + 4096);
	nop_loop();
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ++ipis_expected[1]);
	GUEST_SYNC(stage++);
	/* 'XMM Fast' HvCallSendSyntheticClusterIpiEx to both RECEIVER_VCPU_ID_{1,2} */
	hyperv_write_xmm_input(&ipi_ex->vp_set.valid_bank_mask, 2);
	hyperv_hypercall(HVCALL_SEND_IPI_EX | HV_HYPERCALL_FAST_BIT |
			 (2 << HV_HYPERCALL_VARHEAD_OFFSET),
			 IPI_VECTOR, HV_GENERIC_SET_SPARSE_4K);
	nop_loop();
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ++ipis_expected[1]);
	GUEST_SYNC(stage++);

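	/*
	 * HV_GENERIC_SET_ALL addresses every VP without any bank data, so the
	 * hypercall input carries no varhead size bits.
	 */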
	/* 'Slow' HvCallSendSyntheticClusterIpiEx to HV_GENERIC_SET_ALL */
	memset(hcall_page, 0, 4096);
	ipi_ex->vector = IPI_VECTOR;
	ipi_ex->vp_set.format = HV_GENERIC_SET_ALL;
	hyperv_hypercall(HVCALL_SEND_IPI_EX, pgs_gpa, pgs_gpa + 4096);
	nop_loop();
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ++ipis_expected[1]);
	GUEST_SYNC(stage++);
	/* 'XMM Fast' HvCallSendSyntheticClusterIpiEx to HV_GENERIC_SET_ALL */
	ipi_ex->vp_set.valid_bank_mask = 0;
	hyperv_write_xmm_input(&ipi_ex->vp_set.valid_bank_mask, 2);
	hyperv_hypercall(HVCALL_SEND_IPI_EX | HV_HYPERCALL_FAST_BIT,
			 IPI_VECTOR, HV_GENERIC_SET_ALL);
	nop_loop();
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ++ipis_expected[1]);
	GUEST_SYNC(stage++);

	GUEST_DONE();
}

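/*
 * Receiver vCPUs never leave their halt loop, so the threads running them
 * are cancelled asynchronously once the sender is done.
 */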
static void *vcpu_thread(void *arg)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)arg;
	int old, r;

	r = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old);
	TEST_ASSERT(!r, "pthread_setcanceltype failed on vcpu_id=%u with errno=%d",
		    vcpu->id, r);

	vcpu_run(vcpu);

	TEST_FAIL("vCPU %u exited unexpectedly", vcpu->id);

	return NULL;
}

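/* Cancel a receiver thread and reap it, verifying it died by cancellation. */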
static void cancel_join_vcpu_thread(pthread_t thread, struct kvm_vcpu *vcpu)
{
	void *retval;
	int r;

	r = pthread_cancel(thread);
	TEST_ASSERT(!r, "pthread_cancel on vcpu_id=%d failed with errno=%d",
		    vcpu->id, r);

	r = pthread_join(thread, &retval);
	TEST_ASSERT(!r, "pthread_join on vcpu_id=%d failed with errno=%d",
		    vcpu->id, r);
	TEST_ASSERT(retval == PTHREAD_CANCELED,
		    "expected retval=%p, got %p", PTHREAD_CANCELED,
		    retval);
}

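/*
 * The sender runs on the main thread while each receiver gets its own
 * pthread, so all three vCPUs make progress concurrently.
 */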
int main(int argc, char *argv[])
{
	struct kvm_vm *vm;
	struct kvm_vcpu *vcpu[3];
	vm_vaddr_t hcall_page;
	pthread_t threads[2];
	int stage = 1, r;
	struct ucall uc;

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_SEND_IPI));

	vm = vm_create_with_one_vcpu(&vcpu[0], sender_guest_code);

	/* Hypercall input/output */
	hcall_page = vm_vaddr_alloc_pages(vm, 2);
	memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize());

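	/*
	 * Pin the receivers' VP indices explicitly: 2 falls into the first
	 * 64-VP bank and 65 into the second, covering both bank_contents
	 * entries of the sparse VP set.
	 */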
	vcpu[1] = vm_vcpu_add(vm, RECEIVER_VCPU_ID_1, receiver_code);
	vcpu_args_set(vcpu[1], 2, hcall_page, addr_gva2gpa(vm, hcall_page));
	vcpu_set_msr(vcpu[1], HV_X64_MSR_VP_INDEX, RECEIVER_VCPU_ID_1);
	vcpu_set_hv_cpuid(vcpu[1]);

	vcpu[2] = vm_vcpu_add(vm, RECEIVER_VCPU_ID_2, receiver_code);
	vcpu_args_set(vcpu[2], 2, hcall_page, addr_gva2gpa(vm, hcall_page));
	vcpu_set_msr(vcpu[2], HV_X64_MSR_VP_INDEX, RECEIVER_VCPU_ID_2);
	vcpu_set_hv_cpuid(vcpu[2]);

	vm_install_exception_handler(vm, IPI_VECTOR, guest_ipi_handler);

	vcpu_args_set(vcpu[0], 2, hcall_page, addr_gva2gpa(vm, hcall_page));
	vcpu_set_hv_cpuid(vcpu[0]);

	r = pthread_create(&threads[0], NULL, vcpu_thread, vcpu[1]);
	TEST_ASSERT(!r, "pthread_create failed errno=%d", r);

	r = pthread_create(&threads[1], NULL, vcpu_thread, vcpu[2]);
	TEST_ASSERT(!r, "pthread_create failed errno=%d", r);

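	/* Drive the sender and check that GUEST_SYNC stages arrive in order. */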
	while (true) {
		vcpu_run(vcpu[0]);

		TEST_ASSERT_KVM_EXIT_REASON(vcpu[0], KVM_EXIT_IO);

		switch (get_ucall(vcpu[0], &uc)) {
		case UCALL_SYNC:
			TEST_ASSERT(uc.args[1] == stage,
				    "Unexpected stage: %ld (%d expected)",
				    uc.args[1], stage);
			break;
		case UCALL_DONE:
			goto done;
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			/* NOT REACHED */
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}

		stage++;
	}

done:
	cancel_join_vcpu_thread(threads[0], vcpu[1]);
	cancel_join_vcpu_thread(threads[1], vcpu[2]);
	kvm_vm_free(vm);

	return r;
}