xref: /linux/tools/testing/selftests/kvm/x86_64/hyperv_tlb_flush.c (revision 69bfec7548f4c1595bac0e3ddfc0458a5af31f4c)
// SPDX-License-Identifier: GPL-2.0
/*
 * Hyper-V HvFlushVirtualAddress{List,Space}{,Ex} tests
 *
 * Copyright (C) 2022, Red Hat, Inc.
 *
 */

#define _GNU_SOURCE /* for program_invocation_short_name */
#include <asm/barrier.h>
#include <pthread.h>
#include <inttypes.h>

#include "kvm_util.h"
#include "processor.h"
#include "hyperv.h"
#include "test_util.h"
#include "vmx.h"

#define WORKER_VCPU_ID_1 2
#define WORKER_VCPU_ID_2 65

#define NTRY 100
#define NTEST_PAGES 2

struct hv_vpset {
	u64 format;
	u64 valid_bank_mask;
	u64 bank_contents[];
};

enum HV_GENERIC_SET_FORMAT {
	HV_GENERIC_SET_SPARSE_4K,
	HV_GENERIC_SET_ALL,
};
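
/*
 * With HV_GENERIC_SET_SPARSE_4K, each set bit in 'valid_bank_mask' selects a
 * bank of 64 VPs and 'bank_contents' carries one u64 of per-VP bits for each
 * such bank; HV_GENERIC_SET_ALL targets all VPs and carries no banks.
 */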

#define HV_FLUSH_ALL_PROCESSORS			BIT(0)
#define HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES	BIT(1)
#define HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY	BIT(2)
#define HV_FLUSH_USE_EXTENDED_RANGE_FORMAT	BIT(3)

/* HvFlushVirtualAddressSpace, HvFlushVirtualAddressList hypercalls */
struct hv_tlb_flush {
	u64 address_space;
	u64 flags;
	u64 processor_mask;
	u64 gva_list[];
} __packed;

/* HvFlushVirtualAddressSpaceEx, HvFlushVirtualAddressListEx hypercalls */
struct hv_tlb_flush_ex {
	u64 address_space;
	u64 flags;
	struct hv_vpset hv_vp_set;
	u64 gva_list[];
} __packed;
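
/*
 * Note: the flexible 'bank_contents' array of the embedded vpset overlays
 * 'gva_list', so GVA entries effectively start at the index equal to the
 * number of banks passed (see the "thus [1]"/"thus [2]" comments below).
 */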

/*
 * Pass the following info to 'workers' and 'sender':
 * - Hypercall page's GVA
 * - Hypercall page's GPA
 * - Test pages' GVA
 * - GVAs of the test pages' PTEs
 */
struct test_data {
	vm_vaddr_t hcall_gva;
	vm_paddr_t hcall_gpa;
	vm_vaddr_t test_pages;
	vm_vaddr_t test_pages_pte[NTEST_PAGES];
};

/* 'Worker' vCPU code checking the contents of the test page */
static void worker_guest_code(vm_vaddr_t test_data)
{
	struct test_data *data = (struct test_data *)test_data;
	u32 vcpu_id = rdmsr(HV_X64_MSR_VP_INDEX);
	void *exp_page = (void *)data->test_pages + PAGE_SIZE * NTEST_PAGES;
	u64 *this_cpu = (u64 *)(exp_page + vcpu_id * sizeof(u64));
	u64 expected, val;

	x2apic_enable();
	wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);

	for (;;) {
		cpu_relax();

		expected = READ_ONCE(*this_cpu);

		/*
		 * Make sure the value in the test page is read after reading
		 * the expectation for the first time. Pairs with wmb() in
		 * prepare_to_test().
		 */
		rmb();

		val = READ_ONCE(*(u64 *)data->test_pages);

		/*
		 * Make sure the value in the test page is read before reading
		 * the expectation for the second time. Pairs with wmb() in
		 * post_test().
		 */
		rmb();

		/*
		 * '0' indicates the sender is between iterations; wait until
		 * the sender is ready for this vCPU to start checking again.
		 */
		if (!expected)
			continue;

		/*
		 * Re-read the per-vCPU value to ensure the sender didn't move
		 * onto a new iteration.
		 */
		if (expected != READ_ONCE(*this_cpu))
			continue;

		GUEST_ASSERT(val == expected);
	}
}

/*
 * Write per-CPU info indicating what each 'worker' vCPU is supposed to see in
 * the test page. '0' means don't check.
 */
static void set_expected_val(void *addr, u64 val, int vcpu_id)
{
	void *exp_page = addr + PAGE_SIZE * NTEST_PAGES;

	*(u64 *)(exp_page + vcpu_id * sizeof(u64)) = val;
}

/*
 * Swap the PTEs of the two test pages.
 * TODO: use swap()/xchg() when these are provided.
 */
static void swap_two_test_pages(vm_vaddr_t pte_gva1, vm_vaddr_t pte_gva2)
{
	uint64_t tmp = *(uint64_t *)pte_gva1;

	*(uint64_t *)pte_gva1 = *(uint64_t *)pte_gva2;
	*(uint64_t *)pte_gva2 = tmp;
}
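
/*
 * After the swap, each test page's GVA translates to the other page's GPA;
 * a vCPU holding a stale TLB entry keeps seeing the old contents until the
 * entry is flushed, which is what makes the flush hypercalls observable.
 */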

/*
 * TODO: replace the silly NOP loop with a proper udelay() implementation.
 */
static inline void do_delay(void)
{
	int i;

	for (i = 0; i < 1000000; i++)
		asm volatile("nop");
}

/*
 * Prepare to test: 'disable' workers by setting the expectation to '0',
 * clear the hypercall input page and then swap the two test pages.
 */
static inline void prepare_to_test(struct test_data *data)
{
	/* Clear hypercall input page */
	memset((void *)data->hcall_gva, 0, PAGE_SIZE);

	/* 'Disable' workers */
	set_expected_val((void *)data->test_pages, 0x0, WORKER_VCPU_ID_1);
	set_expected_val((void *)data->test_pages, 0x0, WORKER_VCPU_ID_2);

	/* Make sure workers are 'disabled' before we swap PTEs. */
	wmb();

	/* Make sure workers have enough time to notice */
	do_delay();

	/* Swap test page mappings */
	swap_two_test_pages(data->test_pages_pte[0], data->test_pages_pte[1]);
}

/*
 * Finalize the test: set the expected val for 'worker' vCPUs and give them
 * some time to test.
 */
static inline void post_test(struct test_data *data, u64 exp1, u64 exp2)
{
	/* Make sure we change the expectation after swapping PTEs */
	wmb();

	/* Set the expectation for workers, '0' means don't test */
	set_expected_val((void *)data->test_pages, exp1, WORKER_VCPU_ID_1);
	set_expected_val((void *)data->test_pages, exp2, WORKER_VCPU_ID_2);

	/* Make sure workers have enough time to test */
	do_delay();
}

#define TESTVAL1 0x0101010101010101
#define TESTVAL2 0x0202020202020202
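
/*
 * These values match the patterns the two test pages are filled with in
 * main() (page 'i' is memset to 'i + 1'), so the value a worker reads at the
 * test GVA reveals which physical page its translation points at.
 */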

/* Main vCPU doing the test */
static void sender_guest_code(vm_vaddr_t test_data)
{
	struct test_data *data = (struct test_data *)test_data;
	struct hv_tlb_flush *flush = (struct hv_tlb_flush *)data->hcall_gva;
	struct hv_tlb_flush_ex *flush_ex = (struct hv_tlb_flush_ex *)data->hcall_gva;
	vm_paddr_t hcall_gpa = data->hcall_gpa;
	int i, stage = 1;

	wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
	wrmsr(HV_X64_MSR_HYPERCALL, data->hcall_gpa);

	/* "Slow" hypercalls */
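	/*
	 * Slow hypercalls pass input via the guest page at 'hcall_gpa' and
	 * get a separate page for output, hence the 'hcall_gpa' and
	 * 'hcall_gpa + PAGE_SIZE' arguments below.
	 */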

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE for WORKER_VCPU_ID_1 */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
		flush->processor_mask = BIT(WORKER_VCPU_ID_1);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE, hcall_gpa,
				 hcall_gpa + PAGE_SIZE);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2, 0x0);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST for WORKER_VCPU_ID_1 */
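	/*
	 * '1UL << HV_HYPERCALL_REP_COMP_OFFSET' sets the hypercall's rep
	 * count to 1, i.e. 'gva_list' contains a single entry.
	 */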
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
		flush->processor_mask = BIT(WORKER_VCPU_ID_1);
		flush->gva_list[0] = (u64)data->test_pages;
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST |
				 (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
				 hcall_gpa, hcall_gpa + PAGE_SIZE);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2, 0x0);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE for HV_FLUSH_ALL_PROCESSORS */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES |
			HV_FLUSH_ALL_PROCESSORS;
		flush->processor_mask = 0;
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE, hcall_gpa,
				 hcall_gpa + PAGE_SIZE);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2, i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST for HV_FLUSH_ALL_PROCESSORS */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES |
			HV_FLUSH_ALL_PROCESSORS;
		flush->gva_list[0] = (u64)data->test_pages;
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST |
				 (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
				 hcall_gpa, hcall_gpa + PAGE_SIZE);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for WORKER_VCPU_ID_2 */
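	/*
	 * 'N << HV_HYPERCALL_VARHEAD_OFFSET' sets the variable header size
	 * (in 8-byte chunks): the number of 'bank_contents' qwords that
	 * follow the fixed part of the input.
	 */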
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
		flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64);
		flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX |
				 (1 << HV_HYPERCALL_VARHEAD_OFFSET),
				 hcall_gpa, hcall_gpa + PAGE_SIZE);
		post_test(data, 0x0, i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for WORKER_VCPU_ID_2 */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
		flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64);
		flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
		/* bank_contents and gva_list occupy the same space, thus [1] */
		flush_ex->gva_list[1] = (u64)data->test_pages;
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX |
				 (1 << HV_HYPERCALL_VARHEAD_OFFSET) |
				 (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
				 hcall_gpa, hcall_gpa + PAGE_SIZE);
		post_test(data, 0x0, i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for both vCPUs */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
		flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64) |
			BIT_ULL(WORKER_VCPU_ID_1 / 64);
		flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_1 % 64);
		flush_ex->hv_vp_set.bank_contents[1] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX |
				 (2 << HV_HYPERCALL_VARHEAD_OFFSET),
				 hcall_gpa, hcall_gpa + PAGE_SIZE);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for both vCPUs */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
		flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_1 / 64) |
			BIT_ULL(WORKER_VCPU_ID_2 / 64);
		flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_1 % 64);
		flush_ex->hv_vp_set.bank_contents[1] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
		/* bank_contents and gva_list occupy the same space, thus [2] */
		flush_ex->gva_list[2] = (u64)data->test_pages;
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX |
				 (2 << HV_HYPERCALL_VARHEAD_OFFSET) |
				 (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
				 hcall_gpa, hcall_gpa + PAGE_SIZE);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for HV_GENERIC_SET_ALL */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_ALL;
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
				 hcall_gpa, hcall_gpa + PAGE_SIZE);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for HV_GENERIC_SET_ALL */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_ALL;
		flush_ex->gva_list[0] = (u64)data->test_pages;
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX |
				 (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
				 hcall_gpa, hcall_gpa + PAGE_SIZE);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);
	}

	/* "Fast" hypercalls */
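	/*
	 * Fast hypercalls (HV_HYPERCALL_FAST_BIT) pass input in registers
	 * instead of memory; inputs beyond the first two qwords go in XMM
	 * registers, loaded here via hyperv_write_xmm_input() with the count
	 * given in 128-bit registers.
	 */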

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE for WORKER_VCPU_ID_1 */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush->processor_mask = BIT(WORKER_VCPU_ID_1);
		hyperv_write_xmm_input(&flush->processor_mask, 1);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE |
				 HV_HYPERCALL_FAST_BIT, 0x0,
				 HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2, 0x0);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST for WORKER_VCPU_ID_1 */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush->processor_mask = BIT(WORKER_VCPU_ID_1);
		flush->gva_list[0] = (u64)data->test_pages;
		hyperv_write_xmm_input(&flush->processor_mask, 1);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST |
				 HV_HYPERCALL_FAST_BIT |
				 (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
				 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2, 0x0);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE for HV_FLUSH_ALL_PROCESSORS */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		hyperv_write_xmm_input(&flush->processor_mask, 1);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE |
				 HV_HYPERCALL_FAST_BIT, 0x0,
				 HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES |
				 HV_FLUSH_ALL_PROCESSORS);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST for HV_FLUSH_ALL_PROCESSORS */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush->gva_list[0] = (u64)data->test_pages;
		hyperv_write_xmm_input(&flush->processor_mask, 1);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST |
				 HV_HYPERCALL_FAST_BIT |
				 (1UL << HV_HYPERCALL_REP_COMP_OFFSET), 0x0,
				 HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES |
				 HV_FLUSH_ALL_PROCESSORS);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for WORKER_VCPU_ID_2 */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
		flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64);
		flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
		hyperv_write_xmm_input(&flush_ex->hv_vp_set, 2);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX |
				 HV_HYPERCALL_FAST_BIT |
				 (1 << HV_HYPERCALL_VARHEAD_OFFSET),
				 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
		post_test(data, 0x0, i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for WORKER_VCPU_ID_2 */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
		flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64);
		flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
		/* bank_contents and gva_list occupy the same space, thus [1] */
		flush_ex->gva_list[1] = (u64)data->test_pages;
		hyperv_write_xmm_input(&flush_ex->hv_vp_set, 2);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX |
				 HV_HYPERCALL_FAST_BIT |
				 (1 << HV_HYPERCALL_VARHEAD_OFFSET) |
				 (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
				 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
		post_test(data, 0x0, i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for both vCPUs */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
		flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64) |
			BIT_ULL(WORKER_VCPU_ID_1 / 64);
		flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_1 % 64);
		flush_ex->hv_vp_set.bank_contents[1] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
		hyperv_write_xmm_input(&flush_ex->hv_vp_set, 2);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX |
				 HV_HYPERCALL_FAST_BIT |
				 (2 << HV_HYPERCALL_VARHEAD_OFFSET),
				 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for both vCPUs */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
		flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_1 / 64) |
			BIT_ULL(WORKER_VCPU_ID_2 / 64);
		flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_1 % 64);
		flush_ex->hv_vp_set.bank_contents[1] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
		/* bank_contents and gva_list occupy the same space, thus [2] */
		flush_ex->gva_list[2] = (u64)data->test_pages;
		hyperv_write_xmm_input(&flush_ex->hv_vp_set, 3);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX |
				 HV_HYPERCALL_FAST_BIT |
				 (2 << HV_HYPERCALL_VARHEAD_OFFSET) |
				 (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
				 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for HV_GENERIC_SET_ALL */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_ALL;
		hyperv_write_xmm_input(&flush_ex->hv_vp_set, 2);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX |
				 HV_HYPERCALL_FAST_BIT,
				 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for HV_GENERIC_SET_ALL */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_ALL;
		flush_ex->gva_list[0] = (u64)data->test_pages;
		hyperv_write_xmm_input(&flush_ex->hv_vp_set, 2);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX |
				 HV_HYPERCALL_FAST_BIT |
				 (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
				 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_DONE();
}

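/*
 * Worker vCPUs loop in the guest indefinitely, so vcpu_run() is only
 * expected to return when something goes wrong; any ucall reaching this
 * thread is reported as a failure.
 */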
static void *vcpu_thread(void *arg)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)arg;
	struct ucall uc;
	int old;
	int r;
	unsigned int exit_reason;

	r = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old);
	TEST_ASSERT(!r, "pthread_setcanceltype failed on vcpu_id=%u with errno=%d",
		    vcpu->id, r);

	vcpu_run(vcpu);
	exit_reason = vcpu->run->exit_reason;

	TEST_ASSERT(exit_reason == KVM_EXIT_IO,
		    "vCPU %u exited with unexpected exit reason %u-%s, expected KVM_EXIT_IO",
		    vcpu->id, exit_reason, exit_reason_str(exit_reason));

	switch (get_ucall(vcpu, &uc)) {
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT(uc);
		/* NOT REACHED */
	default:
		TEST_FAIL("Unexpected ucall %lu, vCPU %d", uc.cmd, vcpu->id);
	}

	return NULL;
}

static void cancel_join_vcpu_thread(pthread_t thread, struct kvm_vcpu *vcpu)
{
	void *retval;
	int r;

	r = pthread_cancel(thread);
	TEST_ASSERT(!r, "pthread_cancel on vcpu_id=%d failed with errno=%d",
		    vcpu->id, r);

	r = pthread_join(thread, &retval);
	TEST_ASSERT(!r, "pthread_join on vcpu_id=%d failed with errno=%d",
		    vcpu->id, r);
	TEST_ASSERT(retval == PTHREAD_CANCELED,
		    "expected retval=%p, got %p", PTHREAD_CANCELED,
		    retval);
}

int main(int argc, char *argv[])
{
	struct kvm_vm *vm;
	struct kvm_vcpu *vcpu[3];
	unsigned int exit_reason;
	pthread_t threads[2];
	vm_vaddr_t test_data_page, gva;
	vm_paddr_t gpa;
	uint64_t *pte;
	struct test_data *data;
	struct ucall uc;
	int stage = 1, r, i;

	vm = vm_create_with_one_vcpu(&vcpu[0], sender_guest_code);

	/* Test data page */
	test_data_page = vm_vaddr_alloc_page(vm);
	data = (struct test_data *)addr_gva2hva(vm, test_data_page);

	/* Hypercall input/output */
	data->hcall_gva = vm_vaddr_alloc_pages(vm, 2);
	data->hcall_gpa = addr_gva2gpa(vm, data->hcall_gva);
	memset(addr_gva2hva(vm, data->hcall_gva), 0x0, 2 * PAGE_SIZE);

	/*
	 * Test pages: the first is filled with '0x01's, the second with
	 * '0x02's, and the test swaps their mappings. The third page holds
	 * the per-vCPU expected values ('0' means don't check).
	 */
	data->test_pages = vm_vaddr_alloc_pages(vm, NTEST_PAGES + 1);
	for (i = 0; i < NTEST_PAGES; i++)
		memset(addr_gva2hva(vm, data->test_pages + PAGE_SIZE * i),
		       (u8)(i + 1), PAGE_SIZE);
	set_expected_val(addr_gva2hva(vm, data->test_pages), 0x0, WORKER_VCPU_ID_1);
	set_expected_val(addr_gva2hva(vm, data->test_pages), 0x0, WORKER_VCPU_ID_2);

	/*
	 * Get PTE pointers for test pages and map them inside the guest.
	 * Use a separate page for each PTE for simplicity.
	 */
	gva = vm_vaddr_unused_gap(vm, NTEST_PAGES * PAGE_SIZE, KVM_UTIL_MIN_VADDR);
	for (i = 0; i < NTEST_PAGES; i++) {
		pte = vm_get_page_table_entry(vm, data->test_pages + i * PAGE_SIZE);
		gpa = addr_hva2gpa(vm, pte);
		__virt_pg_map(vm, gva + PAGE_SIZE * i, gpa & PAGE_MASK, PG_LEVEL_4K);
		data->test_pages_pte[i] = gva + (gpa & ~PAGE_MASK);
	}
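	/*
	 * data->test_pages_pte[i] now holds the GVA at which the guest can
	 * read and write the PTE of test page 'i', allowing the sender to
	 * swap the mappings from inside the guest.
	 */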

	/*
	 * Sender vCPU which performs the test: swaps test pages, sets expectation
	 * for 'workers' and issues TLB flush hypercalls.
	 */
	vcpu_args_set(vcpu[0], 1, test_data_page);
	vcpu_set_hv_cpuid(vcpu[0]);

	/* Create worker vCPUs which check the contents of the test pages */
	vcpu[1] = vm_vcpu_add(vm, WORKER_VCPU_ID_1, worker_guest_code);
	vcpu_args_set(vcpu[1], 1, test_data_page);
	vcpu_set_msr(vcpu[1], HV_X64_MSR_VP_INDEX, WORKER_VCPU_ID_1);
	vcpu_set_hv_cpuid(vcpu[1]);

	vcpu[2] = vm_vcpu_add(vm, WORKER_VCPU_ID_2, worker_guest_code);
	vcpu_args_set(vcpu[2], 1, test_data_page);
	vcpu_set_msr(vcpu[2], HV_X64_MSR_VP_INDEX, WORKER_VCPU_ID_2);
	vcpu_set_hv_cpuid(vcpu[2]);

	r = pthread_create(&threads[0], NULL, vcpu_thread, vcpu[1]);
	TEST_ASSERT(!r, "pthread_create() failed");

	r = pthread_create(&threads[1], NULL, vcpu_thread, vcpu[2]);
	TEST_ASSERT(!r, "pthread_create() failed");

	while (true) {
		vcpu_run(vcpu[0]);
		exit_reason = vcpu[0]->run->exit_reason;

		TEST_ASSERT(exit_reason == KVM_EXIT_IO,
			    "unexpected exit reason: %u (%s)",
			    exit_reason, exit_reason_str(exit_reason));

		switch (get_ucall(vcpu[0], &uc)) {
		case UCALL_SYNC:
			TEST_ASSERT(uc.args[1] == stage,
				    "Unexpected stage: %ld (%d expected)",
				    uc.args[1], stage);
			break;
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			/* NOT REACHED */
		case UCALL_DONE:
			goto done;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}

		stage++;
	}

done:
	cancel_join_vcpu_thread(threads[0], vcpu[1]);
	cancel_join_vcpu_thread(threads[1], vcpu[2]);
	kvm_vm_free(vm);

	return 0;
}