xref: /linux/tools/testing/selftests/kvm/x86_64/hyperv_features.c (revision 69bfec7548f4c1595bac0e3ddfc0458a5af31f4c)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2021, Red Hat, Inc.
4  *
5  * Tests for Hyper-V features enablement
6  */
7 #include <asm/kvm_para.h>
8 #include <linux/kvm_para.h>
9 #include <stdint.h>
10 
11 #include "test_util.h"
12 #include "kvm_util.h"
13 #include "processor.h"
14 #include "hyperv.h"
15 
16 /*
17  * HYPERV_CPUID_ENLIGHTMENT_INFO.EBX is not a 'feature' CPUID leaf
18  * but to activate the feature it is sufficient to set it to a non-zero
19  * value. Use BIT(0) for that.
20  */
21 #define HV_PV_SPINLOCKS_TEST            \
22 	KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EBX, 0)
23 
/*
 * Describes a single MSR-access test stage: which MSR to access, whether to
 * write (and what value), and whether the access is expected to #GP.  Shared
 * with the guest via a page mapped at 'msr_gva'.
 */
struct msr_data {
	uint32_t idx;		/* MSR index to access */
	bool fault_expected;	/* true if the access should raise #GP */
	bool write;		/* write 'write_val' (true) or read (false) */
	uint64_t write_val;	/* value to write when 'write' is set */
};
30 
/*
 * Describes a single hypercall test stage.  Shared with the guest via a page
 * mapped at 'hcall_params'.
 */
struct hcall_data {
	uint64_t control;	/* hypercall control ('input') value */
	uint64_t expect;	/* expected hypercall return code */
	bool ud_expected;	/* true if the hypercall should raise #UD */
};
36 
37 static bool is_write_only_msr(uint32_t msr)
38 {
39 	return msr == HV_X64_MSR_EOI;
40 }
41 
/*
 * Guest code: perform the MSR access described by @msr and verify the
 * outcome (#GP vs. success) matches 'fault_expected'.  A successful write
 * to a readable MSR is read back to confirm the value sticks.
 */
static void guest_msr(struct msr_data *msr)
{
	uint8_t vector = 0;
	uint64_t msr_val = 0;

	GUEST_ASSERT(msr->idx);

	if (msr->write)
		vector = wrmsr_safe(msr->idx, msr->write_val);

	/*
	 * Read the MSR back unless the write already faulted or the MSR is
	 * write-only (reading a write-only MSR would fault unconditionally).
	 */
	if (!vector && (!msr->write || !is_write_only_msr(msr->idx)))
		vector = rdmsr_safe(msr->idx, &msr_val);

	if (msr->fault_expected)
		GUEST_ASSERT_3(vector == GP_VECTOR, msr->idx, vector, GP_VECTOR);
	else
		GUEST_ASSERT_3(!vector, msr->idx, vector, 0);

	/* Nothing to read back if the access faulted or the MSR is write-only. */
	if (vector || is_write_only_msr(msr->idx))
		goto done;

	if (msr->write)
		GUEST_ASSERT_3(msr_val == msr->write_val, msr->idx,
			       msr_val, msr->write_val);

	/* Invariant TSC bit appears when TSC invariant control MSR is written to */
	if (msr->idx == HV_X64_MSR_TSC_INVARIANT_CONTROL) {
		if (!this_cpu_has(HV_ACCESS_TSC_INVARIANT))
			GUEST_ASSERT(this_cpu_has(X86_FEATURE_INVTSC));
		else
			GUEST_ASSERT(this_cpu_has(X86_FEATURE_INVTSC) ==
				     !!(msr_val & HV_INVARIANT_TSC_EXPOSED));
	}

done:
	GUEST_DONE();
}
79 
/*
 * Guest code: issue the hypercall described by @hcall, using @pgs_gpa as the
 * input page (output page immediately follows it), and verify the result:
 * either the expected #UD or the expected hypercall return code in 'res'.
 */
static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall)
{
	u64 res, input, output;
	uint8_t vector;

	GUEST_ASSERT(hcall->control);

	/*
	 * Hypercalls only work after the guest OS ID is set and the hypercall
	 * page is established via HV_X64_MSR_HYPERCALL.
	 */
	wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
	wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa);

	/* 'Fast' hypercalls pass parameters in registers, not through memory. */
	if (!(hcall->control & HV_HYPERCALL_FAST_BIT)) {
		input = pgs_gpa;
		output = pgs_gpa + 4096;
	} else {
		input = output = 0;
	}

	vector = __hyperv_hypercall(hcall->control, input, output, &res);
	if (hcall->ud_expected) {
		GUEST_ASSERT_2(vector == UD_VECTOR, hcall->control, vector);
	} else {
		GUEST_ASSERT_2(!vector, hcall->control, vector);
		GUEST_ASSERT_2(res == hcall->expect, hcall->expect, res);
	}

	GUEST_DONE();
}
107 
/*
 * Give the vCPU the full set of KVM-supported Hyper-V CPUID leaves, then
 * wipe the feature leaves so that each test stage can enable bits one by one.
 */
static void vcpu_reset_hv_cpuid(struct kvm_vcpu *vcpu)
{
	/*
	 * Enable all supported Hyper-V features, then clear the leafs holding
	 * the features that will be tested one by one.
	 */
	vcpu_set_hv_cpuid(vcpu);

	vcpu_clear_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES);
	vcpu_clear_cpuid_entry(vcpu, HYPERV_CPUID_ENLIGHTMENT_INFO);
	vcpu_clear_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES);
}
120 
/*
 * Walk through a sequence of MSR-access stages.  For each stage a fresh VM is
 * created, the Hyper-V CPUID feature bits accumulated so far are restored
 * from 'prev_cpuid' (features are enabled incrementally and persist across
 * stages), one MSR access is described in the shared 'msr' page, and
 * guest_msr() performs and validates the access.  Returns once the stage
 * counter runs past the last case.
 */
static void guest_test_msrs_access(void)
{
	struct kvm_cpuid2 *prev_cpuid = NULL;
	struct kvm_vcpu *vcpu;
	struct kvm_run *run;
	struct kvm_vm *vm;
	struct ucall uc;
	int stage = 0;
	vm_vaddr_t msr_gva;
	struct msr_data *msr;
	bool has_invtsc = kvm_cpu_has(X86_FEATURE_INVTSC);

	while (true) {
		vm = vm_create_with_one_vcpu(&vcpu, guest_msr);

		msr_gva = vm_vaddr_alloc_page(vm);
		memset(addr_gva2hva(vm, msr_gva), 0x0, getpagesize());
		msr = addr_gva2hva(vm, msr_gva);

		vcpu_args_set(vcpu, 1, msr_gva);
		/* Make KVM filter Hyper-V MSR accesses by guest-visible CPUID. */
		vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENFORCE_CPUID, 1);

		if (!prev_cpuid) {
			vcpu_reset_hv_cpuid(vcpu);

			prev_cpuid = allocate_kvm_cpuid2(vcpu->cpuid->nent);
		} else {
			vcpu_init_cpuid(vcpu, prev_cpuid);
		}

		vm_init_descriptor_tables(vm);
		vcpu_init_descriptor_tables(vcpu);

		run = vcpu->run;

		/* TODO: Make this entire test easier to maintain. */
		if (stage >= 21)
			vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_SYNIC2, 0);

		switch (stage) {
		case 0:
			/*
			 * Only available when Hyper-V identification is set
			 */
			msr->idx = HV_X64_MSR_GUEST_OS_ID;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 1:
			msr->idx = HV_X64_MSR_HYPERCALL;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 2:
			vcpu_set_cpuid_feature(vcpu, HV_MSR_HYPERCALL_AVAILABLE);
			/*
			 * HV_X64_MSR_GUEST_OS_ID has to be written first to make
			 * HV_X64_MSR_HYPERCALL available.
			 */
			msr->idx = HV_X64_MSR_GUEST_OS_ID;
			msr->write = true;
			msr->write_val = HYPERV_LINUX_OS_ID;
			msr->fault_expected = false;
			break;
		case 3:
			msr->idx = HV_X64_MSR_GUEST_OS_ID;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 4:
			msr->idx = HV_X64_MSR_HYPERCALL;
			msr->write = false;
			msr->fault_expected = false;
			break;

		case 5:
			msr->idx = HV_X64_MSR_VP_RUNTIME;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 6:
			vcpu_set_cpuid_feature(vcpu, HV_MSR_VP_RUNTIME_AVAILABLE);
			msr->idx = HV_X64_MSR_VP_RUNTIME;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 7:
			/* Read only */
			msr->idx = HV_X64_MSR_VP_RUNTIME;
			msr->write = true;
			msr->write_val = 1;
			msr->fault_expected = true;
			break;

		case 8:
			msr->idx = HV_X64_MSR_TIME_REF_COUNT;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 9:
			vcpu_set_cpuid_feature(vcpu, HV_MSR_TIME_REF_COUNT_AVAILABLE);
			msr->idx = HV_X64_MSR_TIME_REF_COUNT;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 10:
			/* Read only */
			msr->idx = HV_X64_MSR_TIME_REF_COUNT;
			msr->write = true;
			msr->write_val = 1;
			msr->fault_expected = true;
			break;

		case 11:
			msr->idx = HV_X64_MSR_VP_INDEX;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 12:
			vcpu_set_cpuid_feature(vcpu, HV_MSR_VP_INDEX_AVAILABLE);
			msr->idx = HV_X64_MSR_VP_INDEX;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 13:
			/* Read only */
			msr->idx = HV_X64_MSR_VP_INDEX;
			msr->write = true;
			msr->write_val = 1;
			msr->fault_expected = true;
			break;

		case 14:
			msr->idx = HV_X64_MSR_RESET;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 15:
			vcpu_set_cpuid_feature(vcpu, HV_MSR_RESET_AVAILABLE);
			msr->idx = HV_X64_MSR_RESET;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 16:
			msr->idx = HV_X64_MSR_RESET;
			msr->write = true;
			/*
			 * TODO: the test only writes '0' to HV_X64_MSR_RESET
			 * at the moment, writing some other value there will
			 * trigger real vCPU reset and the code is not prepared
			 * to handle it yet.
			 */
			msr->write_val = 0;
			msr->fault_expected = false;
			break;

		case 17:
			msr->idx = HV_X64_MSR_REFERENCE_TSC;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 18:
			vcpu_set_cpuid_feature(vcpu, HV_MSR_REFERENCE_TSC_AVAILABLE);
			msr->idx = HV_X64_MSR_REFERENCE_TSC;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 19:
			msr->idx = HV_X64_MSR_REFERENCE_TSC;
			msr->write = true;
			msr->write_val = 0;
			msr->fault_expected = false;
			break;

		case 20:
			msr->idx = HV_X64_MSR_EOM;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 21:
			/*
			 * Remains unavailable even with KVM_CAP_HYPERV_SYNIC2
			 * capability enabled and guest visible CPUID bit unset.
			 */
			msr->idx = HV_X64_MSR_EOM;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 22:
			vcpu_set_cpuid_feature(vcpu, HV_MSR_SYNIC_AVAILABLE);
			msr->idx = HV_X64_MSR_EOM;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 23:
			msr->idx = HV_X64_MSR_EOM;
			msr->write = true;
			msr->write_val = 0;
			msr->fault_expected = false;
			break;

		case 24:
			msr->idx = HV_X64_MSR_STIMER0_CONFIG;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 25:
			vcpu_set_cpuid_feature(vcpu, HV_MSR_SYNTIMER_AVAILABLE);
			msr->idx = HV_X64_MSR_STIMER0_CONFIG;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 26:
			msr->idx = HV_X64_MSR_STIMER0_CONFIG;
			msr->write = true;
			msr->write_val = 0;
			msr->fault_expected = false;
			break;
		case 27:
			/* Direct mode test */
			msr->idx = HV_X64_MSR_STIMER0_CONFIG;
			msr->write = true;
			msr->write_val = 1 << 12;
			msr->fault_expected = true;
			break;
		case 28:
			vcpu_set_cpuid_feature(vcpu, HV_STIMER_DIRECT_MODE_AVAILABLE);
			msr->idx = HV_X64_MSR_STIMER0_CONFIG;
			msr->write = true;
			msr->write_val = 1 << 12;
			msr->fault_expected = false;
			break;

		case 29:
			msr->idx = HV_X64_MSR_EOI;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 30:
			vcpu_set_cpuid_feature(vcpu, HV_MSR_APIC_ACCESS_AVAILABLE);
			msr->idx = HV_X64_MSR_EOI;
			msr->write = true;
			msr->write_val = 1;
			msr->fault_expected = false;
			break;

		case 31:
			msr->idx = HV_X64_MSR_TSC_FREQUENCY;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 32:
			vcpu_set_cpuid_feature(vcpu, HV_ACCESS_FREQUENCY_MSRS);
			msr->idx = HV_X64_MSR_TSC_FREQUENCY;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 33:
			/* Read only */
			msr->idx = HV_X64_MSR_TSC_FREQUENCY;
			msr->write = true;
			msr->write_val = 1;
			msr->fault_expected = true;
			break;

		case 34:
			msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 35:
			vcpu_set_cpuid_feature(vcpu, HV_ACCESS_REENLIGHTENMENT);
			msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 36:
			msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL;
			msr->write = true;
			msr->write_val = 1;
			msr->fault_expected = false;
			break;
		case 37:
			/* Can only write '0' */
			msr->idx = HV_X64_MSR_TSC_EMULATION_STATUS;
			msr->write = true;
			msr->write_val = 1;
			msr->fault_expected = true;
			break;

		case 38:
			msr->idx = HV_X64_MSR_CRASH_P0;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 39:
			vcpu_set_cpuid_feature(vcpu, HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE);
			msr->idx = HV_X64_MSR_CRASH_P0;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 40:
			msr->idx = HV_X64_MSR_CRASH_P0;
			msr->write = true;
			msr->write_val = 1;
			msr->fault_expected = false;
			break;

		case 41:
			msr->idx = HV_X64_MSR_SYNDBG_STATUS;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 42:
			vcpu_set_cpuid_feature(vcpu, HV_FEATURE_DEBUG_MSRS_AVAILABLE);
			vcpu_set_cpuid_feature(vcpu, HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING);
			msr->idx = HV_X64_MSR_SYNDBG_STATUS;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 43:
			msr->idx = HV_X64_MSR_SYNDBG_STATUS;
			msr->write = true;
			msr->write_val = 0;
			msr->fault_expected = false;
			break;

		case 44:
			/* MSR is not available when CPUID feature bit is unset */
			if (!has_invtsc)
				continue;
			msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 45:
			/* MSR is available when CPUID feature bit is set */
			if (!has_invtsc)
				continue;
			vcpu_set_cpuid_feature(vcpu, HV_ACCESS_TSC_INVARIANT);
			msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 46:
			/* Writing bits other than 0 is forbidden */
			if (!has_invtsc)
				continue;
			msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
			msr->write = true;
			msr->write_val = 0xdeadbeef;
			msr->fault_expected = true;
			break;
		case 47:
			/* Setting bit 0 enables the feature */
			if (!has_invtsc)
				continue;
			msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
			msr->write = true;
			msr->write_val = 1;
			msr->fault_expected = false;
			break;

		default:
			kvm_vm_free(vm);
			return;
		}

		vcpu_set_cpuid(vcpu);

		/* Preserve the accumulated feature bits for the next stage's VM. */
		memcpy(prev_cpuid, vcpu->cpuid, kvm_cpuid2_size(vcpu->cpuid->nent));

		pr_debug("Stage %d: testing msr: 0x%x for %s\n", stage,
			 msr->idx, msr->write ? "write" : "read");

		vcpu_run(vcpu);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "unexpected exit reason: %u (%s)",
			    run->exit_reason, exit_reason_str(run->exit_reason));

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT_3(uc, "MSR = %lx, arg1 = %lx, arg2 = %lx");
			return;
		case UCALL_DONE:
			break;
		default:
			TEST_FAIL("Unhandled ucall: %ld", uc.cmd);
			return;
		}

		stage++;
		kvm_vm_free(vm);
	}
}
516 
/*
 * Walk through a sequence of hypercall stages.  Mirrors
 * guest_test_msrs_access(): each stage gets a fresh VM, restores the
 * Hyper-V CPUID bits accumulated in 'prev_cpuid', describes one hypercall
 * in the shared 'hcall' page, and runs guest_hcall() to issue and validate
 * it.  Returns at the terminating stage.
 */
static void guest_test_hcalls_access(void)
{
	struct kvm_cpuid2 *prev_cpuid = NULL;
	struct kvm_vcpu *vcpu;
	struct kvm_run *run;
	struct kvm_vm *vm;
	struct ucall uc;
	int stage = 0;
	vm_vaddr_t hcall_page, hcall_params;
	struct hcall_data *hcall;

	while (true) {
		vm = vm_create_with_one_vcpu(&vcpu, guest_hcall);

		vm_init_descriptor_tables(vm);
		vcpu_init_descriptor_tables(vcpu);

		/* Hypercall input/output */
		hcall_page = vm_vaddr_alloc_pages(vm, 2);
		memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize());

		hcall_params = vm_vaddr_alloc_page(vm);
		memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize());
		hcall = addr_gva2hva(vm, hcall_params);

		vcpu_args_set(vcpu, 2, addr_gva2gpa(vm, hcall_page), hcall_params);
		/* Make KVM filter Hyper-V hypercalls by guest-visible CPUID. */
		vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENFORCE_CPUID, 1);

		if (!prev_cpuid) {
			vcpu_reset_hv_cpuid(vcpu);

			prev_cpuid = allocate_kvm_cpuid2(vcpu->cpuid->nent);
		} else {
			vcpu_init_cpuid(vcpu, prev_cpuid);
		}

		run = vcpu->run;

		switch (stage) {
		case 0:
			vcpu_set_cpuid_feature(vcpu, HV_MSR_HYPERCALL_AVAILABLE);
			hcall->control = 0xbeef;
			hcall->expect = HV_STATUS_INVALID_HYPERCALL_CODE;
			break;

		case 1:
			hcall->control = HVCALL_POST_MESSAGE;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 2:
			vcpu_set_cpuid_feature(vcpu, HV_POST_MESSAGES);
			hcall->control = HVCALL_POST_MESSAGE;
			hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;

		case 3:
			hcall->control = HVCALL_SIGNAL_EVENT;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 4:
			vcpu_set_cpuid_feature(vcpu, HV_SIGNAL_EVENTS);
			hcall->control = HVCALL_SIGNAL_EVENT;
			hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;

		case 5:
			hcall->control = HVCALL_RESET_DEBUG_SESSION;
			hcall->expect = HV_STATUS_INVALID_HYPERCALL_CODE;
			break;
		case 6:
			vcpu_set_cpuid_feature(vcpu, HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING);
			hcall->control = HVCALL_RESET_DEBUG_SESSION;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 7:
			vcpu_set_cpuid_feature(vcpu, HV_DEBUGGING);
			hcall->control = HVCALL_RESET_DEBUG_SESSION;
			hcall->expect = HV_STATUS_OPERATION_DENIED;
			break;

		case 8:
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 9:
			vcpu_set_cpuid_feature(vcpu, HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED);
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE;
			hcall->expect = HV_STATUS_SUCCESS;
			break;
		case 10:
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 11:
			vcpu_set_cpuid_feature(vcpu, HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED);
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX;
			hcall->expect = HV_STATUS_SUCCESS;
			break;

		case 12:
			hcall->control = HVCALL_SEND_IPI;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 13:
			vcpu_set_cpuid_feature(vcpu, HV_X64_CLUSTER_IPI_RECOMMENDED);
			hcall->control = HVCALL_SEND_IPI;
			hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		case 14:
			/* Nothing in 'sparse banks' -> success */
			hcall->control = HVCALL_SEND_IPI_EX;
			hcall->expect = HV_STATUS_SUCCESS;
			break;

		case 15:
			hcall->control = HVCALL_NOTIFY_LONG_SPIN_WAIT;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 16:
			vcpu_set_cpuid_feature(vcpu, HV_PV_SPINLOCKS_TEST);
			hcall->control = HVCALL_NOTIFY_LONG_SPIN_WAIT;
			hcall->expect = HV_STATUS_SUCCESS;
			break;
		case 17:
			/* XMM fast hypercall */
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT;
			hcall->ud_expected = true;
			break;
		case 18:
			vcpu_set_cpuid_feature(vcpu, HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE);
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT;
			hcall->ud_expected = false;
			hcall->expect = HV_STATUS_SUCCESS;
			break;
		case 19:
			hcall->control = HV_EXT_CALL_QUERY_CAPABILITIES;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 20:
			vcpu_set_cpuid_feature(vcpu, HV_ENABLE_EXTENDED_HYPERCALLS);
			hcall->control = HV_EXT_CALL_QUERY_CAPABILITIES | HV_HYPERCALL_FAST_BIT;
			hcall->expect = HV_STATUS_INVALID_PARAMETER;
			break;
		case 21:
			kvm_vm_free(vm);
			return;
		}

		vcpu_set_cpuid(vcpu);

		/* Preserve the accumulated feature bits for the next stage's VM. */
		memcpy(prev_cpuid, vcpu->cpuid, kvm_cpuid2_size(vcpu->cpuid->nent));

		pr_debug("Stage %d: testing hcall: 0x%lx\n", stage, hcall->control);

		vcpu_run(vcpu);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "unexpected exit reason: %u (%s)",
			    run->exit_reason, exit_reason_str(run->exit_reason));

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT_2(uc, "arg1 = %lx, arg2 = %lx");
			return;
		case UCALL_DONE:
			break;
		default:
			TEST_FAIL("Unhandled ucall: %ld", uc.cmd);
			return;
		}

		stage++;
		kvm_vm_free(vm);
	}
}
691 
/*
 * Entry point: run the MSR-access tests followed by the hypercall tests.
 * Both test routines abort via TEST_ASSERT/TEST_FAIL on failure, so falling
 * through to the end means success.
 */
int main(void)
{
	pr_info("Testing access to Hyper-V specific MSRs\n");
	guest_test_msrs_access();

	pr_info("Testing access to Hyper-V hypercalls\n");
	guest_test_hcalls_access();

	/* Explicit success exit status (previously relied on C99 implicit return). */
	return 0;
}
700