// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tests for Hyper-V extensions to SVM.
 *
 * Copyright (C) 2022, Red Hat, Inc.
 */
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/bitmap.h>

#include "test_util.h"

#include "kvm_util.h"
#include "processor.h"
#include "svm_util.h"
#include "hyperv.h"

#define VCPU_ID		1
#define L2_GUEST_STACK_SIZE 256

struct hv_enlightenments {
	struct __packed hv_enlightenments_control {
		u32 nested_flush_hypercall:1;
		u32 msr_bitmap:1;
		u32 enlightened_npt_tlb: 1;
		u32 reserved:29;
	} __packed hv_enlightenments_control;
	u32 hv_vp_id;
	u64 hv_vm_id;
	u64 partition_assist_page;
	u64 reserved;
} __packed;

/*
 * Hyper-V uses the software reserved clean bit (bit 31) in the VMCB; while
 * it is set, L0 may assume the enlightened areas (e.g. the MSR-Bitmap) are
 * unchanged.
 */
#define VMCB_HV_NESTED_ENLIGHTENMENTS (1U << 31)

static inline void vmmcall(void)
{
	__asm__ __volatile__("vmmcall");
}

void l2_guest_code(void)
{
	GUEST_SYNC(3);
	/* Exit to L1 */
	vmmcall();

	/* MSR-Bitmap tests */
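	/*
	 * Whether each rdmsr below is intercepted depends on how L1 updates
	 * the MSR-Bitmap and the clean bit between the vmmcall exits.
	 */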
	rdmsr(MSR_FS_BASE); /* intercepted */
	rdmsr(MSR_FS_BASE); /* intercepted */
	rdmsr(MSR_GS_BASE); /* not intercepted */
	vmmcall();
	rdmsr(MSR_GS_BASE); /* intercepted */

	GUEST_SYNC(5);

	/* Done, exit to L1 and never come back.  */
	vmmcall();
}

static void __attribute__((__flatten__)) guest_code(struct svm_test_data *svm)
{
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	struct vmcb *vmcb = svm->vmcb;
	struct hv_enlightenments *hve =
		(struct hv_enlightenments *)vmcb->control.reserved_sw;

	GUEST_SYNC(1);

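	/* Identify ourselves as a Hyper-V-aware guest before using any enlightenments. */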
	wrmsr(HV_X64_MSR_GUEST_OS_ID, (u64)0x8100 << 48);

	GUEST_ASSERT(svm->vmcb_gpa);
	/* Prepare for L2 execution. */
	generic_svm_setup(svm, l2_guest_code,
			  &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	GUEST_SYNC(2);
	run_guest(vmcb, svm->vmcb_gpa);
	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
	GUEST_SYNC(4);
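	/* Skip L2's vmmcall (3-byte opcode). */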
	vmcb->save.rip += 3;

	/* Intercept RDMSR 0xc0000100 (MSR_FS_BASE) */
	vmcb->control.intercept |= 1ULL << INTERCEPT_MSR_PROT;
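	/*
	 * MSRs 0xc0000000 - 0xc0001fff live in the second 2K of the MSR
	 * permission bitmap (offset 0x800), two bits per MSR: the even bit
	 * intercepts reads, the odd bit intercepts writes.
	 */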
	set_bit(2 * (MSR_FS_BASE & 0x1fff), svm->msr + 0x800);
	run_guest(vmcb, svm->vmcb_gpa);
	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_MSR);
	vmcb->save.rip += 2; /* rdmsr */

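	/*
	 * With the enlightened MSR-Bitmap enabled, L0 (KVM) may skip
	 * re-merging L1's MSR permission bitmap on nested VMRUN as long as
	 * the VMCB_HV_NESTED_ENLIGHTENMENTS clean bit stays set.
	 */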
	/* Enable enlightened MSR bitmap */
	hve->hv_enlightenments_control.msr_bitmap = 1;
	run_guest(vmcb, svm->vmcb_gpa);
	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_MSR);
	vmcb->save.rip += 2; /* rdmsr */

	/* Intercept RDMSR 0xc0000101 (MSR_GS_BASE) without telling KVM about it */
	set_bit(2 * (MSR_GS_BASE & 0x1fff), svm->msr + 0x800);
	/* Keep the VMCB_HV_NESTED_ENLIGHTENMENTS clean bit set */
	vmcb->control.clean |= VMCB_HV_NESTED_ENLIGHTENMENTS;
	run_guest(vmcb, svm->vmcb_gpa);
	/* Make sure we don't see SVM_EXIT_MSR here so eMSR bitmap works */
	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
	vmcb->save.rip += 3; /* vmmcall */

	/* Now tell KVM we've changed the MSR-Bitmap by clearing the clean bit */
	vmcb->control.clean &= ~VMCB_HV_NESTED_ENLIGHTENMENTS;
	run_guest(vmcb, svm->vmcb_gpa);
	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_MSR);
	vmcb->save.rip += 2; /* rdmsr */

	run_guest(vmcb, svm->vmcb_gpa);
	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
	GUEST_SYNC(6);

	GUEST_DONE();
}

int main(int argc, char *argv[])
{
	vm_vaddr_t nested_gva = 0;

	struct kvm_vm *vm;
	struct kvm_run *run;
	struct ucall uc;
	int stage;

	if (!nested_svm_supported()) {
		print_skip("Nested SVM not supported");
		exit(KSFT_SKIP);
	}
	/* Create VM */
	vm = vm_create_default(VCPU_ID, 0, guest_code);
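	/* Expose the supported Hyper-V CPUID leaves to the vCPU. */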
	vcpu_set_hv_cpuid(vm, VCPU_ID);
	run = vcpu_state(vm, VCPU_ID);
	vcpu_alloc_svm(vm, &nested_gva);
	vcpu_args_set(vm, VCPU_ID, 1, nested_gva);

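	/*
	 * The guest reports each stage via GUEST_SYNC(stage), which arrives
	 * here as UCALL_SYNC with args ("hello", stage).
	 */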
	for (stage = 1;; stage++) {
		_vcpu_run(vm, VCPU_ID);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Stage %d: unexpected exit reason: %u (%s)\n",
			    stage, run->exit_reason,
			    exit_reason_str(run->exit_reason));

		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_ABORT:
			TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
				  __FILE__, uc.args[1]);
			/* NOT REACHED */
		case UCALL_SYNC:
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}

		/* UCALL_SYNC is handled here.  */
		TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
			    uc.args[1] == stage,
			    "Stage %d: unexpected GUEST_SYNC value, got %lx",
			    stage, (ulong)uc.args[1]);

	}

done:
	kvm_vm_free(vm);
}