// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2022, Red Hat, Inc.
 *
 * Tests for Hyper-V extensions to SVM.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/bitmap.h>

#include "test_util.h"

#include "kvm_util.h"
#include "processor.h"
#include "svm_util.h"
#include "hyperv.h"

#define L2_GUEST_STACK_SIZE 256

/* Exit to L1 from L2 with RDMSR instruction */
static inline void rdmsr_from_l2(uint32_t msr)
{
	/* Currently, L1 doesn't preserve GPRs during vmexits. */
	__asm__ __volatile__ ("rdmsr" : : "c"(msr) :
			      "rax", "rbx", "rdx", "rsi", "rdi", "r8", "r9",
			      "r10", "r11", "r12", "r13", "r14", "r15");
}

void l2_guest_code(void)
{
	u64 unused;

	GUEST_SYNC(3);
	/* Exit to L1 */
	vmmcall();

	/* MSR-Bitmap tests */
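	/*
	 * Whether each RDMSR below exits to L1 (SVM_EXIT_MSR) or completes
	 * in L2 depends solely on what L1 programs into the (enlightened)
	 * MSR permission bitmap between its run_guest() calls.
	 */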
	rdmsr_from_l2(MSR_FS_BASE); /* intercepted */
	rdmsr_from_l2(MSR_FS_BASE); /* intercepted */
	rdmsr_from_l2(MSR_GS_BASE); /* not intercepted */
	vmmcall();
	rdmsr_from_l2(MSR_GS_BASE); /* intercepted */

	GUEST_SYNC(5);

	/* L2 TLB flush tests */
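	/*
	 * With the direct TLB flush enlightenment enabled, this hypercall is
	 * expected to be handled by L0 on L2's behalf, i.e. L1 should not see
	 * a VMMCALL exit for it, only the MSR intercept that follows.
	 */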
	hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE |
			 HV_HYPERCALL_FAST_BIT, 0x0,
			 HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES |
			 HV_FLUSH_ALL_PROCESSORS);
	rdmsr_from_l2(MSR_FS_BASE);
	/*
	 * Note: hypercall status (RAX) is not preserved correctly by L1 after
	 * synthetic vmexit, use unchecked version.
	 */
	__hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE |
			   HV_HYPERCALL_FAST_BIT, 0x0,
			   HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES |
			   HV_FLUSH_ALL_PROCESSORS, &unused);

	/* Done, exit to L1 and never come back.  */
	vmmcall();
}

static void __attribute__((__flatten__)) guest_code(struct svm_test_data *svm,
						    struct hyperv_test_pages *hv_pages,
						    vm_vaddr_t pgs_gpa)
{
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	struct vmcb *vmcb = svm->vmcb;
	struct hv_vmcb_enlightenments *hve = &vmcb->control.hv_enlightenments;

	GUEST_SYNC(1);

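	/*
	 * Standard Hyper-V enablement sequence: the guest OS ID must be set
	 * before the hypercall page can be enabled, and the VP assist page
	 * holds the nested enlightenment controls used below.
	 */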
	wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
	wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa);
	enable_vp_assist(hv_pages->vp_assist_gpa, hv_pages->vp_assist);

	GUEST_ASSERT(svm->vmcb_gpa);
	/* Prepare for L2 execution. */
	generic_svm_setup(svm, l2_guest_code,
			  &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	/* L2 TLB flush setup */
	hve->partition_assist_page = hv_pages->partition_assist_gpa;
	hve->hv_enlightenments_control.nested_flush_hypercall = 1;
	hve->hv_vm_id = 1;
	hve->hv_vp_id = 1;
	current_vp_assist->nested_control.features.directhypercall = 1;
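	/*
	 * Zero in the partition assist page means no synthetic vmexit is
	 * requested when L0 flushes on L2's behalf; it is set to 1 later to
	 * exercise the HV_SVM_EXITCODE_ENL path.
	 */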
	*(u32 *)(hv_pages->partition_assist) = 0;

	GUEST_SYNC(2);
	run_guest(vmcb, svm->vmcb_gpa);
	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
	GUEST_SYNC(4);
	vmcb->save.rip += 3;

	/* Intercept RDMSR 0xc0000100 */
	vmcb->control.intercept |= 1ULL << INTERCEPT_MSR_PROT;
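	/*
	 * MSR_FS_BASE (0xc0000100) is in the 0xc0000000-0xc0001fff range,
	 * which the SVM MSR permission map covers starting at byte offset
	 * 0x800; each MSR gets two bits, the even one gating reads.
	 */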
	__set_bit(2 * (MSR_FS_BASE & 0x1fff), svm->msr + 0x800);
	run_guest(vmcb, svm->vmcb_gpa);
	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_MSR);
	vmcb->save.rip += 2; /* rdmsr */

	/* Enable enlightened MSR bitmap */
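	/*
	 * With this control set, L1 promises to clear the MSR-bitmap clean
	 * bit whenever it changes the bitmap, allowing L0 to skip rescanning
	 * it on nested VMRUN while the clean bit stays set.
	 */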
	hve->hv_enlightenments_control.msr_bitmap = 1;
	run_guest(vmcb, svm->vmcb_gpa);
	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_MSR);
	vmcb->save.rip += 2; /* rdmsr */

	/* Intercept RDMSR 0xc0000101 without telling KVM about it */
	__set_bit(2 * (MSR_GS_BASE & 0x1fff), svm->msr + 0x800);
	/* Make sure HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP is set */
	vmcb->control.clean |= HV_VMCB_NESTED_ENLIGHTENMENTS;
	run_guest(vmcb, svm->vmcb_gpa);
	/* Make sure we don't see SVM_EXIT_MSR here so eMSR bitmap works */
	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
	vmcb->save.rip += 3; /* vmcall */

	/* Now tell KVM we've changed MSR-Bitmap */
	vmcb->control.clean &= ~HV_VMCB_NESTED_ENLIGHTENMENTS;
	run_guest(vmcb, svm->vmcb_gpa);
	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_MSR);
	vmcb->save.rip += 2; /* rdmsr */

	/*
	 * L2 TLB flush test. First VMCALL should be handled directly by L0,
	 * no VMCALL exit expected.
	 */
	run_guest(vmcb, svm->vmcb_gpa);
	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_MSR);
	vmcb->save.rip += 2; /* rdmsr */
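	/*
	 * Writing 1 to the partition assist page asks L0 to deliver a
	 * synthetic vmexit (TRAP_AFTER_FLUSH) to L1 after it handles L2's
	 * next flush hypercall.
	 */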
	/* Enable synthetic vmexit */
	*(u32 *)(hv_pages->partition_assist) = 1;
	run_guest(vmcb, svm->vmcb_gpa);
	GUEST_ASSERT(vmcb->control.exit_code == HV_SVM_EXITCODE_ENL);
	GUEST_ASSERT(vmcb->control.exit_info_1 == HV_SVM_ENL_EXITCODE_TRAP_AFTER_FLUSH);

	run_guest(vmcb, svm->vmcb_gpa);
	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
	GUEST_SYNC(6);

	GUEST_DONE();
}

int main(int argc, char *argv[])
{
	vm_vaddr_t nested_gva = 0, hv_pages_gva = 0;
	vm_vaddr_t hcall_page;
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct ucall uc;
	int stage;

	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));
	TEST_REQUIRE(kvm_hv_cpu_has(HV_X64_NESTED_DIRECT_FLUSH));

	/* Create VM */
	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	vcpu_set_hv_cpuid(vcpu);
	vcpu_alloc_svm(vm, &nested_gva);
	vcpu_alloc_hyperv_test_pages(vm, &hv_pages_gva);

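	/*
	 * Backing page for the Hyper-V hypercall page that guest_code()
	 * enables via HV_X64_MSR_HYPERCALL.
	 */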
	hcall_page = vm_vaddr_alloc_pages(vm, 1);
	memset(addr_gva2hva(vm, hcall_page), 0x0, getpagesize());

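	/*
	 * Pass the nested and Hyper-V test page GVAs plus the hypercall page
	 * GPA to the guest, and set HV_X64_MSR_VP_INDEX so the Hyper-V VP
	 * index matches the KVM vCPU id.
	 */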
	vcpu_args_set(vcpu, 3, nested_gva, hv_pages_gva, addr_gva2gpa(vm, hcall_page));
	vcpu_set_msr(vcpu, HV_X64_MSR_VP_INDEX, vcpu->id);

	for (stage = 1;; stage++) {
		vcpu_run(vcpu);
		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			/* NOT REACHED */
		case UCALL_SYNC:
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}

		/* UCALL_SYNC is handled here.  */
		TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
			    uc.args[1] == stage, "Stage %d: Unexpected register values vmexit, got %lx",
			    stage, (ulong)uc.args[1]);
	}

done:
	kvm_vm_free(vm);
}