// SPDX-License-Identifier: GPL-2.0
/*
 * x86_64-specific extensions to memstress.c.
 *
 * Copyright (C) 2022, Google, Inc.
 */
#include <stdio.h>
#include <stdlib.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>

#include "test_util.h"
#include "kvm_util.h"
#include "memstress.h"
#include "processor.h"
#include "vmx.h"

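/*
 * Workload entry for the L2 (nested) guest: run the standard memstress
 * guest code, then VMCALL to hand control back to L1.
 */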
void memstress_l2_guest_code(uint64_t vcpu_id)
{
	memstress_guest_code(vcpu_id);
	vmcall();
}

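/*
 * Raw entry stub for L2.  prepare_vmcs() points L2's RIP here with RSP at
 * the top-of-stack slot that memstress_l1_guest_code() filled with the vCPU
 * id, so loading (%rsp) into %rdi passes the id as the first argument per
 * the SysV ABI.  The ud2 traps if the call ever returns.
 */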
extern char memstress_l2_guest_entry[];
__asm__(
"memstress_l2_guest_entry:"
"	mov (%rsp), %rdi;"
"	call memstress_l2_guest_code;"
"	ud2;"
);

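/*
 * L1 guest code: enter VMX operation, build a stack for L2, launch L2 at
 * memstress_l2_guest_entry, and expect a clean VMCALL exit once the L2
 * workload completes.
 */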
static void memstress_l1_guest_code(struct vmx_pages *vmx, uint64_t vcpu_id)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	unsigned long *rsp;

	GUEST_ASSERT(vmx->vmcs_gpa);
	GUEST_ASSERT(prepare_for_vmx_operation(vmx));
	GUEST_ASSERT(load_vmcs(vmx));
	GUEST_ASSERT(ept_1g_pages_supported());

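	/*
	 * Stash the vCPU id at the top of L2's stack so that the
	 * memstress_l2_guest_entry stub can load it into %rdi as the
	 * first argument to memstress_l2_guest_code().
	 */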
	rsp = &l2_guest_stack[L2_GUEST_STACK_SIZE - 1];
	*rsp = vcpu_id;
	prepare_vmcs(vmx, memstress_l2_guest_entry, rsp);

	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
	GUEST_DONE();
}

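/*
 * Number of extra guest pages needed to run nested.  The 513 below covers
 * one PML4 page plus 512 PDPT pages; each PDPT maps 512 GiB with 1G PDPTEs,
 * for 256 TiB total.  Callers are expected to reserve this many extra pages
 * when creating the VM.
 */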
uint64_t memstress_nested_pages(int nr_vcpus)
{
	/*
	 * 513 page tables is enough to identity-map 256 TiB of L2 with 1G
	 * pages and 4-level paging, plus a few pages per-vCPU for data
	 * structures such as the VMCS.
	 */
	return 513 + 10 * nr_vcpus;
}

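/*
 * Set up the EPT used for L2.  memstress_setup_nested() calls this once, on
 * vCPU 0's vmx_pages, and shares the resulting EPTP with every other vCPU.
 */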
void memstress_setup_ept(struct vmx_pages *vmx, struct kvm_vm *vm)
{
	uint64_t start, end;

	prepare_eptp(vmx, vm, 0);

	/*
	 * Identity map the first 4G and the test region with 1G pages so that
	 * KVM can shadow the EPT12 with the maximum huge page size supported
	 * by the backing source.
	 */
	nested_identity_map_1g(vmx, vm, 0, 0x100000000ULL);

	start = align_down(memstress_args.gpa, PG_SIZE_1G);
	end = align_up(memstress_args.gpa + memstress_args.size, PG_SIZE_1G);
	nested_identity_map_1g(vmx, vm, start, end - start);
}

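/*
 * Flip the VM into nested mode: allocate VMX state for each vCPU, share a
 * single EPT across all vCPUs, and repoint each vCPU's RIP at
 * memstress_l1_guest_code(), which launches L2.
 */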
void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[])
{
	struct vmx_pages *vmx, *vmx0 = NULL;
	struct kvm_regs regs;
	vm_vaddr_t vmx_gva;
	int vcpu_id;

	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
	TEST_REQUIRE(kvm_cpu_has_ept());

	for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
		vmx = vcpu_alloc_vmx(vm, &vmx_gva);

		if (vcpu_id == 0) {
			memstress_setup_ept(vmx, vm);
			vmx0 = vmx;
		} else {
			/* Share the same EPT table across all vCPUs. */
			vmx->eptp = vmx0->eptp;
			vmx->eptp_hva = vmx0->eptp_hva;
			vmx->eptp_gpa = vmx0->eptp_gpa;
		}

		/*
		 * Override the vCPU to run memstress_l1_guest_code() which will
		 * bounce it into L2 before calling memstress_guest_code().
		 */
		vcpu_regs_get(vcpus[vcpu_id], &regs);
		regs.rip = (unsigned long) memstress_l1_guest_code;
		vcpu_regs_set(vcpus[vcpu_id], &regs);
		vcpu_args_set(vcpus[vcpu_id], 2, vmx_gva, vcpu_id);
	}
}