xref: /linux/tools/testing/selftests/kvm/lib/memstress.c (revision 5ea5880764cbb164afb17a62e76ca75dc371409d)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020, Google LLC.
 */
#include <inttypes.h>
#include <linux/bitmap.h>

#include "kvm_util.h"
#include "memstress.h"
#include "processor.h"
#include "ucall_common.h"

struct memstress_args memstress_args;

/*
 * Guest virtual memory offset of the testing memory slot.
 * Must not conflict with identity mapped test code.
 */
static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;

struct vcpu_thread {
	/* The index of the vCPU. */
	int vcpu_idx;

	/* The pthread backing the vCPU. */
	pthread_t thread;

	/* Set to true once the vCPU thread is up and running. */
	bool running;
};

/* The vCPU threads involved in this test. */
static struct vcpu_thread vcpu_threads[KVM_MAX_VCPUS];

/* The function run by each vCPU thread, as provided by the test. */
static void (*vcpu_thread_fn)(struct memstress_vcpu_args *);

/* Set to true once all vCPU threads are up and running. */
static bool all_vcpu_threads_running;

static struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];

/*
 * Continuously access the first 8 bytes of each page in the specified
 * region: write with probability write_percent, otherwise read.
 */
void memstress_guest_code(uint32_t vcpu_idx)
{
	struct memstress_args *args = &memstress_args;
	struct memstress_vcpu_args *vcpu_args = &args->vcpu_args[vcpu_idx];
	struct guest_random_state rand_state;
	uint64_t gva;
	uint64_t pages;
	uint64_t addr;
	uint64_t page;
	int i;

	rand_state = new_guest_random_state(guest_random_seed + vcpu_idx);

	gva = vcpu_args->gva;
	pages = vcpu_args->pages;

	/* Make sure vCPU args data structure is not corrupt. */
	GUEST_ASSERT(vcpu_args->vcpu_idx == vcpu_idx);

	while (true) {
		for (i = 0; i < sizeof(memstress_args); i += args->guest_page_size)
			(void) *((volatile char *)args + i);

		for (i = 0; i < pages; i++) {
			if (args->random_access)
				page = guest_random_u32(&rand_state) % pages;
			else
				page = i;

			addr = gva + (page * args->guest_page_size);

			if (__guest_random_bool(&rand_state, args->write_percent))
				*(uint64_t *)addr = 0x0123456789ABCDEF;
			else
				READ_ONCE(*(uint64_t *)addr);
		}

		GUEST_SYNC(1);
	}
}
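
/*
 * The host drives the loop above by updating the shared memstress_args and
 * re-exporting it with sync_global_to_guest().  A minimal sketch of how a
 * test might retune the access pattern between iterations ("vm" is assumed
 * to be the test's struct kvm_vm, see the setters further below):
 *
 *	memstress_set_write_percent(vm, 50);	// 50% writes, 50% reads
 *	memstress_set_random_access(vm, true);	// randomize page order
 *
 * Each completed pass over the region ends in GUEST_SYNC(1), which the host
 * observes as a UCALL_SYNC exit on that vCPU.
 */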

void memstress_setup_vcpus(struct kvm_vm *vm, int nr_vcpus,
			   struct kvm_vcpu *vcpus[],
			   uint64_t vcpu_memory_bytes,
			   bool partition_vcpu_memory_access)
{
	struct memstress_args *args = &memstress_args;
	struct memstress_vcpu_args *vcpu_args;
	int i;

	for (i = 0; i < nr_vcpus; i++) {
		vcpu_args = &args->vcpu_args[i];

		vcpu_args->vcpu = vcpus[i];
		vcpu_args->vcpu_idx = i;

		if (partition_vcpu_memory_access) {
			vcpu_args->gva = guest_test_virt_mem +
					 (i * vcpu_memory_bytes);
			vcpu_args->pages = vcpu_memory_bytes /
					   args->guest_page_size;
			vcpu_args->gpa = args->gpa + (i * vcpu_memory_bytes);
		} else {
			vcpu_args->gva = guest_test_virt_mem;
			vcpu_args->pages = (nr_vcpus * vcpu_memory_bytes) /
					   args->guest_page_size;
			vcpu_args->gpa = args->gpa;
		}

		vcpu_args_set(vcpus[i], 1, i);

		pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n",
			 i, vcpu_args->gpa, vcpu_args->gpa +
			 (vcpu_args->pages * args->guest_page_size));
	}
}
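
/*
 * Worked example of the partitioning above: with nr_vcpus = 4 and
 * vcpu_memory_bytes = 1 GiB, partitioned access gives vCPU 2
 * gva = guest_test_virt_mem + 2 GiB, gpa = args->gpa + 2 GiB, and
 * pages = 1 GiB / guest_page_size.  Without partitioning, all four vCPUs
 * walk the same 4 GiB region starting at guest_test_virt_mem.
 */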

struct kvm_vm *memstress_create_vm(enum vm_guest_mode mode, int nr_vcpus,
				   uint64_t vcpu_memory_bytes, int slots,
				   enum vm_mem_backing_src_type backing_src,
				   bool partition_vcpu_memory_access)
{
	struct memstress_args *args = &memstress_args;
	struct kvm_vm *vm;
	uint64_t guest_num_pages, slot0_pages = 0;
	uint64_t backing_src_pagesz = get_backing_src_pagesz(backing_src);
	uint64_t region_end_gfn;
	int i;

	pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));

	/* By default vCPUs will write to memory. */
	args->write_percent = 100;

	/*
	 * Snapshot the non-huge page size.  This is used by the guest code to
	 * access/dirty pages at the logging granularity.
	 */
	args->guest_page_size = vm_guest_mode_params[mode].page_size;

	guest_num_pages = vm_adjust_num_guest_pages(mode,
				(nr_vcpus * vcpu_memory_bytes) / args->guest_page_size);

	TEST_ASSERT(vcpu_memory_bytes % getpagesize() == 0,
		    "Guest memory size is not host page size aligned.");
	TEST_ASSERT(vcpu_memory_bytes % args->guest_page_size == 0,
		    "Guest memory size is not guest page size aligned.");
	TEST_ASSERT(guest_num_pages % slots == 0,
		    "Guest memory cannot be evenly divided into %d slots.",
		    slots);

	/*
	 * If using nested, allocate extra pages for the nested page tables and
	 * in-memory data structures.
	 */
	if (args->nested)
		slot0_pages += memstress_nested_pages(nr_vcpus);

	/*
	 * Pass guest_num_pages to populate the page tables for test memory.
	 * The memory is also added to memslot 0, but that's a benign side
	 * effect as KVM allows aliasing HVAs in memslots.
	 */
	vm = __vm_create_with_vcpus(VM_SHAPE(mode), nr_vcpus,
				    slot0_pages + guest_num_pages,
				    memstress_guest_code, vcpus);

	args->vm = vm;

	/* Put the test region at the top of guest physical memory. */
	region_end_gfn = vm->max_gfn + 1;

#ifdef __x86_64__
	/*
	 * When running vCPUs in L2, restrict the test region to 48 bits to
	 * avoid needing 5-level page tables to identity map L2.
	 */
	if (args->nested)
		region_end_gfn = min(region_end_gfn, (1UL << 48) / args->guest_page_size);
#endif
	/*
	 * Requesting more memory for the test region than the guest physical
	 * address space can hold is guaranteed to cause problems, so fail
	 * loudly up front.
	 */
	TEST_ASSERT(guest_num_pages < region_end_gfn,
		    "Requested more guest memory than address space allows.\n"
		    "    guest pages: %" PRIx64 " max gfn: %" PRIx64
		    " nr_vcpus: %d wss: %" PRIx64,
		    guest_num_pages, region_end_gfn - 1, nr_vcpus, vcpu_memory_bytes);

	args->gpa = (region_end_gfn - guest_num_pages - 1) * args->guest_page_size;
	args->gpa = align_down(args->gpa, backing_src_pagesz);
	args->size = guest_num_pages * args->guest_page_size;
	pr_info("guest physical test memory: [0x%lx, 0x%lx)\n",
		args->gpa, args->gpa + args->size);

	/* Add extra memory slots for testing */
	for (i = 0; i < slots; i++) {
		uint64_t region_pages = guest_num_pages / slots;
		vm_paddr_t region_start = args->gpa + region_pages * args->guest_page_size * i;

		vm_userspace_mem_region_add(vm, backing_src, region_start,
					    MEMSTRESS_MEM_SLOT_INDEX + i,
					    region_pages, 0);
	}

	/* Do mapping for the demand paging memory slot */
	virt_map(vm, guest_test_virt_mem, args->gpa, guest_num_pages);

	memstress_setup_vcpus(vm, nr_vcpus, vcpus, vcpu_memory_bytes,
			      partition_vcpu_memory_access);

	if (args->nested) {
		pr_info("Configuring vCPUs to run in L2 (nested).\n");
		memstress_setup_nested(vm, nr_vcpus, vcpus);
	}

	/* Export the shared variables to the guest. */
	sync_global_to_guest(vm, memstress_args);

	return vm;
}
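
/*
 * Typical lifecycle for callers of memstress_create_vm(), e.g. the demand
 * paging and dirty logging performance tests.  A minimal sketch; the
 * parameter values are illustrative only and "vcpu_worker" is a
 * hypothetical test-provided function (see memstress_start_vcpu_threads()
 * below):
 *
 *	struct kvm_vm *vm;
 *
 *	vm = memstress_create_vm(VM_MODE_DEFAULT, nr_vcpus, 1ul << 30, 1,
 *				 VM_MEM_SRC_ANONYMOUS, true);
 *	memstress_start_vcpu_threads(nr_vcpus, vcpu_worker);
 *	// ... run and measure the interesting part of the test ...
 *	memstress_join_vcpu_threads(nr_vcpus);
 *	memstress_destroy_vm(vm);
 */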

void memstress_destroy_vm(struct kvm_vm *vm)
{
	kvm_vm_free(vm);
}

void memstress_set_write_percent(struct kvm_vm *vm, uint32_t write_percent)
{
	memstress_args.write_percent = write_percent;
	sync_global_to_guest(vm, memstress_args.write_percent);
}

void memstress_set_random_access(struct kvm_vm *vm, bool random_access)
{
	memstress_args.random_access = random_access;
	sync_global_to_guest(vm, memstress_args.random_access);
}

uint64_t __weak memstress_nested_pages(int nr_vcpus)
{
	return 0;
}

void __weak memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu **vcpus)
{
	pr_info("%s() not supported on this architecture, skipping.\n", __func__);
	exit(KSFT_SKIP);
}
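
/*
 * Architectures that can run vCPUs in L2 override the two __weak stubs
 * above with real implementations in their per-arch lib directory.  A
 * hypothetical override might look like the sketch below; NESTED_PT_PAGES
 * and PER_VCPU_PAGES are made-up names, not constants defined by this
 * library:
 *
 *	uint64_t memstress_nested_pages(int nr_vcpus)
 *	{
 *		// Page tables to identity map L2 plus per-vCPU structures.
 *		return NESTED_PT_PAGES + PER_VCPU_PAGES * nr_vcpus;
 *	}
 */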

static void *vcpu_thread_main(void *data)
{
	struct vcpu_thread *vcpu = data;
	int vcpu_idx = vcpu->vcpu_idx;

	if (memstress_args.pin_vcpus)
		pin_self_to_cpu(memstress_args.vcpu_to_pcpu[vcpu_idx]);

	WRITE_ONCE(vcpu->running, true);

	/*
	 * Wait for all vCPU threads to be up and running before calling the
	 * test-provided vCPU thread function. This prevents thread creation
	 * (which requires taking the mmap_sem in write mode) from interfering
	 * with the guest faulting in its memory.
	 */
	while (!READ_ONCE(all_vcpu_threads_running))
		;

	vcpu_thread_fn(&memstress_args.vcpu_args[vcpu_idx]);

	return NULL;
}

void memstress_start_vcpu_threads(int nr_vcpus,
				  void (*vcpu_fn)(struct memstress_vcpu_args *))
{
	int i;

	vcpu_thread_fn = vcpu_fn;
	WRITE_ONCE(all_vcpu_threads_running, false);
	WRITE_ONCE(memstress_args.stop_vcpus, false);

	for (i = 0; i < nr_vcpus; i++) {
		struct vcpu_thread *vcpu = &vcpu_threads[i];

		vcpu->vcpu_idx = i;
		WRITE_ONCE(vcpu->running, false);

		pthread_create(&vcpu->thread, NULL, vcpu_thread_main, vcpu);
	}

	for (i = 0; i < nr_vcpus; i++) {
		while (!READ_ONCE(vcpu_threads[i].running))
			;
	}

	WRITE_ONCE(all_vcpu_threads_running, true);
}
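
/*
 * A vcpu_fn passed to memstress_start_vcpu_threads() typically runs its
 * vCPU until the test sets stop_vcpus.  A minimal sketch ("vcpu_worker" is
 * a hypothetical name, not part of this library):
 *
 *	static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
 *	{
 *		struct kvm_vcpu *vcpu = vcpu_args->vcpu;
 *
 *		while (!READ_ONCE(memstress_args.stop_vcpus)) {
 *			vcpu_run(vcpu);
 *			TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
 *				    "Unexpected exit from guest");
 *		}
 *	}
 */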

void memstress_join_vcpu_threads(int nr_vcpus)
{
	int i;

	WRITE_ONCE(memstress_args.stop_vcpus, true);

	for (i = 0; i < nr_vcpus; i++)
		pthread_join(vcpu_threads[i].thread, NULL);
}

static void toggle_dirty_logging(struct kvm_vm *vm, int slots, bool enable)
{
	int i;

	for (i = 0; i < slots; i++) {
		int slot = MEMSTRESS_MEM_SLOT_INDEX + i;
		int flags = enable ? KVM_MEM_LOG_DIRTY_PAGES : 0;

		vm_mem_region_set_flags(vm, slot, flags);
	}
}

void memstress_enable_dirty_logging(struct kvm_vm *vm, int slots)
{
	toggle_dirty_logging(vm, slots, true);
}

void memstress_disable_dirty_logging(struct kvm_vm *vm, int slots)
{
	toggle_dirty_logging(vm, slots, false);
}

void memstress_get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots)
{
	int i;

	for (i = 0; i < slots; i++) {
		int slot = MEMSTRESS_MEM_SLOT_INDEX + i;

		kvm_vm_get_dirty_log(vm, slot, bitmaps[i]);
	}
}

void memstress_clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],
			       int slots, uint64_t pages_per_slot)
{
	int i;

	for (i = 0; i < slots; i++) {
		int slot = MEMSTRESS_MEM_SLOT_INDEX + i;

		kvm_vm_clear_dirty_log(vm, slot, bitmaps[i], 0, pages_per_slot);
	}
}
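
/*
 * The dirty logging helpers above compose with the bitmap helpers below
 * into a typical harvest loop.  A minimal sketch ("vm", "slots" and
 * "pages_per_slot" are assumed to come from the test; clearing the log
 * additionally requires KVM's manual dirty-log protection to be enabled):
 *
 *	unsigned long **bitmaps = memstress_alloc_bitmaps(slots, pages_per_slot);
 *
 *	memstress_enable_dirty_logging(vm, slots);
 *	// ... let the vCPUs dirty the test region ...
 *	memstress_get_dirty_log(vm, bitmaps, slots);
 *	memstress_clear_dirty_log(vm, bitmaps, slots, pages_per_slot);
 *	memstress_disable_dirty_logging(vm, slots);
 *	memstress_free_bitmaps(bitmaps, slots);
 */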

unsigned long **memstress_alloc_bitmaps(int slots, uint64_t pages_per_slot)
{
	unsigned long **bitmaps;
	int i;

	bitmaps = malloc(slots * sizeof(bitmaps[0]));
	TEST_ASSERT(bitmaps, "Failed to allocate bitmaps array.");

	for (i = 0; i < slots; i++) {
		bitmaps[i] = bitmap_zalloc(pages_per_slot);
		TEST_ASSERT(bitmaps[i], "Failed to allocate slot bitmap.");
	}

	return bitmaps;
}

void memstress_free_bitmaps(unsigned long *bitmaps[], int slots)
{
	int i;

	for (i = 0; i < slots; i++)
		free(bitmaps[i]);

	free(bitmaps);
}