// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020, Google LLC.
 */
#include <inttypes.h>
#include <linux/bitmap.h>

#include "kvm_util.h"
#include "memstress.h"
#include "processor.h"
#include "ucall_common.h"

struct memstress_args memstress_args;

/*
 * Guest virtual memory offset of the testing memory slot.
 * Must not conflict with identity mapped test code.
 */
static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;

struct vcpu_thread {
	/* The index of the vCPU. */
	int vcpu_idx;

	/* The pthread backing the vCPU. */
	pthread_t thread;

	/* Set to true once the vCPU thread is up and running. */
	bool running;
};

/* The vCPU threads involved in this test. */
static struct vcpu_thread vcpu_threads[KVM_MAX_VCPUS];

/* The function run by each vCPU thread, as provided by the test. */
static void (*vcpu_thread_fn)(struct memstress_vcpu_args *);

/* Set to true once all vCPU threads are up and running. */
static bool all_vcpu_threads_running;

static struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];

/*
 * Continuously write to the first 8 bytes of each page in the
 * specified region.
 */
void memstress_guest_code(uint32_t vcpu_idx)
{
	struct memstress_args *args = &memstress_args;
	struct memstress_vcpu_args *vcpu_args = &args->vcpu_args[vcpu_idx];
	struct guest_random_state rand_state;
	uint64_t gva;
	uint64_t pages;
	uint64_t addr;
	uint64_t page;
	int i;

	rand_state = new_guest_random_state(guest_random_seed + vcpu_idx);

	gva = vcpu_args->gva;
	pages = vcpu_args->pages;

	/* Make sure vCPU args data structure is not corrupt. */
	GUEST_ASSERT(vcpu_args->vcpu_idx == vcpu_idx);

	while (true) {
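		/* Touch one byte in every guest page spanned by memstress_args. */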
		for (i = 0; i < sizeof(memstress_args); i += args->guest_page_size)
			(void) *((volatile char *)args + i);

		for (i = 0; i < pages; i++) {
			if (args->random_access)
				page = guest_random_u32(&rand_state) % pages;
			else
				page = i;

			addr = gva + (page * args->guest_page_size);

			if (__guest_random_bool(&rand_state, args->write_percent))
				*(uint64_t *)addr = 0x0123456789ABCDEF;
			else
				READ_ONCE(*(uint64_t *)addr);
		}

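		/* Signal the host that another full pass over the region is done. */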
		GUEST_SYNC(1);
	}
}

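/*
 * Carve up the test region among the vCPUs. With partitioned access, vCPU i
 * gets a private range of vcpu_memory_bytes at offset i * vcpu_memory_bytes;
 * e.g. with 4 vCPUs and 1 GiB per vCPU, vCPU 2 covers [gva + 2 GiB,
 * gva + 3 GiB). Without partitioning, every vCPU accesses the entire region.
 */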
void memstress_setup_vcpus(struct kvm_vm *vm, int nr_vcpus,
			   struct kvm_vcpu *vcpus[],
			   uint64_t vcpu_memory_bytes,
			   bool partition_vcpu_memory_access)
{
	struct memstress_args *args = &memstress_args;
	struct memstress_vcpu_args *vcpu_args;
	int i;

	for (i = 0; i < nr_vcpus; i++) {
		vcpu_args = &args->vcpu_args[i];

		vcpu_args->vcpu = vcpus[i];
		vcpu_args->vcpu_idx = i;

		if (partition_vcpu_memory_access) {
			vcpu_args->gva = guest_test_virt_mem +
					 (i * vcpu_memory_bytes);
			vcpu_args->pages = vcpu_memory_bytes /
					   args->guest_page_size;
			vcpu_args->gpa = args->gpa + (i * vcpu_memory_bytes);
		} else {
			vcpu_args->gva = guest_test_virt_mem;
			vcpu_args->pages = (nr_vcpus * vcpu_memory_bytes) /
					   args->guest_page_size;
			vcpu_args->gpa = args->gpa;
		}

		vcpu_args_set(vcpus[i], 1, i);

		pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n",
			 i, vcpu_args->gpa, vcpu_args->gpa +
			 (vcpu_args->pages * args->guest_page_size));
	}
}

struct kvm_vm *memstress_create_vm(enum vm_guest_mode mode, int nr_vcpus,
				   uint64_t vcpu_memory_bytes, int slots,
				   enum vm_mem_backing_src_type backing_src,
				   bool partition_vcpu_memory_access)
{
	struct memstress_args *args = &memstress_args;
	struct kvm_vm *vm;
	uint64_t guest_num_pages, slot0_pages = 0;
	uint64_t backing_src_pagesz = get_backing_src_pagesz(backing_src);
	uint64_t region_end_gfn;
	int i;

	pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));

	/* By default vCPUs will write to memory. */
	args->write_percent = 100;

	/*
	 * Snapshot the non-huge page size. This is used by the guest code to
	 * access/dirty pages at the logging granularity.
	 */
	args->guest_page_size = vm_guest_mode_params[mode].page_size;

	guest_num_pages = vm_adjust_num_guest_pages(mode,
				(nr_vcpus * vcpu_memory_bytes) / args->guest_page_size);

	TEST_ASSERT(vcpu_memory_bytes % getpagesize() == 0,
		    "Guest memory size is not host page size aligned.");
	TEST_ASSERT(vcpu_memory_bytes % args->guest_page_size == 0,
		    "Guest memory size is not guest page size aligned.");
	TEST_ASSERT(guest_num_pages % slots == 0,
		    "Guest memory cannot be evenly divided into %d slots.",
		    slots);

	/*
	 * If using nested, allocate extra pages for the nested page tables and
	 * in-memory data structures.
	 */
	if (args->nested)
		slot0_pages += memstress_nested_pages(nr_vcpus);

	/*
	 * Pass guest_num_pages to populate the page tables for test memory.
	 * The memory is also added to memslot 0, but that's a benign side
	 * effect as KVM allows aliasing HVAs in memslots.
	 */
	vm = __vm_create_with_vcpus(VM_SHAPE(mode), nr_vcpus,
				    slot0_pages + guest_num_pages,
				    memstress_guest_code, vcpus);

	args->vm = vm;

	/* Put the test region at the top of guest physical memory. */
	region_end_gfn = vm->max_gfn + 1;

#ifdef __x86_64__
	/*
	 * When running vCPUs in L2, restrict the test region to 48 bits to
	 * avoid needing 5-level page tables to identity map L2.
	 */
	if (args->nested)
		region_end_gfn = min(region_end_gfn, (1UL << 48) / args->guest_page_size);
#endif
	/*
	 * Bail if the test region would require more memory than the guest
	 * address space can hold.
	 */
	TEST_ASSERT(guest_num_pages < region_end_gfn,
		    "Requested more guest memory than address space allows.\n"
		    "    guest pages: %" PRIx64 " max gfn: %" PRIx64
		    " nr_vcpus: %d wss: %" PRIx64,
		    guest_num_pages, region_end_gfn - 1, nr_vcpus, vcpu_memory_bytes);

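	/*
	 * Place the test region as high as possible in guest physical
	 * memory: one page below region_end_gfn, rounded down so the region
	 * is aligned to the backing source's page size (allowing huge pages
	 * where the backing source supports them).
	 */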
	args->gpa = (region_end_gfn - guest_num_pages - 1) * args->guest_page_size;
	args->gpa = align_down(args->gpa, backing_src_pagesz);
#ifdef __s390x__
	/* Align to 1M (segment size) */
	args->gpa = align_down(args->gpa, 1 << 20);
#endif
	args->size = guest_num_pages * args->guest_page_size;
	pr_info("guest physical test memory: [0x%lx, 0x%lx)\n",
		args->gpa, args->gpa + args->size);

	/* Add extra memory slots for testing */
	for (i = 0; i < slots; i++) {
		uint64_t region_pages = guest_num_pages / slots;
		vm_paddr_t region_start = args->gpa + region_pages * args->guest_page_size * i;

		vm_userspace_mem_region_add(vm, backing_src, region_start,
					    MEMSTRESS_MEM_SLOT_INDEX + i,
					    region_pages, 0);
	}

	/* Map the test region into the guest's virtual address space. */
	virt_map(vm, guest_test_virt_mem, args->gpa, guest_num_pages);

	memstress_setup_vcpus(vm, nr_vcpus, vcpus, vcpu_memory_bytes,
			      partition_vcpu_memory_access);

	if (args->nested) {
		pr_info("Configuring vCPUs to run in L2 (nested).\n");
		memstress_setup_nested(vm, nr_vcpus, vcpus);
	}

	/* Export the shared variables to the guest. */
	sync_global_to_guest(vm, memstress_args);

	return vm;
}

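/*
 * Typical call sequence (an illustrative sketch, not lifted from any one
 * test; "vcpu_worker" stands in for a test-provided thread function):
 *
 *	vm = memstress_create_vm(mode, nr_vcpus, vcpu_mem_bytes, 1,
 *				 VM_MEM_SRC_ANONYMOUS, true);
 *	memstress_start_vcpu_threads(nr_vcpus, vcpu_worker);
 *	... drive the test ...
 *	memstress_join_vcpu_threads(nr_vcpus);
 *	memstress_destroy_vm(vm);
 */
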
void memstress_destroy_vm(struct kvm_vm *vm)
{
	kvm_vm_free(vm);
}

void memstress_set_write_percent(struct kvm_vm *vm, uint32_t write_percent)
{
	memstress_args.write_percent = write_percent;
	sync_global_to_guest(vm, memstress_args.write_percent);
}

void memstress_set_random_access(struct kvm_vm *vm, bool random_access)
{
	memstress_args.random_access = random_access;
	sync_global_to_guest(vm, memstress_args.random_access);
}

uint64_t __weak memstress_nested_pages(int nr_vcpus)
{
	return 0;
}

void __weak memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu **vcpus)
{
	pr_info("%s() is not supported on this architecture, skipping.\n", __func__);
	exit(KSFT_SKIP);
}

static void *vcpu_thread_main(void *data)
{
	struct vcpu_thread *vcpu = data;
	int vcpu_idx = vcpu->vcpu_idx;

	if (memstress_args.pin_vcpus)
		kvm_pin_this_task_to_pcpu(memstress_args.vcpu_to_pcpu[vcpu_idx]);

	WRITE_ONCE(vcpu->running, true);

	/*
	 * Wait for all vCPU threads to be up and running before calling the
	 * test-provided vCPU thread function. This prevents thread creation
	 * (which requires taking the mmap_sem in write mode) from interfering
	 * with the guest faulting in its memory.
	 */
	while (!READ_ONCE(all_vcpu_threads_running))
		;

	vcpu_thread_fn(&memstress_args.vcpu_args[vcpu_idx]);

	return NULL;
}

void memstress_start_vcpu_threads(int nr_vcpus,
				  void (*vcpu_fn)(struct memstress_vcpu_args *))
{
	int i;

	vcpu_thread_fn = vcpu_fn;
	WRITE_ONCE(all_vcpu_threads_running, false);
	WRITE_ONCE(memstress_args.stop_vcpus, false);

	for (i = 0; i < nr_vcpus; i++) {
		struct vcpu_thread *vcpu = &vcpu_threads[i];

		vcpu->vcpu_idx = i;
		WRITE_ONCE(vcpu->running, false);

		pthread_create(&vcpu->thread, NULL, vcpu_thread_main, vcpu);
	}

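	/* Wait for every vCPU thread to report in, then release them all at once. */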
	for (i = 0; i < nr_vcpus; i++) {
		while (!READ_ONCE(vcpu_threads[i].running))
			;
	}

	WRITE_ONCE(all_vcpu_threads_running, true);
}

void memstress_join_vcpu_threads(int nr_vcpus)
{
	int i;

	WRITE_ONCE(memstress_args.stop_vcpus, true);

	for (i = 0; i < nr_vcpus; i++)
		pthread_join(vcpu_threads[i].thread, NULL);
}

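/*
 * Dirty logging helpers. An illustrative lifecycle (hypothetical test code;
 * clearing the log requires KVM's manual dirty-log-protect support):
 *
 *	bitmaps = memstress_alloc_bitmaps(slots, pages_per_slot);
 *	memstress_enable_dirty_logging(vm, slots);
 *	... let the vCPUs dirty memory ...
 *	memstress_get_dirty_log(vm, bitmaps, slots);
 *	memstress_clear_dirty_log(vm, bitmaps, slots, pages_per_slot);
 *	memstress_disable_dirty_logging(vm, slots);
 *	memstress_free_bitmaps(bitmaps, slots);
 */
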
static void toggle_dirty_logging(struct kvm_vm *vm, int slots, bool enable)
{
	int i;

	for (i = 0; i < slots; i++) {
		int slot = MEMSTRESS_MEM_SLOT_INDEX + i;
		int flags = enable ? KVM_MEM_LOG_DIRTY_PAGES : 0;

		vm_mem_region_set_flags(vm, slot, flags);
	}
}

void memstress_enable_dirty_logging(struct kvm_vm *vm, int slots)
{
	toggle_dirty_logging(vm, slots, true);
}

void memstress_disable_dirty_logging(struct kvm_vm *vm, int slots)
{
	toggle_dirty_logging(vm, slots, false);
}

void memstress_get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots)
{
	int i;

	for (i = 0; i < slots; i++) {
		int slot = MEMSTRESS_MEM_SLOT_INDEX + i;

		kvm_vm_get_dirty_log(vm, slot, bitmaps[i]);
	}
}

void memstress_clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],
			       int slots, uint64_t pages_per_slot)
{
	int i;

	for (i = 0; i < slots; i++) {
		int slot = MEMSTRESS_MEM_SLOT_INDEX + i;

		kvm_vm_clear_dirty_log(vm, slot, bitmaps[i], 0, pages_per_slot);
	}
}

unsigned long **memstress_alloc_bitmaps(int slots, uint64_t pages_per_slot)
{
	unsigned long **bitmaps;
	int i;

	bitmaps = malloc(slots * sizeof(bitmaps[0]));
	TEST_ASSERT(bitmaps, "Failed to allocate bitmaps array.");

	for (i = 0; i < slots; i++) {
		bitmaps[i] = bitmap_zalloc(pages_per_slot);
		TEST_ASSERT(bitmaps[i], "Failed to allocate slot bitmap.");
	}

	return bitmaps;
}

void memstress_free_bitmaps(unsigned long *bitmaps[], int slots)
{
	int i;

	for (i = 0; i < slots; i++)
		free(bitmaps[i]);

	free(bitmaps);
}