// SPDX-License-Identifier: GPL-2.0
/*
 * KVM dirty page logging performance test
 *
 * Based on dirty_log_test.c
 *
 * Copyright (C) 2018, Red Hat, Inc.
 * Copyright (C) 2020, Google, Inc.
 */

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <pthread.h>
#include <linux/bitmap.h>

#include "kvm_util.h"
#include "test_util.h"
#include "memstress.h"
#include "guest_modes.h"

#ifdef __aarch64__
#include "aarch64/vgic.h"

static int gic_fd;

static void arch_setup_vm(struct kvm_vm *vm, unsigned int nr_vcpus)
{
	/*
	 * The test can still run even if hardware does not support GICv3, as it
	 * is only an optimization to reduce guest exits.
	 */
	gic_fd = vgic_v3_setup(vm, nr_vcpus, 64);
}

static void arch_cleanup_vm(struct kvm_vm *vm)
{
	if (gic_fd > 0)
		close(gic_fd);
}

#else /* __aarch64__ */

static void arch_setup_vm(struct kvm_vm *vm, unsigned int nr_vcpus)
{
}

static void arch_cleanup_vm(struct kvm_vm *vm)
{
}

#endif

/* How many host loops to run by default (one KVM_GET_DIRTY_LOG for each loop) */
#define TEST_HOST_LOOP_N		2UL

static int nr_vcpus = 1;
static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
static bool run_vcpus_while_disabling_dirty_logging;

/* Host variables */
static u64 dirty_log_manual_caps;
static bool host_quit;
static int iteration;
static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];

static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
{
	struct kvm_vcpu *vcpu = vcpu_args->vcpu;
	int vcpu_idx = vcpu_args->vcpu_idx;
	uint64_t pages_count = 0;
	struct kvm_run *run;
	struct timespec start;
	struct timespec ts_diff;
	struct timespec total = (struct timespec){0};
	struct timespec avg;
	int ret;

	run = vcpu->run;

	while (!READ_ONCE(host_quit)) {
		int current_iteration = READ_ONCE(iteration);

		clock_gettime(CLOCK_MONOTONIC, &start);
		ret = _vcpu_run(vcpu);
		ts_diff = timespec_elapsed(start);

		TEST_ASSERT(ret == 0, "vcpu_run failed: %d", ret);
		TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
			    "Invalid guest sync status: exit_reason=%s",
			    exit_reason_str(run->exit_reason));

		pr_debug("Got sync event from vCPU %d\n", vcpu_idx);
		vcpu_last_completed_iteration[vcpu_idx] = current_iteration;
		pr_debug("vCPU %d updated last completed iteration to %d\n",
			 vcpu_idx, vcpu_last_completed_iteration[vcpu_idx]);

		if (current_iteration) {
			pages_count += vcpu_args->pages;
			total = timespec_add(total, ts_diff);
			pr_debug("vCPU %d iteration %d dirty memory time: %ld.%.9lds\n",
				 vcpu_idx, current_iteration, ts_diff.tv_sec,
				 ts_diff.tv_nsec);
		} else {
			pr_debug("vCPU %d iteration %d populate memory time: %ld.%.9lds\n",
				 vcpu_idx, current_iteration, ts_diff.tv_sec,
				 ts_diff.tv_nsec);
		}

		/*
		 * Keep running the guest while dirty logging is being disabled
		 * (iteration is negative) so that vCPUs are accessing memory
		 * for the entire duration of zapping collapsible SPTEs.
		 */
		while (current_iteration == READ_ONCE(iteration) &&
		       READ_ONCE(iteration) >= 0 && !READ_ONCE(host_quit)) {}
	}

	avg = timespec_div(total, vcpu_last_completed_iteration[vcpu_idx]);
	pr_debug("\nvCPU %d dirtied 0x%lx pages over %d iterations in %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
		 vcpu_idx, pages_count, vcpu_last_completed_iteration[vcpu_idx],
		 total.tv_sec, total.tv_nsec, avg.tv_sec, avg.tv_nsec);
}
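
/*
 * Per-run test configuration, filled in from the command line in main() and
 * handed to run_test() once per selected guest mode.
 */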
struct test_params {
	unsigned long iterations;
	uint64_t phys_offset;
	bool partition_vcpu_memory_access;
	enum vm_mem_backing_src_type backing_src;
	int slots;
	uint32_t write_percent;
	uint32_t random_seed;
	bool random_access;
};

static void run_test(enum vm_guest_mode mode, void *arg)
{
	struct test_params *p = arg;
	struct kvm_vm *vm;
	unsigned long **bitmaps;
	uint64_t guest_num_pages;
	uint64_t host_num_pages;
	uint64_t pages_per_slot;
	struct timespec start;
	struct timespec ts_diff;
	struct timespec get_dirty_log_total = (struct timespec){0};
	struct timespec vcpu_dirty_total = (struct timespec){0};
	struct timespec avg;
	struct timespec clear_dirty_log_total = (struct timespec){0};
	int i;

	vm = memstress_create_vm(mode, nr_vcpus, guest_percpu_mem_size,
				 p->slots, p->backing_src,
				 p->partition_vcpu_memory_access);

	pr_info("Random seed: %u\n", p->random_seed);
	memstress_set_random_seed(vm, p->random_seed);
	memstress_set_write_percent(vm, p->write_percent);

	guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm->page_shift;
	guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
	host_num_pages = vm_num_host_pages(mode, guest_num_pages);
	pages_per_slot = host_num_pages / p->slots;

	bitmaps = memstress_alloc_bitmaps(p->slots, pages_per_slot);

	if (dirty_log_manual_caps)
		vm_enable_cap(vm, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2,
			      dirty_log_manual_caps);

	arch_setup_vm(vm, nr_vcpus);

	/* Start the iterations */
	iteration = 0;
	host_quit = false;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (i = 0; i < nr_vcpus; i++)
		vcpu_last_completed_iteration[i] = -1;

	/*
	 * Use 100% writes during the population phase to ensure all
	 * memory is actually populated and not just mapped to the zero
	 * page. This prevents expensive copy-on-write faults from
	 * occurring during the dirty memory iterations below, which
	 * would pollute the performance results.
	 */
	memstress_set_write_percent(vm, 100);
	memstress_set_random_access(vm, false);
	memstress_start_vcpu_threads(nr_vcpus, vcpu_worker);

	/* Allow the vCPUs to populate memory */
	pr_debug("Starting iteration %d - Populating\n", iteration);
	for (i = 0; i < nr_vcpus; i++) {
		while (READ_ONCE(vcpu_last_completed_iteration[i]) !=
		       iteration)
			;
	}

	ts_diff = timespec_elapsed(start);
	pr_info("Populate memory time: %ld.%.9lds\n",
		ts_diff.tv_sec, ts_diff.tv_nsec);

	/* Enable dirty logging */
	clock_gettime(CLOCK_MONOTONIC, &start);
	memstress_enable_dirty_logging(vm, p->slots);
	ts_diff = timespec_elapsed(start);
	pr_info("Enabling dirty logging time: %ld.%.9lds\n\n",
		ts_diff.tv_sec, ts_diff.tv_nsec);

	memstress_set_write_percent(vm, p->write_percent);
	memstress_set_random_access(vm, p->random_access);
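
	/*
	 * Main measurement loop: each pass bumps "iteration" to release the
	 * vCPUs, waits for every vCPU to finish dirtying its memory, then
	 * times KVM_GET_DIRTY_LOG (and KVM_CLEAR_DIRTY_LOG when manual
	 * protect is in use).
	 */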
	while (iteration < p->iterations) {
		/*
		 * Incrementing the iteration number will start the vCPUs
		 * dirtying memory again.
		 */
		clock_gettime(CLOCK_MONOTONIC, &start);
		iteration++;

		pr_debug("Starting iteration %d\n", iteration);
		for (i = 0; i < nr_vcpus; i++) {
			while (READ_ONCE(vcpu_last_completed_iteration[i])
			       != iteration)
				;
		}

		ts_diff = timespec_elapsed(start);
		vcpu_dirty_total = timespec_add(vcpu_dirty_total, ts_diff);
		pr_info("Iteration %d dirty memory time: %ld.%.9lds\n",
			iteration, ts_diff.tv_sec, ts_diff.tv_nsec);

		clock_gettime(CLOCK_MONOTONIC, &start);
		memstress_get_dirty_log(vm, bitmaps, p->slots);
		ts_diff = timespec_elapsed(start);
		get_dirty_log_total = timespec_add(get_dirty_log_total,
						   ts_diff);
		pr_info("Iteration %d get dirty log time: %ld.%.9lds\n",
			iteration, ts_diff.tv_sec, ts_diff.tv_nsec);

		if (dirty_log_manual_caps) {
			clock_gettime(CLOCK_MONOTONIC, &start);
			memstress_clear_dirty_log(vm, bitmaps, p->slots,
						  pages_per_slot);
			ts_diff = timespec_elapsed(start);
			clear_dirty_log_total = timespec_add(clear_dirty_log_total,
							     ts_diff);
			pr_info("Iteration %d clear dirty log time: %ld.%.9lds\n",
				iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
		}
	}

	/*
	 * Run vCPUs while dirty logging is being disabled to stress disabling
	 * in terms of both performance and correctness. Opt-in via command
	 * line as this significantly increases time to disable dirty logging.
	 */
	if (run_vcpus_while_disabling_dirty_logging)
		WRITE_ONCE(iteration, -1);

	/* Disable dirty logging */
	clock_gettime(CLOCK_MONOTONIC, &start);
	memstress_disable_dirty_logging(vm, p->slots);
	ts_diff = timespec_elapsed(start);
	pr_info("Disabling dirty logging time: %ld.%.9lds\n",
		ts_diff.tv_sec, ts_diff.tv_nsec);

	/*
	 * Tell the vCPU threads to quit. No need to manually check that vCPUs
	 * have stopped running after disabling dirty logging; the join will
	 * wait for them to exit.
	 */
	host_quit = true;
	memstress_join_vcpu_threads(nr_vcpus);

	avg = timespec_div(get_dirty_log_total, p->iterations);
	pr_info("Get dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
		p->iterations, get_dirty_log_total.tv_sec,
		get_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);

	if (dirty_log_manual_caps) {
		avg = timespec_div(clear_dirty_log_total, p->iterations);
		pr_info("Clear dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
			p->iterations, clear_dirty_log_total.tv_sec,
			clear_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);
	}

	memstress_free_bitmaps(bitmaps, p->slots);
	arch_cleanup_vm(vm);
	memstress_destroy_vm(vm);
}
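
/*
 * Example invocation (values are illustrative, not a recommendation):
 *
 *   ./dirty_log_perf_test -i 4 -v 8 -b 512M -w 50
 *
 * runs four dirty-memory iterations with eight vCPUs, each dirtying its own
 * 512M region with a 50% write / 50% read access mix.
 */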
static void help(char *name)
{
	puts("");
	printf("usage: %s [-h] [-a] [-i iterations] [-p offset] [-g] "
	       "[-m mode] [-n] [-b vcpu bytes] [-v vcpus] [-o] [-r random seed] "
	       "[-s mem type] [-x memslots] [-w percentage] "
	       "[-c physical cpus to run test on]\n", name);
	puts("");
	printf(" -a: access memory randomly rather than in order.\n");
	printf(" -i: specify iteration counts (default: %"PRIu64")\n",
	       TEST_HOST_LOOP_N);
	printf(" -g: Do not enable KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2. This\n"
	       "     makes KVM_GET_DIRTY_LOG clear the dirty log (i.e.\n"
	       "     KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE is not enabled)\n"
	       "     and writes will be tracked as soon as dirty logging is\n"
	       "     enabled on the memslot (i.e. KVM_DIRTY_LOG_INITIALLY_SET\n"
	       "     is not enabled).\n");
	printf(" -p: specify guest physical test memory offset\n"
	       "     Warning: a low offset can conflict with the loaded test code.\n");
	guest_modes_help();
	printf(" -n: Run the vCPUs in nested mode (L2)\n");
	printf(" -e: Run vCPUs while dirty logging is being disabled. This\n"
	       "     can significantly increase runtime, especially if there\n"
	       "     isn't a dedicated pCPU for the main thread.\n");
	printf(" -b: specify the size of the memory region which should be\n"
	       "     dirtied by each vCPU. e.g. 10M or 3G.\n"
	       "     (default: 1G)\n");
	printf(" -v: specify the number of vCPUs to run.\n");
	printf(" -o: Overlap guest memory accesses instead of partitioning\n"
	       "     them into a separate region of memory for each vCPU.\n");
	printf(" -r: specify the starting random seed.\n");
	backing_src_help("-s");
	printf(" -x: Split the memory region into this number of memslots.\n"
	       "     (default: 1)\n");
	printf(" -w: specify the percentage of pages which should be written to\n"
	       "     as an integer from 0-100 inclusive. This is probabilistic,\n"
	       "     so -w X means each page has an X%% chance of writing\n"
	       "     and a (100-X)%% chance of reading.\n"
	       "     (default: 100 i.e. all pages are written to.)\n");
	kvm_print_vcpu_pinning_help();
	puts("");
	exit(0);
}
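
/*
 * By default the test uses KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 when the host
 * supports it, so dirty state is harvested with KVM_GET_DIRTY_LOG and reset
 * with KVM_CLEAR_DIRTY_LOG; -g forces the legacy clear-on-get behavior.
 */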
int main(int argc, char *argv[])
{
	int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
	const char *pcpu_list = NULL;
	struct test_params p = {
		.iterations = TEST_HOST_LOOP_N,
		.partition_vcpu_memory_access = true,
		.backing_src = DEFAULT_VM_MEM_SRC,
		.slots = 1,
		.random_seed = 1,
		.write_percent = 100,
	};
	int opt;

	dirty_log_manual_caps =
		kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
	dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
				  KVM_DIRTY_LOG_INITIALLY_SET);

	guest_modes_append_default();

	while ((opt = getopt(argc, argv, "ab:c:eghi:m:nop:r:s:v:x:w:")) != -1) {
		switch (opt) {
		case 'a':
			p.random_access = true;
			break;
		case 'b':
			guest_percpu_mem_size = parse_size(optarg);
			break;
		case 'c':
			pcpu_list = optarg;
			break;
		case 'e':
			/* 'e' is for evil. */
			run_vcpus_while_disabling_dirty_logging = true;
			break;
		case 'g':
			dirty_log_manual_caps = 0;
			break;
		case 'h':
			help(argv[0]);
			break;
		case 'i':
			p.iterations = atoi_positive("Number of iterations", optarg);
			break;
		case 'm':
			guest_modes_cmdline(optarg);
			break;
		case 'n':
			memstress_args.nested = true;
			break;
		case 'o':
			p.partition_vcpu_memory_access = false;
			break;
		case 'p':
			p.phys_offset = strtoull(optarg, NULL, 0);
			break;
		case 'r':
			p.random_seed = atoi_positive("Random seed", optarg);
			break;
		case 's':
			p.backing_src = parse_backing_src_type(optarg);
			break;
		case 'v':
			nr_vcpus = atoi_positive("Number of vCPUs", optarg);
			TEST_ASSERT(nr_vcpus <= max_vcpus,
				    "Invalid number of vcpus, must be between 1 and %d", max_vcpus);
			break;
		case 'w':
			p.write_percent = atoi_non_negative("Write percentage", optarg);
			TEST_ASSERT(p.write_percent <= 100,
				    "Write percentage must be between 0 and 100");
			break;
		case 'x':
			p.slots = atoi_positive("Number of slots", optarg);
			break;
		default:
			help(argv[0]);
			break;
		}
	}

	if (pcpu_list) {
		kvm_parse_vcpu_pinning(pcpu_list, memstress_args.vcpu_to_pcpu,
				       nr_vcpus);
		memstress_args.pin_vcpus = true;
	}

	TEST_ASSERT(p.iterations >= 2, "The test should have at least two iterations");

	pr_info("Test iterations: %"PRIu64"\n", p.iterations);

	for_each_guest_mode(run_test, &p);

	return 0;
}