1 // SPDX-License-Identifier: GPL-2.0 2 #include <fcntl.h> 3 #include <pthread.h> 4 #include <sched.h> 5 #include <semaphore.h> 6 #include <signal.h> 7 #include <stdio.h> 8 #include <stdlib.h> 9 #include <string.h> 10 #include <sys/ioctl.h> 11 #include <sys/mman.h> 12 13 #include <linux/compiler.h> 14 15 #include <test_util.h> 16 #include <kvm_util.h> 17 #include <processor.h> 18 19 /* 20 * s390x needs at least 1MB alignment, and the x86_64 MOVE/DELETE tests need a 21 * 2MB sized and aligned region so that the initial region corresponds to 22 * exactly one large page. 23 */ 24 #define MEM_REGION_SIZE 0x200000 25 26 #ifdef __x86_64__ 27 /* 28 * Somewhat arbitrary location and slot, intended to not overlap anything. 29 */ 30 #define MEM_REGION_GPA 0xc0000000 31 #define MEM_REGION_SLOT 10 32 33 static const uint64_t MMIO_VAL = 0xbeefull; 34 35 extern const uint64_t final_rip_start; 36 extern const uint64_t final_rip_end; 37 38 static sem_t vcpu_ready; 39 40 static inline uint64_t guest_spin_on_val(uint64_t spin_val) 41 { 42 uint64_t val; 43 44 do { 45 val = READ_ONCE(*((uint64_t *)MEM_REGION_GPA)); 46 } while (val == spin_val); 47 48 GUEST_SYNC(0); 49 return val; 50 } 51 52 static void *vcpu_worker(void *data) 53 { 54 struct kvm_vcpu *vcpu = data; 55 struct kvm_run *run = vcpu->run; 56 struct ucall uc; 57 uint64_t cmd; 58 59 /* 60 * Loop until the guest is done. Re-enter the guest on all MMIO exits, 61 * which will occur if the guest attempts to access a memslot after it 62 * has been deleted or while it is being moved . 63 */ 64 while (1) { 65 vcpu_run(vcpu); 66 67 if (run->exit_reason == KVM_EXIT_IO) { 68 cmd = get_ucall(vcpu, &uc); 69 if (cmd != UCALL_SYNC) 70 break; 71 72 sem_post(&vcpu_ready); 73 continue; 74 } 75 76 if (run->exit_reason != KVM_EXIT_MMIO) 77 break; 78 79 TEST_ASSERT(!run->mmio.is_write, "Unexpected exit mmio write"); 80 TEST_ASSERT(run->mmio.len == 8, 81 "Unexpected exit mmio size = %u", run->mmio.len); 82 83 TEST_ASSERT(run->mmio.phys_addr == MEM_REGION_GPA, 84 "Unexpected exit mmio address = 0x%llx", 85 run->mmio.phys_addr); 86 memcpy(run->mmio.data, &MMIO_VAL, 8); 87 } 88 89 if (run->exit_reason == KVM_EXIT_IO && cmd == UCALL_ABORT) 90 REPORT_GUEST_ASSERT(uc); 91 92 return NULL; 93 } 94 95 static void wait_for_vcpu(void) 96 { 97 struct timespec ts; 98 99 TEST_ASSERT(!clock_gettime(CLOCK_REALTIME, &ts), 100 "clock_gettime() failed: %d", errno); 101 102 ts.tv_sec += 2; 103 TEST_ASSERT(!sem_timedwait(&vcpu_ready, &ts), 104 "sem_timedwait() failed: %d", errno); 105 106 /* Wait for the vCPU thread to reenter the guest. */ 107 usleep(100000); 108 } 109 110 static struct kvm_vm *spawn_vm(struct kvm_vcpu **vcpu, pthread_t *vcpu_thread, 111 void *guest_code) 112 { 113 struct kvm_vm *vm; 114 uint64_t *hva; 115 uint64_t gpa; 116 117 vm = vm_create_with_one_vcpu(vcpu, guest_code); 118 119 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_THP, 120 MEM_REGION_GPA, MEM_REGION_SLOT, 121 MEM_REGION_SIZE / getpagesize(), 0); 122 123 /* 124 * Allocate and map two pages so that the GPA accessed by guest_code() 125 * stays valid across the memslot move. 126 */ 127 gpa = vm_phy_pages_alloc(vm, 2, MEM_REGION_GPA, MEM_REGION_SLOT); 128 TEST_ASSERT(gpa == MEM_REGION_GPA, "Failed vm_phy_pages_alloc\n"); 129 130 virt_map(vm, MEM_REGION_GPA, MEM_REGION_GPA, 2); 131 132 /* Ditto for the host mapping so that both pages can be zeroed. */ 133 hva = addr_gpa2hva(vm, MEM_REGION_GPA); 134 memset(hva, 0, 2 * 4096); 135 136 pthread_create(vcpu_thread, NULL, vcpu_worker, *vcpu); 137 138 /* Ensure the guest thread is spun up. */ 139 wait_for_vcpu(); 140 141 return vm; 142 } 143 144 145 static void guest_code_move_memory_region(void) 146 { 147 uint64_t val; 148 149 GUEST_SYNC(0); 150 151 /* 152 * Spin until the memory region starts getting moved to a 153 * misaligned address. 154 * Every region move may or may not trigger MMIO, as the 155 * window where the memslot is invalid is usually quite small. 156 */ 157 val = guest_spin_on_val(0); 158 __GUEST_ASSERT(val == 1 || val == MMIO_VAL, 159 "Expected '1' or MMIO ('%lx'), got '%lx'", MMIO_VAL, val); 160 161 /* Spin until the misaligning memory region move completes. */ 162 val = guest_spin_on_val(MMIO_VAL); 163 __GUEST_ASSERT(val == 1 || val == 0, 164 "Expected '0' or '1' (no MMIO), got '%lx'", val); 165 166 /* Spin until the memory region starts to get re-aligned. */ 167 val = guest_spin_on_val(0); 168 __GUEST_ASSERT(val == 1 || val == MMIO_VAL, 169 "Expected '1' or MMIO ('%lx'), got '%lx'", MMIO_VAL, val); 170 171 /* Spin until the re-aligning memory region move completes. */ 172 val = guest_spin_on_val(MMIO_VAL); 173 GUEST_ASSERT_EQ(val, 1); 174 175 GUEST_DONE(); 176 } 177 178 static void test_move_memory_region(void) 179 { 180 pthread_t vcpu_thread; 181 struct kvm_vcpu *vcpu; 182 struct kvm_vm *vm; 183 uint64_t *hva; 184 185 vm = spawn_vm(&vcpu, &vcpu_thread, guest_code_move_memory_region); 186 187 hva = addr_gpa2hva(vm, MEM_REGION_GPA); 188 189 /* 190 * Shift the region's base GPA. The guest should not see "2" as the 191 * hva->gpa translation is misaligned, i.e. the guest is accessing a 192 * different host pfn. 193 */ 194 vm_mem_region_move(vm, MEM_REGION_SLOT, MEM_REGION_GPA - 4096); 195 WRITE_ONCE(*hva, 2); 196 197 /* 198 * The guest _might_ see an invalid memslot and trigger MMIO, but it's 199 * a tiny window. Spin and defer the sync until the memslot is 200 * restored and guest behavior is once again deterministic. 201 */ 202 usleep(100000); 203 204 /* 205 * Note, value in memory needs to be changed *before* restoring the 206 * memslot, else the guest could race the update and see "2". 207 */ 208 WRITE_ONCE(*hva, 1); 209 210 /* Restore the original base, the guest should see "1". */ 211 vm_mem_region_move(vm, MEM_REGION_SLOT, MEM_REGION_GPA); 212 wait_for_vcpu(); 213 /* Defered sync from when the memslot was misaligned (above). */ 214 wait_for_vcpu(); 215 216 pthread_join(vcpu_thread, NULL); 217 218 kvm_vm_free(vm); 219 } 220 221 static void guest_code_delete_memory_region(void) 222 { 223 struct desc_ptr idt; 224 uint64_t val; 225 226 /* 227 * Clobber the IDT so that a #PF due to the memory region being deleted 228 * escalates to triple-fault shutdown. Because the memory region is 229 * deleted, there will be no valid mappings. As a result, KVM will 230 * repeatedly intercepts the state-2 page fault that occurs when trying 231 * to vector the guest's #PF. I.e. trying to actually handle the #PF 232 * in the guest will never succeed, and so isn't an option. 233 */ 234 memset(&idt, 0, sizeof(idt)); 235 __asm__ __volatile__("lidt %0" :: "m"(idt)); 236 237 GUEST_SYNC(0); 238 239 /* Spin until the memory region is deleted. */ 240 val = guest_spin_on_val(0); 241 GUEST_ASSERT_EQ(val, MMIO_VAL); 242 243 /* Spin until the memory region is recreated. */ 244 val = guest_spin_on_val(MMIO_VAL); 245 GUEST_ASSERT_EQ(val, 0); 246 247 /* Spin until the memory region is deleted. */ 248 val = guest_spin_on_val(0); 249 GUEST_ASSERT_EQ(val, MMIO_VAL); 250 251 asm("1:\n\t" 252 ".pushsection .rodata\n\t" 253 ".global final_rip_start\n\t" 254 "final_rip_start: .quad 1b\n\t" 255 ".popsection"); 256 257 /* Spin indefinitely (until the code memslot is deleted). */ 258 guest_spin_on_val(MMIO_VAL); 259 260 asm("1:\n\t" 261 ".pushsection .rodata\n\t" 262 ".global final_rip_end\n\t" 263 "final_rip_end: .quad 1b\n\t" 264 ".popsection"); 265 266 GUEST_ASSERT(0); 267 } 268 269 static void test_delete_memory_region(void) 270 { 271 pthread_t vcpu_thread; 272 struct kvm_vcpu *vcpu; 273 struct kvm_regs regs; 274 struct kvm_run *run; 275 struct kvm_vm *vm; 276 277 vm = spawn_vm(&vcpu, &vcpu_thread, guest_code_delete_memory_region); 278 279 /* Delete the memory region, the guest should not die. */ 280 vm_mem_region_delete(vm, MEM_REGION_SLOT); 281 wait_for_vcpu(); 282 283 /* Recreate the memory region. The guest should see "0". */ 284 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_THP, 285 MEM_REGION_GPA, MEM_REGION_SLOT, 286 MEM_REGION_SIZE / getpagesize(), 0); 287 wait_for_vcpu(); 288 289 /* Delete the region again so that there's only one memslot left. */ 290 vm_mem_region_delete(vm, MEM_REGION_SLOT); 291 wait_for_vcpu(); 292 293 /* 294 * Delete the primary memslot. This should cause an emulation error or 295 * shutdown due to the page tables getting nuked. 296 */ 297 vm_mem_region_delete(vm, 0); 298 299 pthread_join(vcpu_thread, NULL); 300 301 run = vcpu->run; 302 303 TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN || 304 run->exit_reason == KVM_EXIT_INTERNAL_ERROR, 305 "Unexpected exit reason = %d", run->exit_reason); 306 307 vcpu_regs_get(vcpu, ®s); 308 309 /* 310 * On AMD, after KVM_EXIT_SHUTDOWN the VMCB has been reinitialized already, 311 * so the instruction pointer would point to the reset vector. 312 */ 313 if (run->exit_reason == KVM_EXIT_INTERNAL_ERROR) 314 TEST_ASSERT(regs.rip >= final_rip_start && 315 regs.rip < final_rip_end, 316 "Bad rip, expected 0x%lx - 0x%lx, got 0x%llx", 317 final_rip_start, final_rip_end, regs.rip); 318 319 kvm_vm_free(vm); 320 } 321 322 static void test_zero_memory_regions(void) 323 { 324 struct kvm_vcpu *vcpu; 325 struct kvm_vm *vm; 326 327 pr_info("Testing KVM_RUN with zero added memory regions\n"); 328 329 vm = vm_create_barebones(); 330 vcpu = __vm_vcpu_add(vm, 0); 331 332 vm_ioctl(vm, KVM_SET_NR_MMU_PAGES, (void *)64ul); 333 vcpu_run(vcpu); 334 TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_INTERNAL_ERROR); 335 336 kvm_vm_free(vm); 337 } 338 #endif /* __x86_64__ */ 339 340 static void test_invalid_memory_region_flags(void) 341 { 342 uint32_t supported_flags = KVM_MEM_LOG_DIRTY_PAGES; 343 const uint32_t v2_only_flags = KVM_MEM_GUEST_MEMFD; 344 struct kvm_vm *vm; 345 int r, i; 346 347 #if defined __aarch64__ || defined __riscv || defined __x86_64__ 348 supported_flags |= KVM_MEM_READONLY; 349 #endif 350 351 #ifdef __x86_64__ 352 if (kvm_check_cap(KVM_CAP_VM_TYPES) & BIT(KVM_X86_SW_PROTECTED_VM)) 353 vm = vm_create_barebones_type(KVM_X86_SW_PROTECTED_VM); 354 else 355 #endif 356 vm = vm_create_barebones(); 357 358 if (kvm_check_cap(KVM_CAP_MEMORY_ATTRIBUTES) & KVM_MEMORY_ATTRIBUTE_PRIVATE) 359 supported_flags |= KVM_MEM_GUEST_MEMFD; 360 361 for (i = 0; i < 32; i++) { 362 if ((supported_flags & BIT(i)) && !(v2_only_flags & BIT(i))) 363 continue; 364 365 r = __vm_set_user_memory_region(vm, 0, BIT(i), 366 0, MEM_REGION_SIZE, NULL); 367 368 TEST_ASSERT(r && errno == EINVAL, 369 "KVM_SET_USER_MEMORY_REGION should have failed on v2 only flag 0x%lx", BIT(i)); 370 371 if (supported_flags & BIT(i)) 372 continue; 373 374 r = __vm_set_user_memory_region2(vm, 0, BIT(i), 375 0, MEM_REGION_SIZE, NULL, 0, 0); 376 TEST_ASSERT(r && errno == EINVAL, 377 "KVM_SET_USER_MEMORY_REGION2 should have failed on unsupported flag 0x%lx", BIT(i)); 378 } 379 380 if (supported_flags & KVM_MEM_GUEST_MEMFD) { 381 int guest_memfd = vm_create_guest_memfd(vm, MEM_REGION_SIZE, 0); 382 383 r = __vm_set_user_memory_region2(vm, 0, 384 KVM_MEM_LOG_DIRTY_PAGES | KVM_MEM_GUEST_MEMFD, 385 0, MEM_REGION_SIZE, NULL, guest_memfd, 0); 386 TEST_ASSERT(r && errno == EINVAL, 387 "KVM_SET_USER_MEMORY_REGION2 should have failed, dirty logging private memory is unsupported"); 388 389 r = __vm_set_user_memory_region2(vm, 0, 390 KVM_MEM_READONLY | KVM_MEM_GUEST_MEMFD, 391 0, MEM_REGION_SIZE, NULL, guest_memfd, 0); 392 TEST_ASSERT(r && errno == EINVAL, 393 "KVM_SET_USER_MEMORY_REGION2 should have failed, read-only GUEST_MEMFD memslots are unsupported"); 394 395 close(guest_memfd); 396 } 397 } 398 399 /* 400 * Test it can be added memory slots up to KVM_CAP_NR_MEMSLOTS, then any 401 * tentative to add further slots should fail. 402 */ 403 static void test_add_max_memory_regions(void) 404 { 405 int ret; 406 struct kvm_vm *vm; 407 uint32_t max_mem_slots; 408 uint32_t slot; 409 void *mem, *mem_aligned, *mem_extra; 410 size_t alignment; 411 412 #ifdef __s390x__ 413 /* On s390x, the host address must be aligned to 1M (due to PGSTEs) */ 414 alignment = 0x100000; 415 #else 416 alignment = 1; 417 #endif 418 419 max_mem_slots = kvm_check_cap(KVM_CAP_NR_MEMSLOTS); 420 TEST_ASSERT(max_mem_slots > 0, 421 "KVM_CAP_NR_MEMSLOTS should be greater than 0"); 422 pr_info("Allowed number of memory slots: %i\n", max_mem_slots); 423 424 vm = vm_create_barebones(); 425 426 /* Check it can be added memory slots up to the maximum allowed */ 427 pr_info("Adding slots 0..%i, each memory region with %dK size\n", 428 (max_mem_slots - 1), MEM_REGION_SIZE >> 10); 429 430 mem = mmap(NULL, (size_t)max_mem_slots * MEM_REGION_SIZE + alignment, 431 PROT_READ | PROT_WRITE, 432 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0); 433 TEST_ASSERT(mem != MAP_FAILED, "Failed to mmap() host"); 434 mem_aligned = (void *)(((size_t) mem + alignment - 1) & ~(alignment - 1)); 435 436 for (slot = 0; slot < max_mem_slots; slot++) 437 vm_set_user_memory_region(vm, slot, 0, 438 ((uint64_t)slot * MEM_REGION_SIZE), 439 MEM_REGION_SIZE, 440 mem_aligned + (uint64_t)slot * MEM_REGION_SIZE); 441 442 /* Check it cannot be added memory slots beyond the limit */ 443 mem_extra = mmap(NULL, MEM_REGION_SIZE, PROT_READ | PROT_WRITE, 444 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 445 TEST_ASSERT(mem_extra != MAP_FAILED, "Failed to mmap() host"); 446 447 ret = __vm_set_user_memory_region(vm, max_mem_slots, 0, 448 (uint64_t)max_mem_slots * MEM_REGION_SIZE, 449 MEM_REGION_SIZE, mem_extra); 450 TEST_ASSERT(ret == -1 && errno == EINVAL, 451 "Adding one more memory slot should fail with EINVAL"); 452 453 munmap(mem, (size_t)max_mem_slots * MEM_REGION_SIZE + alignment); 454 munmap(mem_extra, MEM_REGION_SIZE); 455 kvm_vm_free(vm); 456 } 457 458 459 #ifdef __x86_64__ 460 static void test_invalid_guest_memfd(struct kvm_vm *vm, int memfd, 461 size_t offset, const char *msg) 462 { 463 int r = __vm_set_user_memory_region2(vm, MEM_REGION_SLOT, KVM_MEM_GUEST_MEMFD, 464 MEM_REGION_GPA, MEM_REGION_SIZE, 465 0, memfd, offset); 466 TEST_ASSERT(r == -1 && errno == EINVAL, "%s", msg); 467 } 468 469 static void test_add_private_memory_region(void) 470 { 471 struct kvm_vm *vm, *vm2; 472 int memfd, i; 473 474 pr_info("Testing ADD of KVM_MEM_GUEST_MEMFD memory regions\n"); 475 476 vm = vm_create_barebones_type(KVM_X86_SW_PROTECTED_VM); 477 478 test_invalid_guest_memfd(vm, vm->kvm_fd, 0, "KVM fd should fail"); 479 test_invalid_guest_memfd(vm, vm->fd, 0, "VM's fd should fail"); 480 481 memfd = kvm_memfd_alloc(MEM_REGION_SIZE, false); 482 test_invalid_guest_memfd(vm, memfd, 0, "Regular memfd() should fail"); 483 close(memfd); 484 485 vm2 = vm_create_barebones_type(KVM_X86_SW_PROTECTED_VM); 486 memfd = vm_create_guest_memfd(vm2, MEM_REGION_SIZE, 0); 487 test_invalid_guest_memfd(vm, memfd, 0, "Other VM's guest_memfd() should fail"); 488 489 vm_set_user_memory_region2(vm2, MEM_REGION_SLOT, KVM_MEM_GUEST_MEMFD, 490 MEM_REGION_GPA, MEM_REGION_SIZE, 0, memfd, 0); 491 close(memfd); 492 kvm_vm_free(vm2); 493 494 memfd = vm_create_guest_memfd(vm, MEM_REGION_SIZE, 0); 495 for (i = 1; i < PAGE_SIZE; i++) 496 test_invalid_guest_memfd(vm, memfd, i, "Unaligned offset should fail"); 497 498 vm_set_user_memory_region2(vm, MEM_REGION_SLOT, KVM_MEM_GUEST_MEMFD, 499 MEM_REGION_GPA, MEM_REGION_SIZE, 0, memfd, 0); 500 close(memfd); 501 502 kvm_vm_free(vm); 503 } 504 505 static void test_add_overlapping_private_memory_regions(void) 506 { 507 struct kvm_vm *vm; 508 int memfd; 509 int r; 510 511 pr_info("Testing ADD of overlapping KVM_MEM_GUEST_MEMFD memory regions\n"); 512 513 vm = vm_create_barebones_type(KVM_X86_SW_PROTECTED_VM); 514 515 memfd = vm_create_guest_memfd(vm, MEM_REGION_SIZE * 4, 0); 516 517 vm_set_user_memory_region2(vm, MEM_REGION_SLOT, KVM_MEM_GUEST_MEMFD, 518 MEM_REGION_GPA, MEM_REGION_SIZE * 2, 0, memfd, 0); 519 520 vm_set_user_memory_region2(vm, MEM_REGION_SLOT + 1, KVM_MEM_GUEST_MEMFD, 521 MEM_REGION_GPA * 2, MEM_REGION_SIZE * 2, 522 0, memfd, MEM_REGION_SIZE * 2); 523 524 /* 525 * Delete the first memslot, and then attempt to recreate it except 526 * with a "bad" offset that results in overlap in the guest_memfd(). 527 */ 528 vm_set_user_memory_region2(vm, MEM_REGION_SLOT, KVM_MEM_GUEST_MEMFD, 529 MEM_REGION_GPA, 0, NULL, -1, 0); 530 531 /* Overlap the front half of the other slot. */ 532 r = __vm_set_user_memory_region2(vm, MEM_REGION_SLOT, KVM_MEM_GUEST_MEMFD, 533 MEM_REGION_GPA * 2 - MEM_REGION_SIZE, 534 MEM_REGION_SIZE * 2, 535 0, memfd, 0); 536 TEST_ASSERT(r == -1 && errno == EEXIST, "%s", 537 "Overlapping guest_memfd() bindings should fail with EEXIST"); 538 539 /* And now the back half of the other slot. */ 540 r = __vm_set_user_memory_region2(vm, MEM_REGION_SLOT, KVM_MEM_GUEST_MEMFD, 541 MEM_REGION_GPA * 2 + MEM_REGION_SIZE, 542 MEM_REGION_SIZE * 2, 543 0, memfd, 0); 544 TEST_ASSERT(r == -1 && errno == EEXIST, "%s", 545 "Overlapping guest_memfd() bindings should fail with EEXIST"); 546 547 close(memfd); 548 kvm_vm_free(vm); 549 } 550 #endif 551 552 int main(int argc, char *argv[]) 553 { 554 #ifdef __x86_64__ 555 int i, loops; 556 557 /* 558 * FIXME: the zero-memslot test fails on aarch64 and s390x because 559 * KVM_RUN fails with ENOEXEC or EFAULT. 560 */ 561 test_zero_memory_regions(); 562 #endif 563 564 test_invalid_memory_region_flags(); 565 566 test_add_max_memory_regions(); 567 568 #ifdef __x86_64__ 569 if (kvm_has_cap(KVM_CAP_GUEST_MEMFD) && 570 (kvm_check_cap(KVM_CAP_VM_TYPES) & BIT(KVM_X86_SW_PROTECTED_VM))) { 571 test_add_private_memory_region(); 572 test_add_overlapping_private_memory_regions(); 573 } else { 574 pr_info("Skipping tests for KVM_MEM_GUEST_MEMFD memory regions\n"); 575 } 576 577 if (argc > 1) 578 loops = atoi_positive("Number of iterations", argv[1]); 579 else 580 loops = 10; 581 582 pr_info("Testing MOVE of in-use region, %d loops\n", loops); 583 for (i = 0; i < loops; i++) 584 test_move_memory_region(); 585 586 pr_info("Testing DELETE of in-use region, %d loops\n", loops); 587 for (i = 0; i < loops; i++) 588 test_delete_memory_region(); 589 #endif 590 591 return 0; 592 } 593