// SPDX-License-Identifier: GPL-2.0
/*
 * KVM dirty page logging test
 *
 * Copyright (C) 2018, Red Hat, Inc.
 */
#include <stdio.h>
#include <stdlib.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"

/* The memory slot index to track dirty pages */
#define TEST_MEM_SLOT_INDEX	1
#define TEST_MEM_PAGES		3

/* L1 guest test virtual memory offset */
#define GUEST_TEST_MEM		0xc0000000

/* L2 guest test virtual memory offset */
#define NESTED_TEST_MEM1	0xc0001000
#define NESTED_TEST_MEM2	0xc0002000

/*
 * The GUEST_SYNC() argument tells the host whether page 0 of the test
 * memslot is expected to be dirty when the dirty log is harvested.
 */
static void l2_guest_code(u64 *a, u64 *b)
{
	READ_ONCE(*a);
	WRITE_ONCE(*a, 1);
	GUEST_SYNC(true);
	GUEST_SYNC(false);

	WRITE_ONCE(*b, 1);
	GUEST_SYNC(true);
	WRITE_ONCE(*b, 1);
	GUEST_SYNC(true);
	GUEST_SYNC(false);

	/* Exit to L1 and never come back. */
	vmcall();
}

static void l2_guest_code_ept_enabled(void)
{
	l2_guest_code((u64 *)NESTED_TEST_MEM1, (u64 *)NESTED_TEST_MEM2);
}

static void l2_guest_code_ept_disabled(void)
{
	/* Access the same L1 GPAs as l2_guest_code_ept_enabled() */
	l2_guest_code((u64 *)GUEST_TEST_MEM, (u64 *)GUEST_TEST_MEM);
}

void l1_guest_code(struct vmx_pages *vmx)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	void *l2_rip;

	GUEST_ASSERT(vmx->vmcs_gpa);
	GUEST_ASSERT(prepare_for_vmx_operation(vmx));
	GUEST_ASSERT(load_vmcs(vmx));

	if (vmx->eptp_gpa)
		l2_rip = l2_guest_code_ept_enabled;
	else
		l2_rip = l2_guest_code_ept_disabled;

	prepare_vmcs(vmx, l2_rip, &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	GUEST_SYNC(false);
	GUEST_ASSERT(!vmlaunch());
	GUEST_SYNC(false);
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
	GUEST_DONE();
}

static void test_vmx_dirty_log(bool enable_ept)
{
	vm_vaddr_t vmx_pages_gva = 0;
	struct vmx_pages *vmx;
	unsigned long *bmap;
	uint64_t *host_test_mem;

	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct ucall uc;
	bool done = false;

	pr_info("Nested EPT: %s\n", enable_ept ? "enabled" : "disabled");

	/* Create VM */
	vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
	vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
	vcpu_args_set(vcpu, 1, vmx_pages_gva);

	/* Add an extra memory slot for testing dirty logging */
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
				    GUEST_TEST_MEM,
				    TEST_MEM_SLOT_INDEX,
				    TEST_MEM_PAGES,
				    KVM_MEM_LOG_DIRTY_PAGES);

	/*
	 * Add an identity map for GVA range [0xc0000000, 0xc0003000). This
	 * affects both L1 and L2. However...
	 */
	virt_map(vm, GUEST_TEST_MEM, GUEST_TEST_MEM, TEST_MEM_PAGES);

	/*
	 * ... pages in the L2 GPA range [0xc0001000, 0xc0003000) will map to
	 * 0xc0000000.
	 *
	 * Note that prepare_eptp should be called only after L1's GPA map is
	 * done, meaning after the last call to virt_map.
	 *
	 * When EPT is disabled, the L2 guest code will still access the same L1
	 * GPAs as the EPT enabled case.
	 */
	if (enable_ept) {
		prepare_eptp(vmx, vm, 0);
		nested_map_memslot(vmx, vm, 0);
		nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096);
		nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096);
	}

	bmap = bitmap_zalloc(TEST_MEM_PAGES);
	host_test_mem = addr_gpa2hva(vm, GUEST_TEST_MEM);

	while (!done) {
		memset(host_test_mem, 0xaa, TEST_MEM_PAGES * 4096);
		vcpu_run(vcpu);
		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			/* NOT REACHED */
		case UCALL_SYNC:
			/*
			 * The nested guest wrote at offset 0x1000 in the memslot, but the
			 * dirty bitmap must be filled in according to L1 GPA, not L2.
			 */
			kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);
			if (uc.args[1]) {
				TEST_ASSERT(test_bit(0, bmap), "Page 0 incorrectly reported clean");
				TEST_ASSERT(host_test_mem[0] == 1, "Page 0 not written by guest");
			} else {
				TEST_ASSERT(!test_bit(0, bmap), "Page 0 incorrectly reported dirty");
				TEST_ASSERT(host_test_mem[0] == 0xaaaaaaaaaaaaaaaaULL, "Page 0 written by guest");
			}

			TEST_ASSERT(!test_bit(1, bmap), "Page 1 incorrectly reported dirty");
			TEST_ASSERT(host_test_mem[4096 / 8] == 0xaaaaaaaaaaaaaaaaULL, "Page 1 written by guest");
			TEST_ASSERT(!test_bit(2, bmap), "Page 2 incorrectly reported dirty");
			TEST_ASSERT(host_test_mem[8192 / 8] == 0xaaaaaaaaaaaaaaaaULL, "Page 2 written by guest");
			break;
		case UCALL_DONE:
			done = true;
			break;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}
	}
}

int main(int argc, char *argv[])
{
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));

	test_vmx_dirty_log(/*enable_ept=*/false);

	if (kvm_cpu_has_ept())
		test_vmx_dirty_log(/*enable_ept=*/true);

	return 0;
}