// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMX-preemption timer test
 *
 * Copyright (C) 2020, Google, LLC.
 *
 * Test to ensure that VM-Enter after migration doesn't incorrectly
 * restart the timer with the full timer value instead of the partially
 * decayed timer value.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

#include "test_util.h"

#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"

#define PREEMPTION_TIMER_VALUE			100000000ull
#define PREEMPTION_TIMER_VALUE_THRESHOLD1	 80000000ull

/* TSC-to-preemption-timer rate, i.e. the shift count from IA32_VMX_MISC[4:0] */
u32 vmx_pt_rate;
bool l2_save_restore_done;
static u64 l2_vmx_pt_start;
volatile u64 l2_vmx_pt_finish;

union vmx_basic basic;
union vmx_ctrl_msr ctrl_pin_rev;
union vmx_ctrl_msr ctrl_exit_rev;

void l2_guest_code(void)
{
	u64 vmx_pt_delta;

	vmcall();

	/* Round the TSC down to the preemption timer's granularity */
	l2_vmx_pt_start = (rdtsc() >> vmx_pt_rate) << vmx_pt_rate;

	/*
	 * Wait until the 1st threshold has passed
	 */
	do {
		l2_vmx_pt_finish = rdtsc();
		vmx_pt_delta = (l2_vmx_pt_finish - l2_vmx_pt_start) >>
				vmx_pt_rate;
	} while (vmx_pt_delta < PREEMPTION_TIMER_VALUE_THRESHOLD1);

	/*
	 * Force L2 through a save and restore cycle
	 */
	GUEST_SYNC(1);

	l2_save_restore_done = 1;

	/*
	 * Now wait for the preemption timer to fire and
	 * exit to L1
	 */
	while ((l2_vmx_pt_finish = rdtsc()))
		;
}

void l1_guest_code(struct vmx_pages *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	u64 l1_vmx_pt_start;
	u64 l1_vmx_pt_finish;
	u64 l1_tsc_deadline, l2_tsc_deadline;

	GUEST_ASSERT(vmx_pages->vmcs_gpa);
	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_ASSERT(load_vmcs(vmx_pages));
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);

	prepare_vmcs(vmx_pages, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	/*
	 * Check for preemption timer support
	 */
	basic.val = rdmsr(MSR_IA32_VMX_BASIC);
	ctrl_pin_rev.val = rdmsr(basic.ctrl ? MSR_IA32_VMX_TRUE_PINBASED_CTLS
			: MSR_IA32_VMX_PINBASED_CTLS);
	ctrl_exit_rev.val = rdmsr(basic.ctrl ? MSR_IA32_VMX_TRUE_EXIT_CTLS
			: MSR_IA32_VMX_EXIT_CTLS);
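	/*
	 * basic.ctrl reflects IA32_VMX_BASIC[55]: when set, the TRUE_*
	 * control MSRs exist and report the actual allowed settings.  The
	 * .clr half of each control MSR is the allowed-1 mask, so the
	 * checks below verify that the preemption timer can be enabled and
	 * that its current value can be saved on VM-exit.
	 */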
	if (!(ctrl_pin_rev.clr & PIN_BASED_VMX_PREEMPTION_TIMER) ||
	    !(ctrl_exit_rev.clr & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER))
		return;

	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
	vmwrite(GUEST_RIP, vmreadz(GUEST_RIP) + vmreadz(VM_EXIT_INSTRUCTION_LEN));

	/*
	 * Turn on the preemption timer pin control and resume the guest
	 */
	GUEST_ASSERT(!vmwrite(PIN_BASED_VM_EXEC_CONTROL,
			      vmreadz(PIN_BASED_VM_EXEC_CONTROL) |
			      PIN_BASED_VMX_PREEMPTION_TIMER));

	GUEST_ASSERT(!vmwrite(VMX_PREEMPTION_TIMER_VALUE,
			      PREEMPTION_TIMER_VALUE));

	vmx_pt_rate = rdmsr(MSR_IA32_VMX_MISC) & 0x1F;

	l2_save_restore_done = 0;

	l1_vmx_pt_start = (rdtsc() >> vmx_pt_rate) << vmx_pt_rate;

	GUEST_ASSERT(!vmresume());

	l1_vmx_pt_finish = rdtsc();

	/*
	 * Ensure the exit from L2 happens after L2 goes through
	 * save and restore
	 */
	GUEST_ASSERT(l2_save_restore_done);

	/*
	 * Ensure the exit from L2 is due to preemption timer expiry
	 */
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_PREEMPTION_TIMER);

	l1_tsc_deadline = l1_vmx_pt_start +
		(PREEMPTION_TIMER_VALUE << vmx_pt_rate);

	l2_tsc_deadline = l2_vmx_pt_start +
		(PREEMPTION_TIMER_VALUE << vmx_pt_rate);

	/*
	 * Sync with the host and pass the l1|l2 pt_expiry_finish times and
	 * TSC deadlines so that the host can verify they are as expected
	 */
	GUEST_SYNC_ARGS(2, l1_vmx_pt_finish, l1_tsc_deadline,
		l2_vmx_pt_finish, l2_tsc_deadline);
}

void guest_code(struct vmx_pages *vmx_pages)
{
	if (vmx_pages)
		l1_guest_code(vmx_pages);

	GUEST_DONE();
}

int main(int argc, char *argv[])
{
	vm_vaddr_t vmx_pages_gva = 0;

	struct kvm_regs regs1, regs2;
	struct kvm_vm *vm;
	struct kvm_vcpu *vcpu;
	struct kvm_x86_state *state;
	struct ucall uc;
	int stage;

	/*
	 * AMD currently does not implement any VMX features, so for now we
	 * just early out.
	 */
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_NESTED_STATE));

	/* Create VM */
	vm = vm_create_with_one_vcpu(&vcpu, guest_code);

	vcpu_regs_get(vcpu, &regs1);

	vcpu_alloc_vmx(vm, &vmx_pages_gva);
	vcpu_args_set(vcpu, 1, vmx_pages_gva);

	for (stage = 1;; stage++) {
		vcpu_run(vcpu);
		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			/* NOT REACHED */
		case UCALL_SYNC:
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}

		/* UCALL_SYNC is handled here. */
		TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
			    uc.args[1] == stage,
			    "Stage %d: Unexpected register values vmexit, got %lx",
			    stage, (ulong)uc.args[1]);

		/*
		 * If this is stage 2, verify that the VMX preemption timer
		 * expiry is as expected:
		 * From L1's perspective, verify the preemption timer hasn't
		 * expired too early.
		 * From L2's perspective, verify the preemption timer hasn't
		 * expired too late.
		 */
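		/*
		 * Both deadlines are start + (timer value scaled back up to
		 * TSC units).  If restoring nested state had restarted the
		 * timer with the full value instead of the decayed one, the
		 * expiry observed by L2 would land past l2_tsc_deadline and
		 * trip the second assert below; an expiry before
		 * l1_tsc_deadline would mean the timer fired too early.
		 */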
		if (stage == 2) {

			pr_info("Stage %d: L1 PT expiry TSC (%lu), L1 TSC deadline (%lu)\n",
				stage, uc.args[2], uc.args[3]);

			pr_info("Stage %d: L2 PT expiry TSC (%lu), L2 TSC deadline (%lu)\n",
				stage, uc.args[4], uc.args[5]);

			TEST_ASSERT(uc.args[2] >= uc.args[3],
				    "Stage %d: L1 PT expiry TSC (%lu) < L1 TSC deadline (%lu)",
				    stage, uc.args[2], uc.args[3]);

			TEST_ASSERT(uc.args[4] < uc.args[5],
				    "Stage %d: L2 PT expiry TSC (%lu) > L2 TSC deadline (%lu)",
				    stage, uc.args[4], uc.args[5]);
		}

		state = vcpu_save_state(vcpu);
		memset(&regs1, 0, sizeof(regs1));
		vcpu_regs_get(vcpu, &regs1);

		kvm_vm_release(vm);

		/* Restore state in a new VM. */
		vcpu = vm_recreate_with_one_vcpu(vm);
		vcpu_load_state(vcpu, state);
		kvm_x86_state_cleanup(state);

		memset(&regs2, 0, sizeof(regs2));
		vcpu_regs_get(vcpu, &regs2);
		TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
			    "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
			    (ulong) regs2.rdi, (ulong) regs2.rsi);
	}

done:
	kvm_vm_free(vm);
}
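
/*
 * Build/run sketch (assuming this file lives in the KVM selftests tree,
 * e.g. tools/testing/selftests/kvm/x86_64/): build with
 * "make -C tools/testing/selftests TARGETS=kvm" and run the resulting
 * vmx_preemption_timer_test binary on an Intel host with nested VMX
 * enabled (kvm_intel.nested=1).  The TEST_REQUIRE checks skip the test
 * when VMX or KVM_CAP_NESTED_STATE is unavailable.
 */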