// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021, Red Hat Inc.
 *
 * Generic tests for KVM CPUID set/get ioctls
 */
#include <asm/kvm_para.h>
#include <linux/kvm_para.h>
#include <stdint.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

/* CPUIDs known to differ */
struct {
	u32 function;
	u32 index;
} mangled_cpuids[] = {
	/*
	 * These entries depend on the vCPU's XCR0 register and IA32_XSS MSR,
	 * which are not controlled for by this test.
	 */
	{.function = 0xd, .index = 0},
	{.function = 0xd, .index = 1},
};

/*
 * Execute CPUID in the guest and verify each leaf/index matches the values
 * KVM reported to userspace.
 */
static void test_guest_cpuids(struct kvm_cpuid2 *guest_cpuid)
{
	int i;
	u32 eax, ebx, ecx, edx;

	for (i = 0; i < guest_cpuid->nent; i++) {
		__cpuid(guest_cpuid->entries[i].function,
			guest_cpuid->entries[i].index,
			&eax, &ebx, &ecx, &edx);

		GUEST_ASSERT_EQ(eax, guest_cpuid->entries[i].eax);
		GUEST_ASSERT_EQ(ebx, guest_cpuid->entries[i].ebx);
		GUEST_ASSERT_EQ(ecx, guest_cpuid->entries[i].ecx);
		GUEST_ASSERT_EQ(edx, guest_cpuid->entries[i].edx);
	}
}

static void guest_main(struct kvm_cpuid2 *guest_cpuid)
{
	GUEST_SYNC(1);

	test_guest_cpuids(guest_cpuid);

	GUEST_SYNC(2);

	GUEST_ASSERT_EQ(this_cpu_property(X86_PROPERTY_MAX_KVM_LEAF), 0x40000001);

	GUEST_DONE();
}

static bool is_cpuid_mangled(const struct kvm_cpuid_entry2 *entry)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mangled_cpuids); i++) {
		if (mangled_cpuids[i].function == entry->function &&
		    mangled_cpuids[i].index == entry->index)
			return true;
	}

	return false;
}

static void compare_cpuids(const struct kvm_cpuid2 *cpuid1,
			   const struct kvm_cpuid2 *cpuid2)
{
	const struct kvm_cpuid_entry2 *e1, *e2;
	int i;

	TEST_ASSERT(cpuid1->nent == cpuid2->nent,
		    "CPUID nent mismatch: %d vs. %d", cpuid1->nent, cpuid2->nent);

	for (i = 0; i < cpuid1->nent; i++) {
		e1 = &cpuid1->entries[i];
		e2 = &cpuid2->entries[i];

		TEST_ASSERT(e1->function == e2->function &&
			    e1->index == e2->index && e1->flags == e2->flags,
			    "CPUID entries[%d] mismatch: 0x%x.%d.%x vs. 0x%x.%d.%x",
			    i, e1->function, e1->index, e1->flags,
			    e2->function, e2->index, e2->flags);

		if (is_cpuid_mangled(e1))
			continue;

		TEST_ASSERT(e1->eax == e2->eax && e1->ebx == e2->ebx &&
			    e1->ecx == e2->ecx && e1->edx == e2->edx,
			    "CPUID 0x%x.%x differs: 0x%x:0x%x:0x%x:0x%x vs 0x%x:0x%x:0x%x:0x%x",
			    e1->function, e1->index,
			    e1->eax, e1->ebx, e1->ecx, e1->edx,
			    e2->eax, e2->ebx, e2->ecx, e2->edx);
	}
}

static void run_vcpu(struct kvm_vcpu *vcpu, int stage)
{
	struct ucall uc;

	vcpu_run(vcpu);

	switch (get_ucall(vcpu, &uc)) {
	case UCALL_SYNC:
		TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
			    uc.args[1] == stage + 1,
			    "Stage %d: Unexpected register values vmexit, got %lx",
			    stage + 1, (ulong)uc.args[1]);
		return;
	case UCALL_DONE:
		return;
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT(uc);
	default:
		TEST_ASSERT(false, "Unexpected exit: %s",
			    exit_reason_str(vcpu->run->exit_reason));
	}
}

/*
 * Copy the vCPU's CPUID entries into guest memory so guest_main() can compare
 * them against the CPUID instruction's output.
 */
struct kvm_cpuid2 *vcpu_alloc_cpuid(struct kvm_vm *vm, vm_vaddr_t *p_gva, struct kvm_cpuid2 *cpuid)
{
	int size = sizeof(*cpuid) + cpuid->nent * sizeof(cpuid->entries[0]);
	vm_vaddr_t gva = vm_vaddr_alloc(vm, size, KVM_UTIL_MIN_VADDR);
	struct kvm_cpuid2 *guest_cpuids = addr_gva2hva(vm, gva);

	memcpy(guest_cpuids, cpuid, size);

	*p_gva = gva;
	return guest_cpuids;
}

/*
 * After KVM_RUN, KVM_SET_CPUID2 may restate the existing CPUID, but attempts
 * to change CPU features or MAXPHYADDR must be rejected.
 */
static void set_cpuid_after_run(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *ent;
	int rc;
	u32 eax, ebx, x;

	/* Setting unmodified CPUID is allowed */
	rc = __vcpu_set_cpuid(vcpu);
	TEST_ASSERT(!rc, "Setting unmodified CPUID after KVM_RUN failed: %d", rc);

	/* Changing CPU features is forbidden */
	ent = vcpu_get_cpuid_entry(vcpu, 0x7);
	ebx = ent->ebx;
	ent->ebx--;
	rc = __vcpu_set_cpuid(vcpu);
	TEST_ASSERT(rc, "Changing CPU features should fail");
	ent->ebx = ebx;

	/* Changing MAXPHYADDR is forbidden */
	ent = vcpu_get_cpuid_entry(vcpu, 0x80000008);
	eax = ent->eax;
	x = eax & 0xff;
	ent->eax = (eax & ~0xffu) | (x - 1);
	rc = __vcpu_set_cpuid(vcpu);
	TEST_ASSERT(rc, "Changing MAXPHYADDR should fail");
	ent->eax = eax;
}

/*
 * KVM_GET_CPUID2 must update nent on success, and must fail with E2BIG
 * without modifying nent when the caller's array is too small.
 */
static void test_get_cpuid2(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid2 *cpuid = allocate_kvm_cpuid2(vcpu->cpuid->nent + 1);
	int i, r;

	vcpu_ioctl(vcpu, KVM_GET_CPUID2, cpuid);
	TEST_ASSERT(cpuid->nent == vcpu->cpuid->nent,
		    "KVM didn't update nent on success, wanted %u, got %u",
		    vcpu->cpuid->nent, cpuid->nent);

	for (i = 0; i < vcpu->cpuid->nent; i++) {
		cpuid->nent = i;
		r = __vcpu_ioctl(vcpu, KVM_GET_CPUID2, cpuid);
		TEST_ASSERT(r && errno == E2BIG, KVM_IOCTL_ERROR(KVM_GET_CPUID2, r));
		TEST_ASSERT(cpuid->nent == i, "KVM modified nent on failure");
	}
	free(cpuid);
}

int main(void)
{
	struct kvm_vcpu *vcpu;
	vm_vaddr_t cpuid_gva;
	struct kvm_vm *vm;
	int stage;

	vm = vm_create_with_one_vcpu(&vcpu, guest_main);

	compare_cpuids(kvm_get_supported_cpuid(), vcpu->cpuid);

	vcpu_alloc_cpuid(vm, &cpuid_gva, vcpu->cpuid);

	vcpu_args_set(vcpu, 1, cpuid_gva);

	for (stage = 0; stage < 3; stage++)
		run_vcpu(vcpu, stage);

	set_cpuid_after_run(vcpu);

	test_get_cpuid2(vcpu);

	kvm_vm_free(vm);
}