// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018, Google LLC.
 */

#include "linux/bitmap.h"
#include "test_util.h"
#include "kvm_util.h"
#include "pmu.h"
#include "processor.h"
#include "svm_util.h"
#include "sev.h"
#include "vmx.h"

#ifndef NUM_INTERRUPTS
#define NUM_INTERRUPTS 256
#endif

#define KERNEL_CS	0x8
#define KERNEL_DS	0x10
#define KERNEL_TSS	0x18

vm_vaddr_t exception_handlers;
bool host_cpu_is_amd;
bool host_cpu_is_intel;
bool is_forced_emulation_enabled;
uint64_t guest_tsc_khz;

const char *ex_str(int vector)
{
	switch (vector) {
#define VEC_STR(v) case v##_VECTOR: return "#" #v
	case DE_VECTOR: return "no exception";
	case KVM_MAGIC_DE_VECTOR: return "#DE";
	VEC_STR(DB);
	VEC_STR(NMI);
	VEC_STR(BP);
	VEC_STR(OF);
	VEC_STR(BR);
	VEC_STR(UD);
	VEC_STR(NM);
	VEC_STR(DF);
	VEC_STR(TS);
	VEC_STR(NP);
	VEC_STR(SS);
	VEC_STR(GP);
	VEC_STR(PF);
	VEC_STR(MF);
	VEC_STR(AC);
	VEC_STR(MC);
	VEC_STR(XM);
	VEC_STR(VE);
	VEC_STR(CP);
	VEC_STR(HV);
	VEC_STR(VC);
	VEC_STR(SX);
	default: return "#??";
#undef VEC_STR
	}
}

static void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent)
{
	fprintf(stream, "%*srax: 0x%.16llx rbx: 0x%.16llx "
		"rcx: 0x%.16llx rdx: 0x%.16llx\n",
		indent, "",
		regs->rax, regs->rbx, regs->rcx, regs->rdx);
	fprintf(stream, "%*srsi: 0x%.16llx rdi: 0x%.16llx "
		"rsp: 0x%.16llx rbp: 0x%.16llx\n",
		indent, "",
		regs->rsi, regs->rdi, regs->rsp, regs->rbp);
	fprintf(stream, "%*sr8: 0x%.16llx r9: 0x%.16llx "
		"r10: 0x%.16llx r11: 0x%.16llx\n",
		indent, "",
		regs->r8, regs->r9, regs->r10, regs->r11);
	fprintf(stream, "%*sr12: 0x%.16llx r13: 0x%.16llx "
		"r14: 0x%.16llx r15: 0x%.16llx\n",
		indent, "",
		regs->r12, regs->r13, regs->r14, regs->r15);
	fprintf(stream, "%*srip: 0x%.16llx rfl: 0x%.16llx\n",
		indent, "",
		regs->rip, regs->rflags);
}

static void segment_dump(FILE *stream, struct kvm_segment *segment,
			 uint8_t indent)
{
	fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.8x "
		"selector: 0x%.4x type: 0x%.2x\n",
		indent, "", segment->base, segment->limit,
		segment->selector, segment->type);
	fprintf(stream, "%*spresent: 0x%.2x dpl: 0x%.2x "
		"db: 0x%.2x s: 0x%.2x l: 0x%.2x\n",
		indent, "", segment->present, segment->dpl,
		segment->db, segment->s, segment->l);
	fprintf(stream, "%*sg: 0x%.2x avl: 0x%.2x "
		"unusable: 0x%.2x padding: 0x%.2x\n",
		indent, "", segment->g, segment->avl,
		segment->unusable, segment->padding);
}

static void dtable_dump(FILE *stream, struct kvm_dtable *dtable,
			uint8_t indent)
{
	fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.4x "
		"padding: 0x%.4x 0x%.4x 0x%.4x\n",
		indent, "", dtable->base, dtable->limit,
		dtable->padding[0], dtable->padding[1], dtable->padding[2]);
}

static void sregs_dump(FILE *stream, struct kvm_sregs *sregs, uint8_t indent)
{
	unsigned int i;

	fprintf(stream, "%*scs:\n", indent, "");
	segment_dump(stream, &sregs->cs, indent + 2);
	fprintf(stream, "%*sds:\n", indent, "");
	segment_dump(stream, &sregs->ds, indent + 2);
	fprintf(stream, "%*ses:\n", indent, "");
	segment_dump(stream, &sregs->es, indent + 2);
	fprintf(stream, "%*sfs:\n", indent, "");
	segment_dump(stream, &sregs->fs, indent + 2);
	fprintf(stream, "%*sgs:\n", indent, "");
	segment_dump(stream, &sregs->gs, indent + 2);
	fprintf(stream, "%*sss:\n", indent, "");
	segment_dump(stream, &sregs->ss, indent + 2);
	fprintf(stream, "%*str:\n", indent, "");
	segment_dump(stream, &sregs->tr, indent + 2);
	fprintf(stream, "%*sldt:\n", indent, "");
	segment_dump(stream, &sregs->ldt, indent + 2);

	fprintf(stream, "%*sgdt:\n", indent, "");
	dtable_dump(stream, &sregs->gdt, indent + 2);
	fprintf(stream, "%*sidt:\n", indent, "");
	dtable_dump(stream, &sregs->idt, indent + 2);

	fprintf(stream, "%*scr0: 0x%.16llx cr2: 0x%.16llx "
		"cr3: 0x%.16llx cr4: 0x%.16llx\n",
		indent, "",
		sregs->cr0, sregs->cr2, sregs->cr3, sregs->cr4);
	fprintf(stream, "%*scr8: 0x%.16llx efer: 0x%.16llx "
		"apic_base: 0x%.16llx\n",
		indent, "",
		sregs->cr8, sregs->efer, sregs->apic_base);

	fprintf(stream, "%*sinterrupt_bitmap:\n", indent, "");
	for (i = 0; i < (KVM_NR_INTERRUPTS + 63) / 64; i++) {
		fprintf(stream, "%*s%.16llx\n", indent + 2, "",
			sregs->interrupt_bitmap[i]);
	}
}

bool kvm_is_tdp_enabled(void)
{
	if (host_cpu_is_intel)
		return get_kvm_intel_param_bool("ept");
	else
		return get_kvm_amd_param_bool("npt");
}

static void virt_mmu_init(struct kvm_vm *vm, struct kvm_mmu *mmu,
			  struct pte_masks *pte_masks)
{
	/* If needed, create the top-level page table. */
	if (!mmu->pgd_created) {
		mmu->pgd = vm_alloc_page_table(vm);
		mmu->pgd_created = true;
		mmu->arch.pte_masks = *pte_masks;
	}

	TEST_ASSERT(mmu->pgtable_levels == 4 || mmu->pgtable_levels == 5,
		    "Selftests MMU only supports 4-level and 5-level paging, not %u-level paging",
		    mmu->pgtable_levels);
}

void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
	TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
		    "Unknown or unsupported guest mode: 0x%x", vm->mode);

	struct pte_masks pte_masks = (struct pte_masks){
		.present	= BIT_ULL(0),
		.writable	= BIT_ULL(1),
		.user		= BIT_ULL(2),
		.accessed	= BIT_ULL(5),
		.dirty		= BIT_ULL(6),
		.huge		= BIT_ULL(7),
		.nx		= BIT_ULL(63),
		.executable	= 0,
		.c		= vm->arch.c_bit,
		.s		= vm->arch.s_bit,
	};

	virt_mmu_init(vm, &vm->mmu, &pte_masks);
}

void tdp_mmu_init(struct kvm_vm *vm, int pgtable_levels,
		  struct pte_masks *pte_masks)
{
	TEST_ASSERT(!vm->stage2_mmu.pgtable_levels, "TDP MMU already initialized");

	vm->stage2_mmu.pgtable_levels = pgtable_levels;
	virt_mmu_init(vm, &vm->stage2_mmu, pte_masks);
}

static void *virt_get_pte(struct kvm_vm *vm, struct kvm_mmu *mmu,
			  uint64_t *parent_pte, uint64_t vaddr, int level)
{
	uint64_t pt_gpa = PTE_GET_PA(*parent_pte);
	uint64_t *page_table = addr_gpa2hva(vm, pt_gpa);
	int index = (vaddr >> PG_LEVEL_SHIFT(level)) & 0x1ffu;

	TEST_ASSERT((*parent_pte == mmu->pgd) || is_present_pte(mmu, parent_pte),
		    "Parent PTE (level %d) not PRESENT for gva: 0x%08lx",
		    level + 1, vaddr);

	return &page_table[index];
}

static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
				       struct kvm_mmu *mmu,
				       uint64_t *parent_pte,
				       uint64_t vaddr,
				       uint64_t paddr,
				       int current_level,
				       int target_level)
{
	uint64_t *pte = virt_get_pte(vm, mmu, parent_pte, vaddr, current_level);

	paddr = vm_untag_gpa(vm, paddr);

	if (!is_present_pte(mmu, pte)) {
		*pte = PTE_PRESENT_MASK(mmu) | PTE_READABLE_MASK(mmu) |
		       PTE_WRITABLE_MASK(mmu) | PTE_EXECUTABLE_MASK(mmu) |
		       PTE_ALWAYS_SET_MASK(mmu);
		if (current_level == target_level)
			*pte |= PTE_HUGE_MASK(mmu) | (paddr & PHYSICAL_PAGE_MASK);
		else
			*pte |= vm_alloc_page_table(vm) & PHYSICAL_PAGE_MASK;
	} else {
		/*
		 * Entry already present. Assert that the caller doesn't want
		 * a hugepage at this level, and that there isn't a hugepage at
		 * this level.
		 */
		TEST_ASSERT(current_level != target_level,
			    "Cannot create hugepage at level: %u, vaddr: 0x%lx",
			    current_level, vaddr);
		TEST_ASSERT(!is_huge_pte(mmu, pte),
			    "Cannot create page table at level: %u, vaddr: 0x%lx",
			    current_level, vaddr);
	}
	return pte;
}

void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, uint64_t vaddr,
		   uint64_t paddr, int level)
{
	const uint64_t pg_size = PG_LEVEL_SIZE(level);
	uint64_t *pte = &mmu->pgd;
	int current_level;

	TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
		    "Unknown or unsupported guest mode: 0x%x", vm->mode);

	TEST_ASSERT((vaddr % pg_size) == 0,
		    "Virtual address not aligned,\n"
		    "vaddr: 0x%lx page size: 0x%lx", vaddr, pg_size);
	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (vaddr >> vm->page_shift)),
		    "Invalid virtual address, vaddr: 0x%lx", vaddr);
	TEST_ASSERT((paddr % pg_size) == 0,
		    "Physical address not aligned,\n"
		    " paddr: 0x%lx page size: 0x%lx", paddr, pg_size);
	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
		    "Physical address beyond maximum supported,\n"
		    " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		    paddr, vm->max_gfn, vm->page_size);
	TEST_ASSERT(vm_untag_gpa(vm, paddr) == paddr,
		    "Unexpected bits in paddr: %lx", paddr);

	TEST_ASSERT(!PTE_EXECUTABLE_MASK(mmu) || !PTE_NX_MASK(mmu),
		    "X and NX bit masks cannot be used simultaneously");

	/*
	 * Allocate upper level page tables, if not already present. Return
	 * early if a hugepage was created.
	 */
	for (current_level = mmu->pgtable_levels;
	     current_level > PG_LEVEL_4K;
	     current_level--) {
		pte = virt_create_upper_pte(vm, mmu, pte, vaddr, paddr,
					    current_level, level);
		if (is_huge_pte(mmu, pte))
			return;
	}

	/* Fill in page table entry. */
	pte = virt_get_pte(vm, mmu, pte, vaddr, PG_LEVEL_4K);
	TEST_ASSERT(!is_present_pte(mmu, pte),
		    "PTE already present for 4k page at vaddr: 0x%lx", vaddr);
	*pte = PTE_PRESENT_MASK(mmu) | PTE_READABLE_MASK(mmu) |
	       PTE_WRITABLE_MASK(mmu) | PTE_EXECUTABLE_MASK(mmu) |
	       PTE_ALWAYS_SET_MASK(mmu) | (paddr & PHYSICAL_PAGE_MASK);

	/*
	 * Neither SEV nor TDX supports shared page tables, so only the final
	 * leaf PTE needs the C/S-bit set manually.
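	 * The C-bit marks encrypted (private) mappings for SEV; the S-bit
	 * marks shared mappings for TDX.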
	 */
	if (vm_is_gpa_protected(vm, paddr))
		*pte |= PTE_C_BIT_MASK(mmu);
	else
		*pte |= PTE_S_BIT_MASK(mmu);
}

void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
	__virt_pg_map(vm, &vm->mmu, vaddr, paddr, PG_LEVEL_4K);
}

void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
		    uint64_t nr_bytes, int level)
{
	uint64_t pg_size = PG_LEVEL_SIZE(level);
	uint64_t nr_pages = nr_bytes / pg_size;
	int i;

	TEST_ASSERT(nr_bytes % pg_size == 0,
		    "Region size not aligned: nr_bytes: 0x%lx, page size: 0x%lx",
		    nr_bytes, pg_size);

	for (i = 0; i < nr_pages; i++) {
		__virt_pg_map(vm, &vm->mmu, vaddr, paddr, level);
		sparsebit_set_num(vm->vpages_mapped, vaddr >> vm->page_shift,
				  nr_bytes / PAGE_SIZE);

		vaddr += pg_size;
		paddr += pg_size;
	}
}

static bool vm_is_target_pte(struct kvm_mmu *mmu, uint64_t *pte,
			     int *level, int current_level)
{
	if (is_huge_pte(mmu, pte)) {
		TEST_ASSERT(*level == PG_LEVEL_NONE ||
			    *level == current_level,
			    "Unexpected hugepage at level %d", current_level);
		*level = current_level;
	}

	return *level == current_level;
}

static uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm,
					   struct kvm_mmu *mmu,
					   uint64_t vaddr,
					   int *level)
{
	int va_width = 12 + (mmu->pgtable_levels) * 9;
	uint64_t *pte = &mmu->pgd;
	int current_level;

	TEST_ASSERT(!vm->arch.is_pt_protected,
		    "Walking page tables of protected guests is impossible");

	TEST_ASSERT(*level >= PG_LEVEL_NONE && *level <= mmu->pgtable_levels,
		    "Invalid PG_LEVEL_* '%d'", *level);

	TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
		    "Unknown or unsupported guest mode: 0x%x", vm->mode);
	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
				     (vaddr >> vm->page_shift)),
		    "Invalid virtual address, vaddr: 0x%lx",
		    vaddr);
	/*
	 * Check that the vaddr is a sign-extended va_width value.
	 */
	TEST_ASSERT(vaddr ==
		    (((int64_t)vaddr << (64 - va_width) >> (64 - va_width))),
		    "Canonical check failed. The virtual address is invalid.");
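
	/*
	 * Walk from the root toward the 4K level; vm_is_target_pte() ends the
	 * walk early if a hugepage (or the caller's requested level) is found.
	 */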
	for (current_level = mmu->pgtable_levels;
	     current_level > PG_LEVEL_4K;
	     current_level--) {
		pte = virt_get_pte(vm, mmu, pte, vaddr, current_level);
		if (vm_is_target_pte(mmu, pte, level, current_level))
			return pte;
	}

	return virt_get_pte(vm, mmu, pte, vaddr, PG_LEVEL_4K);
}

uint64_t *tdp_get_pte(struct kvm_vm *vm, uint64_t l2_gpa)
{
	int level = PG_LEVEL_4K;

	return __vm_get_page_table_entry(vm, &vm->stage2_mmu, l2_gpa, &level);
}

uint64_t *vm_get_pte(struct kvm_vm *vm, uint64_t vaddr)
{
	int level = PG_LEVEL_4K;

	return __vm_get_page_table_entry(vm, &vm->mmu, vaddr, &level);
}

void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	struct kvm_mmu *mmu = &vm->mmu;
	uint64_t *pml4e, *pml4e_start;
	uint64_t *pdpe, *pdpe_start;
	uint64_t *pde, *pde_start;
	uint64_t *pte, *pte_start;

	if (!mmu->pgd_created)
		return;

	fprintf(stream, "%*s "
		" no\n", indent, "");
	fprintf(stream, "%*s index hvaddr gpaddr "
		"addr w exec dirty\n",
		indent, "");
	pml4e_start = (uint64_t *) addr_gpa2hva(vm, mmu->pgd);
	for (uint16_t n1 = 0; n1 <= 0x1ffu; n1++) {
		pml4e = &pml4e_start[n1];
		if (!is_present_pte(mmu, pml4e))
			continue;
		fprintf(stream, "%*spml4e 0x%-3zx %p 0x%-12lx 0x%-10llx %u "
			" %u\n",
			indent, "",
			pml4e - pml4e_start, pml4e,
			addr_hva2gpa(vm, pml4e), PTE_GET_PFN(*pml4e),
			is_writable_pte(mmu, pml4e), is_nx_pte(mmu, pml4e));

		pdpe_start = addr_gpa2hva(vm, *pml4e & PHYSICAL_PAGE_MASK);
		for (uint16_t n2 = 0; n2 <= 0x1ffu; n2++) {
			pdpe = &pdpe_start[n2];
			if (!is_present_pte(mmu, pdpe))
				continue;
			fprintf(stream, "%*spdpe 0x%-3zx %p 0x%-12lx 0x%-10llx "
				"%u %u\n",
				indent, "",
				pdpe - pdpe_start, pdpe,
				addr_hva2gpa(vm, pdpe),
				PTE_GET_PFN(*pdpe), is_writable_pte(mmu, pdpe),
				is_nx_pte(mmu, pdpe));

			pde_start = addr_gpa2hva(vm, *pdpe & PHYSICAL_PAGE_MASK);
			for (uint16_t n3 = 0; n3 <= 0x1ffu; n3++) {
				pde = &pde_start[n3];
				if (!is_present_pte(mmu, pde))
					continue;
				fprintf(stream, "%*spde 0x%-3zx %p "
					"0x%-12lx 0x%-10llx %u %u\n",
					indent, "", pde - pde_start, pde,
					addr_hva2gpa(vm, pde),
					PTE_GET_PFN(*pde), is_writable_pte(mmu, pde),
					is_nx_pte(mmu, pde));

				pte_start = addr_gpa2hva(vm, *pde & PHYSICAL_PAGE_MASK);
				for (uint16_t n4 = 0; n4 <= 0x1ffu; n4++) {
					pte = &pte_start[n4];
					if (!is_present_pte(mmu, pte))
						continue;
					fprintf(stream, "%*spte 0x%-3zx %p "
						"0x%-12lx 0x%-10llx %u %u "
						" %u 0x%-10lx\n",
						indent, "",
						pte - pte_start, pte,
						addr_hva2gpa(vm, pte),
						PTE_GET_PFN(*pte),
						is_writable_pte(mmu, pte),
						is_nx_pte(mmu, pte),
						is_dirty_pte(mmu, pte),
						((uint64_t) n1 << 27)
						| ((uint64_t) n2 << 18)
						| ((uint64_t) n3 << 9)
						| ((uint64_t) n4));
				}
			}
		}
	}
}

void vm_enable_tdp(struct kvm_vm *vm)
{
	if (kvm_cpu_has(X86_FEATURE_VMX))
		vm_enable_ept(vm);
	else
		vm_enable_npt(vm);
}

bool kvm_cpu_has_tdp(void)
{
	return kvm_cpu_has_ept() || kvm_cpu_has_npt();
}

void __tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr,
	       uint64_t size, int level)
{
	size_t page_size = PG_LEVEL_SIZE(level);
	size_t npages = size / page_size;

	TEST_ASSERT(nested_paddr + size > nested_paddr, "Vaddr overflow");
	TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
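
	/* Populate the stage-2 (TDP) page tables, one page_size mapping at a time. */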
	while (npages--) {
		__virt_pg_map(vm, &vm->stage2_mmu, nested_paddr, paddr, level);
		nested_paddr += page_size;
		paddr += page_size;
	}
}

void tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr,
	     uint64_t size)
{
	__tdp_map(vm, nested_paddr, paddr, size, PG_LEVEL_4K);
}

/* Prepare an identity extended page table that maps all the
 * physical pages in the VM.
 */
void tdp_identity_map_default_memslots(struct kvm_vm *vm)
{
	uint32_t s, memslot = 0;
	sparsebit_idx_t i, last;
	struct userspace_mem_region *region = memslot2region(vm, memslot);

	/* Only memslot 0 is mapped here, ensure it's the only one being used */
	for (s = 0; s < NR_MEM_REGIONS; s++)
		TEST_ASSERT_EQ(vm->memslots[s], 0);

	i = (region->region.guest_phys_addr >> vm->page_shift) - 1;
	last = i + (region->region.memory_size >> vm->page_shift);
	for (;;) {
		i = sparsebit_next_clear(region->unused_phy_pages, i);
		if (i > last)
			break;

		tdp_map(vm, (uint64_t)i << vm->page_shift,
			(uint64_t)i << vm->page_shift, 1 << vm->page_shift);
	}
}

/* Identity map a region with 1GiB Pages. */
void tdp_identity_map_1g(struct kvm_vm *vm, uint64_t addr, uint64_t size)
{
	__tdp_map(vm, addr, addr, size, PG_LEVEL_1G);
}

/*
 * Set Unusable Segment
 *
 * Input Args: None
 *
 * Output Args:
 *   segp - Pointer to segment register
 *
 * Return: None
 *
 * Sets the segment register pointed to by @segp to an unusable state.
 */
static void kvm_seg_set_unusable(struct kvm_segment *segp)
{
	memset(segp, 0, sizeof(*segp));
	segp->unusable = true;
}

static void kvm_seg_fill_gdt_64bit(struct kvm_vm *vm, struct kvm_segment *segp)
{
	void *gdt = addr_gva2hva(vm, vm->arch.gdt);
	struct desc64 *desc = gdt + (segp->selector >> 3) * 8;

	desc->limit0 = segp->limit & 0xFFFF;
	desc->base0 = segp->base & 0xFFFF;
	desc->base1 = segp->base >> 16;
	desc->type = segp->type;
	desc->s = segp->s;
	desc->dpl = segp->dpl;
	desc->p = segp->present;
	desc->limit1 = segp->limit >> 16;
	desc->avl = segp->avl;
	desc->l = segp->l;
	desc->db = segp->db;
	desc->g = segp->g;
	desc->base2 = segp->base >> 24;
	if (!segp->s)
		desc->base3 = segp->base >> 32;
}

static void kvm_seg_set_kernel_code_64bit(struct kvm_segment *segp)
{
	memset(segp, 0, sizeof(*segp));
	segp->selector = KERNEL_CS;
	segp->limit = 0xFFFFFFFFu;
	segp->s = 0x1; /* kTypeCodeData */
	segp->type = 0x08 | 0x01 | 0x02; /* kFlagCode | kFlagCodeAccessed
					  * | kFlagCodeReadable
					  */
	segp->g = true;
	segp->l = true;
	segp->present = 1;
}

static void kvm_seg_set_kernel_data_64bit(struct kvm_segment *segp)
{
	memset(segp, 0, sizeof(*segp));
	segp->selector = KERNEL_DS;
	segp->limit = 0xFFFFFFFFu;
	segp->s = 0x1; /* kTypeCodeData */
	segp->type = 0x00 | 0x01 | 0x02; /* kFlagData | kFlagDataAccessed
					  * | kFlagDataWritable
					  */
	segp->g = true;
	segp->present = true;
}

vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	int level = PG_LEVEL_NONE;
	uint64_t *pte = __vm_get_page_table_entry(vm, &vm->mmu, gva, &level);

	TEST_ASSERT(is_present_pte(&vm->mmu, pte),
		    "Leaf PTE not PRESENT for gva: 0x%08lx", gva);

	/*
	 * No need for a hugepage mask on the PTE, x86-64 requires the "unused"
	 * address bits to be zero.
	 */
	return vm_untag_gpa(vm, PTE_GET_PA(*pte)) | (gva & ~HUGEPAGE_MASK(level));
}

static void kvm_seg_set_tss_64bit(vm_vaddr_t base, struct kvm_segment *segp)
{
	memset(segp, 0, sizeof(*segp));
	segp->base = base;
	segp->limit = 0x67;
	segp->selector = KERNEL_TSS;
	segp->type = 0xb;
	segp->present = 1;
}

static void vcpu_init_sregs(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
	struct kvm_sregs sregs;

	TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
		    "Unknown or unsupported guest mode: 0x%x", vm->mode);

	/* Set mode specific system register values. */
	vcpu_sregs_get(vcpu, &sregs);

	sregs.idt.base = vm->arch.idt;
	sregs.idt.limit = NUM_INTERRUPTS * sizeof(struct idt_entry) - 1;
	sregs.gdt.base = vm->arch.gdt;
	sregs.gdt.limit = getpagesize() - 1;

	sregs.cr0 = X86_CR0_PE | X86_CR0_NE | X86_CR0_PG;
	sregs.cr4 |= X86_CR4_PAE | X86_CR4_OSFXSR;
	if (kvm_cpu_has(X86_FEATURE_XSAVE))
		sregs.cr4 |= X86_CR4_OSXSAVE;
	if (vm->mmu.pgtable_levels == 5)
		sregs.cr4 |= X86_CR4_LA57;
	sregs.efer |= (EFER_LME | EFER_LMA | EFER_NX);

	kvm_seg_set_unusable(&sregs.ldt);
	kvm_seg_set_kernel_code_64bit(&sregs.cs);
	kvm_seg_set_kernel_data_64bit(&sregs.ds);
	kvm_seg_set_kernel_data_64bit(&sregs.es);
	kvm_seg_set_kernel_data_64bit(&sregs.gs);
	kvm_seg_set_tss_64bit(vm->arch.tss, &sregs.tr);

	sregs.cr3 = vm->mmu.pgd;
	vcpu_sregs_set(vcpu, &sregs);
}

static void vcpu_init_xcrs(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
	struct kvm_xcrs xcrs = {
		.nr_xcrs = 1,
		.xcrs[0].xcr = 0,
		.xcrs[0].value = kvm_cpu_supported_xcr0(),
	};

	if (!kvm_cpu_has(X86_FEATURE_XSAVE))
		return;

	vcpu_xcrs_set(vcpu, &xcrs);
}

static void set_idt_entry(struct kvm_vm *vm, int vector, unsigned long addr,
			  int dpl, unsigned short selector)
{
	struct idt_entry *base =
		(struct idt_entry *)addr_gva2hva(vm, vm->arch.idt);
	struct idt_entry *e = &base[vector];

	memset(e, 0, sizeof(*e));
	e->offset0 = addr;
	e->selector = selector;
	e->ist = 0;
	e->type = 14;
	e->dpl = dpl;
	e->p = 1;
	e->offset1 = addr >> 16;
	e->offset2 = addr >> 32;
}

static bool kvm_fixup_exception(struct ex_regs *regs)
{
	if (regs->r9 != KVM_EXCEPTION_MAGIC || regs->rip != regs->r10)
		return false;

	if (regs->vector == DE_VECTOR)
		regs->vector = KVM_MAGIC_DE_VECTOR;

	regs->rip = regs->r11;
	regs->r9 = regs->vector;
	regs->r10 = regs->error_code;
	return true;
}

void route_exception(struct ex_regs *regs)
{
	typedef void(*handler)(struct ex_regs *);
	handler *handlers = (handler *)exception_handlers;

	if (handlers && handlers[regs->vector]) {
		handlers[regs->vector](regs);
		return;
	}

	if (kvm_fixup_exception(regs))
		return;

	GUEST_FAIL("Unhandled exception '0x%lx' at guest RIP '0x%lx'",
		   regs->vector, regs->rip);
}

static void vm_init_descriptor_tables(struct kvm_vm *vm)
{
	extern void *idt_handlers;
	struct kvm_segment seg;
	int i;

	vm->arch.gdt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
	vm->arch.idt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
	vm->handlers = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
	vm->arch.tss = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);

	/* Handlers have the same address in both address spaces. */
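	/*
	 * idt_handlers is a table of per-vector entry stubs provided by the
	 * selftests' assembly helpers; point every IDT vector at its stub,
	 * at DPL0 with the kernel code segment.
	 */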
	for (i = 0; i < NUM_INTERRUPTS; i++)
		set_idt_entry(vm, i, (unsigned long)(&idt_handlers)[i], 0, KERNEL_CS);

	*(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;

	kvm_seg_set_kernel_code_64bit(&seg);
	kvm_seg_fill_gdt_64bit(vm, &seg);

	kvm_seg_set_kernel_data_64bit(&seg);
	kvm_seg_fill_gdt_64bit(vm, &seg);

	kvm_seg_set_tss_64bit(vm->arch.tss, &seg);
	kvm_seg_fill_gdt_64bit(vm, &seg);
}

void vm_install_exception_handler(struct kvm_vm *vm, int vector,
				  void (*handler)(struct ex_regs *))
{
	vm_vaddr_t *handlers = (vm_vaddr_t *)addr_gva2hva(vm, vm->handlers);

	handlers[vector] = (vm_vaddr_t)handler;
}

void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	if (get_ucall(vcpu, &uc) == UCALL_ABORT)
		REPORT_GUEST_ASSERT(uc);
}

void kvm_arch_vm_post_create(struct kvm_vm *vm, unsigned int nr_vcpus)
{
	int r;

	TEST_ASSERT(kvm_has_cap(KVM_CAP_GET_TSC_KHZ),
		    "Require KVM_GET_TSC_KHZ to provide udelay() to guest.");

	vm_create_irqchip(vm);
	vm_init_descriptor_tables(vm);

	sync_global_to_guest(vm, host_cpu_is_intel);
	sync_global_to_guest(vm, host_cpu_is_amd);
	sync_global_to_guest(vm, is_forced_emulation_enabled);
	sync_global_to_guest(vm, pmu_errata_mask);

	if (is_sev_vm(vm)) {
		struct kvm_sev_init init = { 0 };

		vm_sev_ioctl(vm, KVM_SEV_INIT2, &init);
	}

	r = __vm_ioctl(vm, KVM_GET_TSC_KHZ, NULL);
	TEST_ASSERT(r > 0, "KVM_GET_TSC_KHZ did not provide a valid TSC frequency.");
	guest_tsc_khz = r;
	sync_global_to_guest(vm, guest_tsc_khz);
}

void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
{
	struct kvm_regs regs;

	vcpu_regs_get(vcpu, &regs);
	regs.rip = (unsigned long) guest_code;
	vcpu_regs_set(vcpu, &regs);
}

struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
{
	struct kvm_mp_state mp_state;
	struct kvm_regs regs;
	vm_vaddr_t stack_vaddr;
	struct kvm_vcpu *vcpu;

	stack_vaddr = __vm_vaddr_alloc(vm, DEFAULT_STACK_PGS * getpagesize(),
				       DEFAULT_GUEST_STACK_VADDR_MIN,
				       MEM_REGION_DATA);

	stack_vaddr += DEFAULT_STACK_PGS * getpagesize();

	/*
	 * Align stack to match calling sequence requirements in section "The
	 * Stack Frame" of the System V ABI AMD64 Architecture Processor
	 * Supplement, which requires the value (%rsp + 8) to be a multiple of
	 * 16 when control is transferred to the function entry point.
	 *
	 * If this code is ever used to launch a vCPU with 32-bit entry point it
	 * may need to subtract 4 bytes instead of 8 bytes.
	 */
	TEST_ASSERT(IS_ALIGNED(stack_vaddr, PAGE_SIZE),
		    "__vm_vaddr_alloc() did not provide a page-aligned address");
	stack_vaddr -= 8;

	vcpu = __vm_vcpu_add(vm, vcpu_id);
	vcpu_init_cpuid(vcpu, kvm_get_supported_cpuid());
	vcpu_init_sregs(vm, vcpu);
	vcpu_init_xcrs(vm, vcpu);

	/* Setup guest general purpose registers */
	vcpu_regs_get(vcpu, &regs);
	regs.rflags = regs.rflags | 0x2;
	regs.rsp = stack_vaddr;
	vcpu_regs_set(vcpu, &regs);

	/* Setup the MP state */
	mp_state.mp_state = 0;
	vcpu_mp_state_set(vcpu, &mp_state);

	/*
	 * Refresh CPUID after setting SREGS and XCR0, so that KVM's "runtime"
	 * updates to guest CPUID, e.g. for OSXSAVE and XSAVE state size, are
	 * reflected into selftests' vCPU CPUID cache, i.e. so that the cache
	 * is consistent with vCPU state.
	 */
	vcpu_get_cpuid(vcpu);
	return vcpu;
}

struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id)
{
	struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id);

	vcpu_init_cpuid(vcpu, kvm_get_supported_cpuid());

	return vcpu;
}

void vcpu_arch_free(struct kvm_vcpu *vcpu)
{
	if (vcpu->cpuid)
		free(vcpu->cpuid);
}

/* Do not use kvm_supported_cpuid directly except for validity checks. */
static void *kvm_supported_cpuid;

const struct kvm_cpuid2 *kvm_get_supported_cpuid(void)
{
	int kvm_fd;

	if (kvm_supported_cpuid)
		return kvm_supported_cpuid;

	kvm_supported_cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES);
	kvm_fd = open_kvm_dev_path_or_exit();

	kvm_ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID,
		  (struct kvm_cpuid2 *)kvm_supported_cpuid);

	close(kvm_fd);
	return kvm_supported_cpuid;
}

static uint32_t __kvm_cpu_has(const struct kvm_cpuid2 *cpuid,
			      uint32_t function, uint32_t index,
			      uint8_t reg, uint8_t lo, uint8_t hi)
{
	const struct kvm_cpuid_entry2 *entry;
	int i;

	for (i = 0; i < cpuid->nent; i++) {
		entry = &cpuid->entries[i];

		/*
		 * The output registers in kvm_cpuid_entry2 are in alphabetical
		 * order, but kvm_x86_cpu_feature matches that mess, so yay
		 * pointer shenanigans!
		 */
		if (entry->function == function && entry->index == index)
			return ((&entry->eax)[reg] & GENMASK(hi, lo)) >> lo;
	}

	return 0;
}

bool kvm_cpuid_has(const struct kvm_cpuid2 *cpuid,
		   struct kvm_x86_cpu_feature feature)
{
	return __kvm_cpu_has(cpuid, feature.function, feature.index,
			     feature.reg, feature.bit, feature.bit);
}

uint32_t kvm_cpuid_property(const struct kvm_cpuid2 *cpuid,
			    struct kvm_x86_cpu_property property)
{
	return __kvm_cpu_has(cpuid, property.function, property.index,
			     property.reg, property.lo_bit, property.hi_bit);
}

uint64_t kvm_get_feature_msr(uint64_t msr_index)
{
	struct {
		struct kvm_msrs header;
		struct kvm_msr_entry entry;
	} buffer = {};
	int r, kvm_fd;

	buffer.header.nmsrs = 1;
	buffer.entry.index = msr_index;
	kvm_fd = open_kvm_dev_path_or_exit();

	r = __kvm_ioctl(kvm_fd, KVM_GET_MSRS, &buffer.header);
	TEST_ASSERT(r == 1, KVM_IOCTL_ERROR(KVM_GET_MSRS, r));

	close(kvm_fd);
	return buffer.entry.data;
}

void __vm_xsave_require_permission(uint64_t xfeature, const char *name)
{
	int kvm_fd;
	u64 bitmask;
	long rc;
	struct kvm_device_attr attr = {
		.group = 0,
		.attr = KVM_X86_XCOMP_GUEST_SUPP,
		.addr = (unsigned long) &bitmask,
	};

	TEST_ASSERT(!kvm_supported_cpuid,
		    "kvm_get_supported_cpuid() cannot be used before ARCH_REQ_XCOMP_GUEST_PERM");

	TEST_ASSERT(is_power_of_2(xfeature),
		    "Dynamic XFeatures must be enabled one at a time");

	kvm_fd = open_kvm_dev_path_or_exit();
	rc = __kvm_ioctl(kvm_fd, KVM_GET_DEVICE_ATTR, &attr);
	close(kvm_fd);

	if (rc == -1 && (errno == ENXIO || errno == EINVAL))
		__TEST_REQUIRE(0, "KVM_X86_XCOMP_GUEST_SUPP not supported");

	TEST_ASSERT(rc == 0, "KVM_GET_DEVICE_ATTR(0, KVM_X86_XCOMP_GUEST_SUPP) error: %ld", rc);

	__TEST_REQUIRE(bitmask & xfeature,
		       "Required XSAVE feature '%s' not supported", name);

	TEST_REQUIRE(!syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM,
			      ilog2(xfeature)));
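
	/* Read the permission bitmap back to confirm the feature was granted. */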
	rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_GUEST_PERM, &bitmask);
	TEST_ASSERT(rc == 0, "prctl(ARCH_GET_XCOMP_GUEST_PERM) error: %ld", rc);
	TEST_ASSERT(bitmask & xfeature,
		    "'%s' (0x%lx) not permitted after prctl(ARCH_REQ_XCOMP_GUEST_PERM) permitted=0x%lx",
		    name, xfeature, bitmask);
}

void vcpu_init_cpuid(struct kvm_vcpu *vcpu, const struct kvm_cpuid2 *cpuid)
{
	TEST_ASSERT(cpuid != vcpu->cpuid, "@cpuid can't be the vCPU's CPUID");

	/* Allow overriding the default CPUID. */
	if (vcpu->cpuid && vcpu->cpuid->nent < cpuid->nent) {
		free(vcpu->cpuid);
		vcpu->cpuid = NULL;
	}

	if (!vcpu->cpuid)
		vcpu->cpuid = allocate_kvm_cpuid2(cpuid->nent);

	memcpy(vcpu->cpuid, cpuid, kvm_cpuid2_size(cpuid->nent));
	vcpu_set_cpuid(vcpu);
}

void vcpu_set_cpuid_property(struct kvm_vcpu *vcpu,
			     struct kvm_x86_cpu_property property,
			     uint32_t value)
{
	struct kvm_cpuid_entry2 *entry;

	entry = __vcpu_get_cpuid_entry(vcpu, property.function, property.index);

	(&entry->eax)[property.reg] &= ~GENMASK(property.hi_bit, property.lo_bit);
	(&entry->eax)[property.reg] |= value << property.lo_bit;

	vcpu_set_cpuid(vcpu);

	/* Sanity check that @value doesn't exceed the bounds in any way. */
	TEST_ASSERT_EQ(kvm_cpuid_property(vcpu->cpuid, property), value);
}

void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, uint32_t function)
{
	struct kvm_cpuid_entry2 *entry = vcpu_get_cpuid_entry(vcpu, function);

	entry->eax = 0;
	entry->ebx = 0;
	entry->ecx = 0;
	entry->edx = 0;
	vcpu_set_cpuid(vcpu);
}

void vcpu_set_or_clear_cpuid_feature(struct kvm_vcpu *vcpu,
				     struct kvm_x86_cpu_feature feature,
				     bool set)
{
	struct kvm_cpuid_entry2 *entry;
	u32 *reg;

	entry = __vcpu_get_cpuid_entry(vcpu, feature.function, feature.index);
	reg = (&entry->eax) + feature.reg;

	if (set)
		*reg |= BIT(feature.bit);
	else
		*reg &= ~BIT(feature.bit);

	vcpu_set_cpuid(vcpu);
}

uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index)
{
	struct {
		struct kvm_msrs header;
		struct kvm_msr_entry entry;
	} buffer = {};

	buffer.header.nmsrs = 1;
	buffer.entry.index = msr_index;

	vcpu_msrs_get(vcpu, &buffer.header);

	return buffer.entry.data;
}

int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, uint64_t msr_value)
{
	struct {
		struct kvm_msrs header;
		struct kvm_msr_entry entry;
	} buffer = {};

	memset(&buffer, 0, sizeof(buffer));
	buffer.header.nmsrs = 1;
	buffer.entry.index = msr_index;
	buffer.entry.data = msr_value;

	return __vcpu_ioctl(vcpu, KVM_SET_MSRS, &buffer.header);
}
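
/*
 * Load up to six guest "function arguments" into the System V AMD64 ABI
 * argument registers (RDI, RSI, RDX, RCX, R8, R9), in that order, so that
 * guest_code() receives them as ordinary C parameters.
 */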
void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
{
	va_list ap;
	struct kvm_regs regs;

	TEST_ASSERT(num >= 1 && num <= 6, "Unsupported number of args,\n"
		    " num: %u",
		    num);

	va_start(ap, num);
	vcpu_regs_get(vcpu, &regs);

	if (num >= 1)
		regs.rdi = va_arg(ap, uint64_t);

	if (num >= 2)
		regs.rsi = va_arg(ap, uint64_t);

	if (num >= 3)
		regs.rdx = va_arg(ap, uint64_t);

	if (num >= 4)
		regs.rcx = va_arg(ap, uint64_t);

	if (num >= 5)
		regs.r8 = va_arg(ap, uint64_t);

	if (num >= 6)
		regs.r9 = va_arg(ap, uint64_t);

	vcpu_regs_set(vcpu, &regs);
	va_end(ap);
}

void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
{
	struct kvm_regs regs;
	struct kvm_sregs sregs;

	fprintf(stream, "%*svCPU ID: %u\n", indent, "", vcpu->id);

	fprintf(stream, "%*sregs:\n", indent + 2, "");
	vcpu_regs_get(vcpu, &regs);
	regs_dump(stream, &regs, indent + 4);

	fprintf(stream, "%*ssregs:\n", indent + 2, "");
	vcpu_sregs_get(vcpu, &sregs);
	sregs_dump(stream, &sregs, indent + 4);
}

static struct kvm_msr_list *__kvm_get_msr_index_list(bool feature_msrs)
{
	struct kvm_msr_list *list;
	struct kvm_msr_list nmsrs;
	int kvm_fd, r;

	kvm_fd = open_kvm_dev_path_or_exit();

	nmsrs.nmsrs = 0;
	if (!feature_msrs)
		r = __kvm_ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, &nmsrs);
	else
		r = __kvm_ioctl(kvm_fd, KVM_GET_MSR_FEATURE_INDEX_LIST, &nmsrs);

	TEST_ASSERT(r == -1 && errno == E2BIG,
		    "Expected -E2BIG, got rc: %i errno: %i (%s)",
		    r, errno, strerror(errno));

	list = malloc(sizeof(*list) + nmsrs.nmsrs * sizeof(list->indices[0]));
	TEST_ASSERT(list, "-ENOMEM when allocating MSR index list");
	list->nmsrs = nmsrs.nmsrs;

	if (!feature_msrs)
		kvm_ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list);
	else
		kvm_ioctl(kvm_fd, KVM_GET_MSR_FEATURE_INDEX_LIST, list);
	close(kvm_fd);

	TEST_ASSERT(list->nmsrs == nmsrs.nmsrs,
		    "Number of MSRs in list changed, was %d, now %d",
		    nmsrs.nmsrs, list->nmsrs);
	return list;
}

const struct kvm_msr_list *kvm_get_msr_index_list(void)
{
	static const struct kvm_msr_list *list;

	if (!list)
		list = __kvm_get_msr_index_list(false);
	return list;
}

const struct kvm_msr_list *kvm_get_feature_msr_index_list(void)
{
	static const struct kvm_msr_list *list;

	if (!list)
		list = __kvm_get_msr_index_list(true);
	return list;
}

bool kvm_msr_is_in_save_restore_list(uint32_t msr_index)
{
	const struct kvm_msr_list *list = kvm_get_msr_index_list();
	int i;

	for (i = 0; i < list->nmsrs; ++i) {
		if (list->indices[i] == msr_index)
			return true;
	}

	return false;
}

static void vcpu_save_xsave_state(struct kvm_vcpu *vcpu,
				  struct kvm_x86_state *state)
{
	int size = vm_check_cap(vcpu->vm, KVM_CAP_XSAVE2);

	if (size) {
		state->xsave = malloc(size);
		vcpu_xsave2_get(vcpu, state->xsave);
	} else {
		state->xsave = malloc(sizeof(struct kvm_xsave));
		vcpu_xsave_get(vcpu, state->xsave);
	}
}

struct kvm_x86_state *vcpu_save_state(struct kvm_vcpu *vcpu)
{
	const struct kvm_msr_list *msr_list = kvm_get_msr_index_list();
	struct kvm_x86_state *state;
	int i;

	static int nested_size = -1;
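
	/*
	 * KVM_CHECK_CAP(KVM_CAP_NESTED_STATE) returns the maximum nested
	 * state size (0 if nested state is unsupported); cache it once.
	 */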
	if (nested_size == -1) {
		nested_size = kvm_check_cap(KVM_CAP_NESTED_STATE);
		TEST_ASSERT(nested_size <= sizeof(state->nested_),
			    "Nested state size too big, %i > %zi",
			    nested_size, sizeof(state->nested_));
	}

	/*
	 * When KVM exits to userspace with KVM_EXIT_IO, KVM guarantees
	 * guest state is consistent only after userspace re-enters the
	 * kernel with KVM_RUN. Complete IO prior to migrating state
	 * to a new VM.
	 */
	vcpu_run_complete_io(vcpu);

	state = malloc(sizeof(*state) + msr_list->nmsrs * sizeof(state->msrs.entries[0]));
	TEST_ASSERT(state, "-ENOMEM when allocating kvm state");

	vcpu_events_get(vcpu, &state->events);
	vcpu_mp_state_get(vcpu, &state->mp_state);
	vcpu_regs_get(vcpu, &state->regs);
	vcpu_save_xsave_state(vcpu, state);

	if (kvm_has_cap(KVM_CAP_XCRS))
		vcpu_xcrs_get(vcpu, &state->xcrs);

	vcpu_sregs_get(vcpu, &state->sregs);

	if (nested_size) {
		state->nested.size = sizeof(state->nested_);

		vcpu_nested_state_get(vcpu, &state->nested);
		TEST_ASSERT(state->nested.size <= nested_size,
			    "Nested state size too big, %i (KVM_CHECK_CAP gave %i)",
			    state->nested.size, nested_size);
	} else {
		state->nested.size = 0;
	}

	state->msrs.nmsrs = msr_list->nmsrs;
	for (i = 0; i < msr_list->nmsrs; i++)
		state->msrs.entries[i].index = msr_list->indices[i];
	vcpu_msrs_get(vcpu, &state->msrs);

	vcpu_debugregs_get(vcpu, &state->debugregs);

	return state;
}

void vcpu_load_state(struct kvm_vcpu *vcpu, struct kvm_x86_state *state)
{
	vcpu_sregs_set(vcpu, &state->sregs);
	vcpu_msrs_set(vcpu, &state->msrs);

	if (kvm_has_cap(KVM_CAP_XCRS))
		vcpu_xcrs_set(vcpu, &state->xcrs);

	vcpu_xsave_set(vcpu, state->xsave);
	vcpu_events_set(vcpu, &state->events);
	vcpu_mp_state_set(vcpu, &state->mp_state);
	vcpu_debugregs_set(vcpu, &state->debugregs);
	vcpu_regs_set(vcpu, &state->regs);

	if (state->nested.size)
		vcpu_nested_state_set(vcpu, &state->nested);
}

void kvm_x86_state_cleanup(struct kvm_x86_state *state)
{
	free(state->xsave);
	free(state);
}

void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits)
{
	if (!kvm_cpu_has_p(X86_PROPERTY_MAX_PHY_ADDR)) {
		*pa_bits = kvm_cpu_has(X86_FEATURE_PAE) ? 36 : 32;
		*va_bits = 32;
	} else {
		*pa_bits = kvm_cpu_property(X86_PROPERTY_MAX_PHY_ADDR);
		*va_bits = kvm_cpu_property(X86_PROPERTY_MAX_VIRT_ADDR);
	}
}

void kvm_init_vm_address_properties(struct kvm_vm *vm)
{
	if (is_sev_vm(vm)) {
		vm->arch.sev_fd = open_sev_dev_path_or_exit();
		vm->arch.c_bit = BIT_ULL(this_cpu_property(X86_PROPERTY_SEV_C_BIT));
		vm->gpa_tag_mask = vm->arch.c_bit;
	} else {
		vm->arch.sev_fd = -1;
	}
}

const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,
					       uint32_t function, uint32_t index)
{
	int i;

	for (i = 0; i < cpuid->nent; i++) {
		if (cpuid->entries[i].function == function &&
		    cpuid->entries[i].index == index)
			return &cpuid->entries[i];
	}

	TEST_FAIL("CPUID function 0x%x index 0x%x not found ", function, index);

	return NULL;
}
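
/*
 * Issue a hypercall from the guest: VMMCALL on AMD hosts, VMCALL otherwise.
 * The hypercall number and arguments are supplied via the caller's asm
 * constraints; the return value is whatever KVM leaves in RAX.
 */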
#define X86_HYPERCALL(inputs...)					\
({									\
	uint64_t r;							\
									\
	asm volatile("test %[use_vmmcall], %[use_vmmcall]\n\t"		\
		     "jnz 1f\n\t"					\
		     "vmcall\n\t"					\
		     "jmp 2f\n\t"					\
		     "1: vmmcall\n\t"					\
		     "2:"						\
		     : "=a"(r)						\
		     : [use_vmmcall] "r" (host_cpu_is_amd), inputs);	\
									\
	r;								\
})

uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
		       uint64_t a3)
{
	return X86_HYPERCALL("a"(nr), "b"(a0), "c"(a1), "d"(a2), "S"(a3));
}

uint64_t __xen_hypercall(uint64_t nr, uint64_t a0, void *a1)
{
	return X86_HYPERCALL("a"(nr), "D"(a0), "S"(a1));
}

void xen_hypercall(uint64_t nr, uint64_t a0, void *a1)
{
	GUEST_ASSERT(!__xen_hypercall(nr, a0, a1));
}

unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
{
	const unsigned long num_ht_pages = 12 << (30 - vm->page_shift); /* 12 GiB */
	unsigned long ht_gfn, max_gfn, max_pfn;
	uint8_t maxphyaddr, guest_maxphyaddr;

	/*
	 * Use "guest MAXPHYADDR" from KVM if it's available. Guest MAXPHYADDR
	 * enumerates the max _mappable_ GPA, which can be less than the raw
	 * MAXPHYADDR, e.g. if MAXPHYADDR=52, KVM is using TDP, and the CPU
	 * doesn't support 5-level TDP.
	 */
	guest_maxphyaddr = kvm_cpu_property(X86_PROPERTY_GUEST_MAX_PHY_ADDR);
	guest_maxphyaddr = guest_maxphyaddr ?: vm->pa_bits;
	TEST_ASSERT(guest_maxphyaddr <= vm->pa_bits,
		    "Guest MAXPHYADDR should never be greater than raw MAXPHYADDR");

	max_gfn = (1ULL << (guest_maxphyaddr - vm->page_shift)) - 1;

	/* Avoid reserved HyperTransport region on AMD processors. */
	if (!host_cpu_is_amd)
		return max_gfn;

	/* On parts with <40 physical address bits, the area is fully hidden */
	if (vm->pa_bits < 40)
		return max_gfn;

	/* Before family 17h, the HyperTransport area is just below 1T. */
	ht_gfn = (1 << 28) - num_ht_pages;
	if (this_cpu_family() < 0x17)
		goto done;

	/*
	 * Otherwise it's at the top of the physical address space, possibly
	 * reduced due to SME by bits 11:6 of CPUID[0x8000001f].EBX. Use
	 * the old conservative value if MAXPHYADDR is not enumerated.
	 */
	if (!this_cpu_has_p(X86_PROPERTY_MAX_PHY_ADDR))
		goto done;

	maxphyaddr = this_cpu_property(X86_PROPERTY_MAX_PHY_ADDR);
	max_pfn = (1ULL << (maxphyaddr - vm->page_shift)) - 1;

	if (this_cpu_has_p(X86_PROPERTY_PHYS_ADDR_REDUCTION))
		max_pfn >>= this_cpu_property(X86_PROPERTY_PHYS_ADDR_REDUCTION);

	ht_gfn = max_pfn - num_ht_pages;
done:
	return min(max_gfn, ht_gfn - 1);
}

void kvm_selftest_arch_init(void)
{
	host_cpu_is_intel = this_cpu_is_intel();
	host_cpu_is_amd = this_cpu_is_amd();
	is_forced_emulation_enabled = kvm_is_forced_emulation_enabled();

	kvm_init_pmu_errata();
}

bool sys_clocksource_is_based_on_tsc(void)
{
	char *clk_name = sys_get_cur_clocksource();
	bool ret = !strcmp(clk_name, "tsc\n") ||
		   !strcmp(clk_name, "hyperv_clocksource_tsc_page\n");

	free(clk_name);

	return ret;
}

bool kvm_arch_has_default_irqchip(void)
{
	return true;
}