1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2018, Google LLC.
4 */
5
6 #include "linux/bitmap.h"
7 #include "test_util.h"
8 #include "kvm_util.h"
9 #include "pmu.h"
10 #include "processor.h"
11 #include "smm.h"
12 #include "svm_util.h"
13 #include "sev.h"
14 #include "vmx.h"
15
16 #ifndef NUM_INTERRUPTS
17 #define NUM_INTERRUPTS 256
18 #endif
19
20 #define KERNEL_CS 0x8
21 #define KERNEL_DS 0x10
22 #define KERNEL_TSS 0x18
23
24 vm_vaddr_t exception_handlers;
25 bool host_cpu_is_amd;
26 bool host_cpu_is_intel;
27 bool host_cpu_is_hygon;
28 bool host_cpu_is_amd_compatible;
29 bool is_forced_emulation_enabled;
30 uint64_t guest_tsc_khz;
31
32 const char *ex_str(int vector)
33 {
34 switch (vector) {
35 #define VEC_STR(v) case v##_VECTOR: return "#" #v
36 case DE_VECTOR: return "no exception";
37 case KVM_MAGIC_DE_VECTOR: return "#DE";
38 VEC_STR(DB);
39 VEC_STR(NMI);
40 VEC_STR(BP);
41 VEC_STR(OF);
42 VEC_STR(BR);
43 VEC_STR(UD);
44 VEC_STR(NM);
45 VEC_STR(DF);
46 VEC_STR(TS);
47 VEC_STR(NP);
48 VEC_STR(SS);
49 VEC_STR(GP);
50 VEC_STR(PF);
51 VEC_STR(MF);
52 VEC_STR(AC);
53 VEC_STR(MC);
54 VEC_STR(XM);
55 VEC_STR(VE);
56 VEC_STR(CP);
57 VEC_STR(HV);
58 VEC_STR(VC);
59 VEC_STR(SX);
60 default: return "#??";
61 #undef VEC_STR
62 }
63 }
64
65 static void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent)
66 {
67 fprintf(stream, "%*srax: 0x%.16llx rbx: 0x%.16llx "
68 "rcx: 0x%.16llx rdx: 0x%.16llx\n",
69 indent, "",
70 regs->rax, regs->rbx, regs->rcx, regs->rdx);
71 fprintf(stream, "%*srsi: 0x%.16llx rdi: 0x%.16llx "
72 "rsp: 0x%.16llx rbp: 0x%.16llx\n",
73 indent, "",
74 regs->rsi, regs->rdi, regs->rsp, regs->rbp);
75 fprintf(stream, "%*sr8: 0x%.16llx r9: 0x%.16llx "
76 "r10: 0x%.16llx r11: 0x%.16llx\n",
77 indent, "",
78 regs->r8, regs->r9, regs->r10, regs->r11);
79 fprintf(stream, "%*sr12: 0x%.16llx r13: 0x%.16llx "
80 "r14: 0x%.16llx r15: 0x%.16llx\n",
81 indent, "",
82 regs->r12, regs->r13, regs->r14, regs->r15);
83 fprintf(stream, "%*srip: 0x%.16llx rfl: 0x%.16llx\n",
84 indent, "",
85 regs->rip, regs->rflags);
86 }
87
88 static void segment_dump(FILE *stream, struct kvm_segment *segment,
89 uint8_t indent)
90 {
91 fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.8x "
92 "selector: 0x%.4x type: 0x%.2x\n",
93 indent, "", segment->base, segment->limit,
94 segment->selector, segment->type);
95 fprintf(stream, "%*spresent: 0x%.2x dpl: 0x%.2x "
96 "db: 0x%.2x s: 0x%.2x l: 0x%.2x\n",
97 indent, "", segment->present, segment->dpl,
98 segment->db, segment->s, segment->l);
99 fprintf(stream, "%*sg: 0x%.2x avl: 0x%.2x "
100 "unusable: 0x%.2x padding: 0x%.2x\n",
101 indent, "", segment->g, segment->avl,
102 segment->unusable, segment->padding);
103 }
104
105 static void dtable_dump(FILE *stream, struct kvm_dtable *dtable,
106 uint8_t indent)
107 {
108 fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.4x "
109 "padding: 0x%.4x 0x%.4x 0x%.4x\n",
110 indent, "", dtable->base, dtable->limit,
111 dtable->padding[0], dtable->padding[1], dtable->padding[2]);
112 }
113
114 static void sregs_dump(FILE *stream, struct kvm_sregs *sregs, uint8_t indent)
115 {
116 unsigned int i;
117
118 fprintf(stream, "%*scs:\n", indent, "");
119 segment_dump(stream, &sregs->cs, indent + 2);
120 fprintf(stream, "%*sds:\n", indent, "");
121 segment_dump(stream, &sregs->ds, indent + 2);
122 fprintf(stream, "%*ses:\n", indent, "");
123 segment_dump(stream, &sregs->es, indent + 2);
124 fprintf(stream, "%*sfs:\n", indent, "");
125 segment_dump(stream, &sregs->fs, indent + 2);
126 fprintf(stream, "%*sgs:\n", indent, "");
127 segment_dump(stream, &sregs->gs, indent + 2);
128 fprintf(stream, "%*sss:\n", indent, "");
129 segment_dump(stream, &sregs->ss, indent + 2);
130 fprintf(stream, "%*str:\n", indent, "");
131 segment_dump(stream, &sregs->tr, indent + 2);
132 fprintf(stream, "%*sldt:\n", indent, "");
133 segment_dump(stream, &sregs->ldt, indent + 2);
134
135 fprintf(stream, "%*sgdt:\n", indent, "");
136 dtable_dump(stream, &sregs->gdt, indent + 2);
137 fprintf(stream, "%*sidt:\n", indent, "");
138 dtable_dump(stream, &sregs->idt, indent + 2);
139
140 fprintf(stream, "%*scr0: 0x%.16llx cr2: 0x%.16llx "
141 "cr3: 0x%.16llx cr4: 0x%.16llx\n",
142 indent, "",
143 sregs->cr0, sregs->cr2, sregs->cr3, sregs->cr4);
144 fprintf(stream, "%*scr8: 0x%.16llx efer: 0x%.16llx "
145 "apic_base: 0x%.16llx\n",
146 indent, "",
147 sregs->cr8, sregs->efer, sregs->apic_base);
148
149 fprintf(stream, "%*sinterrupt_bitmap:\n", indent, "");
150 for (i = 0; i < (KVM_NR_INTERRUPTS + 63) / 64; i++) {
151 fprintf(stream, "%*s%.16llx\n", indent + 2, "",
152 sregs->interrupt_bitmap[i]);
153 }
154 }
155
156 bool kvm_is_tdp_enabled(void)
157 {
158 if (host_cpu_is_intel)
159 return get_kvm_intel_param_bool("ept");
160 else
161 return get_kvm_amd_param_bool("npt");
162 }
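/*
 * Illustrative usage sketch (added for clarity, not part of the original
 * file): tests that depend on two-dimensional paging typically gate
 * themselves on the module param check above, e.g.:
 *
 *	TEST_REQUIRE(kvm_is_tdp_enabled());
 *
 * TEST_REQUIRE() is the selftests' skip-if-false helper.
 */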
163
164 static void virt_mmu_init(struct kvm_vm *vm, struct kvm_mmu *mmu,
165 struct pte_masks *pte_masks)
166 {
167 /* If needed, create the top-level page table. */
168 if (!mmu->pgd_created) {
169 mmu->pgd = vm_alloc_page_table(vm);
170 mmu->pgd_created = true;
171 mmu->arch.pte_masks = *pte_masks;
172 }
173
174 TEST_ASSERT(mmu->pgtable_levels == 4 || mmu->pgtable_levels == 5,
175 "Selftests MMU only supports 4-level and 5-level paging, not %u-level paging",
176 mmu->pgtable_levels);
177 }
178
179 void virt_arch_pgd_alloc(struct kvm_vm *vm)
180 {
181 TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
182 "Unknown or unsupported guest mode: 0x%x", vm->mode);
183
184 struct pte_masks pte_masks = (struct pte_masks){
185 .present = BIT_ULL(0),
186 .writable = BIT_ULL(1),
187 .user = BIT_ULL(2),
188 .accessed = BIT_ULL(5),
189 .dirty = BIT_ULL(6),
190 .huge = BIT_ULL(7),
191 .nx = BIT_ULL(63),
192 .executable = 0,
193 .c = vm->arch.c_bit,
194 .s = vm->arch.s_bit,
195 };
196
197 virt_mmu_init(vm, &vm->mmu, &pte_masks);
198 }
199
200 void tdp_mmu_init(struct kvm_vm *vm, int pgtable_levels,
201 struct pte_masks *pte_masks)
202 {
203 TEST_ASSERT(!vm->stage2_mmu.pgtable_levels, "TDP MMU already initialized");
204
205 vm->stage2_mmu.pgtable_levels = pgtable_levels;
206 virt_mmu_init(vm, &vm->stage2_mmu, pte_masks);
207 }
208
209 static void *virt_get_pte(struct kvm_vm *vm, struct kvm_mmu *mmu,
210 uint64_t *parent_pte, uint64_t vaddr, int level)
211 {
212 uint64_t pt_gpa = PTE_GET_PA(*parent_pte);
213 uint64_t *page_table = addr_gpa2hva(vm, pt_gpa);
214 int index = (vaddr >> PG_LEVEL_SHIFT(level)) & 0x1ffu;
215
216 TEST_ASSERT((*parent_pte == mmu->pgd) || is_present_pte(mmu, parent_pte),
217 "Parent PTE (level %d) not PRESENT for gva: 0x%08lx",
218 level + 1, vaddr);
219
220 return &page_table[index];
221 }
222
223 static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
224 struct kvm_mmu *mmu,
225 uint64_t *parent_pte,
226 uint64_t vaddr,
227 uint64_t paddr,
228 int current_level,
229 int target_level)
230 {
231 uint64_t *pte = virt_get_pte(vm, mmu, parent_pte, vaddr, current_level);
232
233 paddr = vm_untag_gpa(vm, paddr);
234
235 if (!is_present_pte(mmu, pte)) {
236 *pte = PTE_PRESENT_MASK(mmu) | PTE_READABLE_MASK(mmu) |
237 PTE_WRITABLE_MASK(mmu) | PTE_EXECUTABLE_MASK(mmu) |
238 PTE_ALWAYS_SET_MASK(mmu);
239 if (current_level == target_level)
240 *pte |= PTE_HUGE_MASK(mmu) | (paddr & PHYSICAL_PAGE_MASK);
241 else
242 *pte |= vm_alloc_page_table(vm) & PHYSICAL_PAGE_MASK;
243 } else {
244 /*
245 * Entry already present. Assert that the caller doesn't want
246 * a hugepage at this level, and that there isn't a hugepage at
247 * this level.
248 */
249 TEST_ASSERT(current_level != target_level,
250 "Cannot create hugepage at level: %u, vaddr: 0x%lx",
251 current_level, vaddr);
252 TEST_ASSERT(!is_huge_pte(mmu, pte),
253 "Cannot create page table at level: %u, vaddr: 0x%lx",
254 current_level, vaddr);
255 }
256 return pte;
257 }
258
259 void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, uint64_t vaddr,
260 uint64_t paddr, int level)
261 {
262 const uint64_t pg_size = PG_LEVEL_SIZE(level);
263 uint64_t *pte = &mmu->pgd;
264 int current_level;
265
266 TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
267 "Unknown or unsupported guest mode: 0x%x", vm->mode);
268
269 TEST_ASSERT((vaddr % pg_size) == 0,
270 "Virtual address not aligned,\n"
271 "vaddr: 0x%lx page size: 0x%lx", vaddr, pg_size);
272 TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (vaddr >> vm->page_shift)),
273 "Invalid virtual address, vaddr: 0x%lx", vaddr);
274 TEST_ASSERT((paddr % pg_size) == 0,
275 "Physical address not aligned,\n"
276 " paddr: 0x%lx page size: 0x%lx", paddr, pg_size);
277 TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
278 "Physical address beyond maximum supported,\n"
279 " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
280 paddr, vm->max_gfn, vm->page_size);
281 TEST_ASSERT(vm_untag_gpa(vm, paddr) == paddr,
282 "Unexpected bits in paddr: %lx", paddr);
283
284 TEST_ASSERT(!PTE_EXECUTABLE_MASK(mmu) || !PTE_NX_MASK(mmu),
285 "X and NX bit masks cannot be used simultaneously");
286
287 /*
288 * Allocate upper level page tables, if not already present. Return
289 * early if a hugepage was created.
290 */
291 for (current_level = mmu->pgtable_levels;
292 current_level > PG_LEVEL_4K;
293 current_level--) {
294 pte = virt_create_upper_pte(vm, mmu, pte, vaddr, paddr,
295 current_level, level);
296 if (is_huge_pte(mmu, pte))
297 return;
298 }
299
300 /* Fill in page table entry. */
301 pte = virt_get_pte(vm, mmu, pte, vaddr, PG_LEVEL_4K);
302 TEST_ASSERT(!is_present_pte(mmu, pte),
303 "PTE already present for 4k page at vaddr: 0x%lx", vaddr);
304 *pte = PTE_PRESENT_MASK(mmu) | PTE_READABLE_MASK(mmu) |
305 PTE_WRITABLE_MASK(mmu) | PTE_EXECUTABLE_MASK(mmu) |
306 PTE_ALWAYS_SET_MASK(mmu) | (paddr & PHYSICAL_PAGE_MASK);
307
308 /*
309 * Neither SEV nor TDX supports shared page tables, so only the final
310 * leaf PTE needs the C/S-bit set manually.
311 */
312 if (vm_is_gpa_protected(vm, paddr))
313 *pte |= PTE_C_BIT_MASK(mmu);
314 else
315 *pte |= PTE_S_BIT_MASK(mmu);
316 }
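/*
 * Illustrative sketch (added for clarity, not in the original source): the
 * @level argument selects the leaf size, so a caller that wants a single
 * 2MiB mapping in the ordinary guest MMU would do something like:
 *
 *	__virt_pg_map(vm, &vm->mmu, gva_2m_aligned, gpa_2m_aligned, PG_LEVEL_2M);
 *
 * (gva_2m_aligned/gpa_2m_aligned are placeholder names.)  The upper-level
 * walk above then installs a huge PTE and returns early.
 */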
317
318 void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
319 {
320 __virt_pg_map(vm, &vm->mmu, vaddr, paddr, PG_LEVEL_4K);
321 }
322
323 void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
324 uint64_t nr_bytes, int level)
325 {
326 uint64_t pg_size = PG_LEVEL_SIZE(level);
327 uint64_t nr_pages = nr_bytes / pg_size;
328 int i;
329
330 TEST_ASSERT(nr_bytes % pg_size == 0,
331 "Region size not aligned: nr_bytes: 0x%lx, page size: 0x%lx",
332 nr_bytes, pg_size);
333
334 for (i = 0; i < nr_pages; i++) {
335 __virt_pg_map(vm, &vm->mmu, vaddr, paddr, level);
336 sparsebit_set_num(vm->vpages_mapped, vaddr >> vm->page_shift,
337 nr_bytes / PAGE_SIZE);
338
339 vaddr += pg_size;
340 paddr += pg_size;
341 }
342 }
343
344 static bool vm_is_target_pte(struct kvm_mmu *mmu, uint64_t *pte,
345 int *level, int current_level)
346 {
347 if (is_huge_pte(mmu, pte)) {
348 TEST_ASSERT(*level == PG_LEVEL_NONE ||
349 *level == current_level,
350 "Unexpected hugepage at level %d", current_level);
351 *level = current_level;
352 }
353
354 return *level == current_level;
355 }
356
357 static uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm,
358 struct kvm_mmu *mmu,
359 uint64_t vaddr,
360 int *level)
361 {
362 int va_width = 12 + (mmu->pgtable_levels) * 9;
363 uint64_t *pte = &mmu->pgd;
364 int current_level;
365
366 TEST_ASSERT(!vm->arch.is_pt_protected,
367 "Walking page tables of protected guests is impossible");
368
369 TEST_ASSERT(*level >= PG_LEVEL_NONE && *level <= mmu->pgtable_levels,
370 "Invalid PG_LEVEL_* '%d'", *level);
371
372 TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
373 "Unknown or unsupported guest mode: 0x%x", vm->mode);
374 TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
375 (vaddr >> vm->page_shift)),
376 "Invalid virtual address, vaddr: 0x%lx",
377 vaddr);
378 /*
379 * Check that the vaddr is a sign-extended va_width value.
380 */
381 TEST_ASSERT(vaddr ==
382 (((int64_t)vaddr << (64 - va_width) >> (64 - va_width))),
383 "Canonical check failed. The virtual address is invalid.");
384
385 for (current_level = mmu->pgtable_levels;
386 current_level > PG_LEVEL_4K;
387 current_level--) {
388 pte = virt_get_pte(vm, mmu, pte, vaddr, current_level);
389 if (vm_is_target_pte(mmu, pte, level, current_level))
390 return pte;
391 }
392
393 return virt_get_pte(vm, mmu, pte, vaddr, PG_LEVEL_4K);
394 }
395
396 uint64_t *tdp_get_pte(struct kvm_vm *vm, uint64_t l2_gpa)
397 {
398 int level = PG_LEVEL_4K;
399
400 return __vm_get_page_table_entry(vm, &vm->stage2_mmu, l2_gpa, &level);
401 }
402
403 uint64_t *vm_get_pte(struct kvm_vm *vm, uint64_t vaddr)
404 {
405 int level = PG_LEVEL_4K;
406
407 return __vm_get_page_table_entry(vm, &vm->mmu, vaddr, &level);
408 }
409
410 void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
411 {
412 struct kvm_mmu *mmu = &vm->mmu;
413 uint64_t *pml4e, *pml4e_start;
414 uint64_t *pdpe, *pdpe_start;
415 uint64_t *pde, *pde_start;
416 uint64_t *pte, *pte_start;
417
418 if (!mmu->pgd_created)
419 return;
420
421 fprintf(stream, "%*s "
422 " no\n", indent, "");
423 fprintf(stream, "%*s index hvaddr gpaddr "
424 "addr w exec dirty\n",
425 indent, "");
426 pml4e_start = (uint64_t *) addr_gpa2hva(vm, mmu->pgd);
427 for (uint16_t n1 = 0; n1 <= 0x1ffu; n1++) {
428 pml4e = &pml4e_start[n1];
429 if (!is_present_pte(mmu, pml4e))
430 continue;
431 fprintf(stream, "%*spml4e 0x%-3zx %p 0x%-12lx 0x%-10llx %u "
432 " %u\n",
433 indent, "",
434 pml4e - pml4e_start, pml4e,
435 addr_hva2gpa(vm, pml4e), PTE_GET_PFN(*pml4e),
436 is_writable_pte(mmu, pml4e), is_nx_pte(mmu, pml4e));
437
438 pdpe_start = addr_gpa2hva(vm, *pml4e & PHYSICAL_PAGE_MASK);
439 for (uint16_t n2 = 0; n2 <= 0x1ffu; n2++) {
440 pdpe = &pdpe_start[n2];
441 if (!is_present_pte(mmu, pdpe))
442 continue;
443 fprintf(stream, "%*spdpe 0x%-3zx %p 0x%-12lx 0x%-10llx "
444 "%u %u\n",
445 indent, "",
446 pdpe - pdpe_start, pdpe,
447 addr_hva2gpa(vm, pdpe),
448 PTE_GET_PFN(*pdpe), is_writable_pte(mmu, pdpe),
449 is_nx_pte(mmu, pdpe));
450
451 pde_start = addr_gpa2hva(vm, *pdpe & PHYSICAL_PAGE_MASK);
452 for (uint16_t n3 = 0; n3 <= 0x1ffu; n3++) {
453 pde = &pde_start[n3];
454 if (!is_present_pte(mmu, pde))
455 continue;
456 fprintf(stream, "%*spde 0x%-3zx %p "
457 "0x%-12lx 0x%-10llx %u %u\n",
458 indent, "", pde - pde_start, pde,
459 addr_hva2gpa(vm, pde),
460 PTE_GET_PFN(*pde), is_writable_pte(mmu, pde),
461 is_nx_pte(mmu, pde));
462
463 pte_start = addr_gpa2hva(vm, *pde & PHYSICAL_PAGE_MASK);
464 for (uint16_t n4 = 0; n4 <= 0x1ffu; n4++) {
465 pte = &pte_start[n4];
466 if (!is_present_pte(mmu, pte))
467 continue;
468 fprintf(stream, "%*spte 0x%-3zx %p "
469 "0x%-12lx 0x%-10llx %u %u "
470 " %u 0x%-10lx\n",
471 indent, "",
472 pte - pte_start, pte,
473 addr_hva2gpa(vm, pte),
474 PTE_GET_PFN(*pte),
475 is_writable_pte(mmu, pte),
476 is_nx_pte(mmu, pte),
477 is_dirty_pte(mmu, pte),
478 ((uint64_t) n1 << 27)
479 | ((uint64_t) n2 << 18)
480 | ((uint64_t) n3 << 9)
481 | ((uint64_t) n4));
482 }
483 }
484 }
485 }
486 }
487
488 void vm_enable_tdp(struct kvm_vm *vm)
489 {
490 if (kvm_cpu_has(X86_FEATURE_VMX))
491 vm_enable_ept(vm);
492 else
493 vm_enable_npt(vm);
494 }
495
496 bool kvm_cpu_has_tdp(void)
497 {
498 return kvm_cpu_has_ept() || kvm_cpu_has_npt();
499 }
500
501 void __tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr,
502 uint64_t size, int level)
503 {
504 size_t page_size = PG_LEVEL_SIZE(level);
505 size_t npages = size / page_size;
506
507 TEST_ASSERT(nested_paddr + size > nested_paddr, "Vaddr overflow");
508 TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
509
510 while (npages--) {
511 __virt_pg_map(vm, &vm->stage2_mmu, nested_paddr, paddr, level);
512 nested_paddr += page_size;
513 paddr += page_size;
514 }
515 }
516
517 void tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr,
518 uint64_t size)
519 {
520 __tdp_map(vm, nested_paddr, paddr, size, PG_LEVEL_4K);
521 }
522
523 /* Prepare an identity extended page table that maps all the
524 * physical pages in the VM.
525 */
526 void tdp_identity_map_default_memslots(struct kvm_vm *vm)
527 {
528 uint32_t s, memslot = 0;
529 sparsebit_idx_t i, last;
530 struct userspace_mem_region *region = memslot2region(vm, memslot);
531
532 /* Only memslot 0 is mapped here, ensure it's the only one being used */
533 for (s = 0; s < NR_MEM_REGIONS; s++)
534 TEST_ASSERT_EQ(vm->memslots[s], 0);
535
536 i = (region->region.guest_phys_addr >> vm->page_shift) - 1;
537 last = i + (region->region.memory_size >> vm->page_shift);
538 for (;;) {
539 i = sparsebit_next_clear(region->unused_phy_pages, i);
540 if (i > last)
541 break;
542
543 tdp_map(vm, (uint64_t)i << vm->page_shift,
544 (uint64_t)i << vm->page_shift, 1 << vm->page_shift);
545 }
546 }
547
548 /* Identity map a region with 1GiB Pages. */
549 void tdp_identity_map_1g(struct kvm_vm *vm, uint64_t addr, uint64_t size)
550 {
551 __tdp_map(vm, addr, addr, size, PG_LEVEL_1G);
552 }
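/*
 * Illustrative sketch (added for clarity, not in the original source): a
 * nested test that wants L2 to see the same physical layout as L1 typically
 * combines the helpers above, roughly:
 *
 *	tdp_mmu_init(vm, 4, &pte_masks);
 *	tdp_identity_map_default_memslots(vm);
 *
 * where pte_masks describes the EPT or NPT PTE format; the exact masks
 * depend on which of the two is in use.
 */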
553
554 /*
555 * Set Unusable Segment
556 *
557 * Input Args: None
558 *
559 * Output Args:
560 * segp - Pointer to segment register
561 *
562 * Return: None
563 *
564 * Sets the segment register pointed to by @segp to an unusable state.
565 */
566 static void kvm_seg_set_unusable(struct kvm_segment *segp)
567 {
568 memset(segp, 0, sizeof(*segp));
569 segp->unusable = true;
570 }
571
572 static void kvm_seg_fill_gdt_64bit(struct kvm_vm *vm, struct kvm_segment *segp)
573 {
574 void *gdt = addr_gva2hva(vm, vm->arch.gdt);
575 struct desc64 *desc = gdt + (segp->selector >> 3) * 8;
576
577 desc->limit0 = segp->limit & 0xFFFF;
578 desc->base0 = segp->base & 0xFFFF;
579 desc->base1 = segp->base >> 16;
580 desc->type = segp->type;
581 desc->s = segp->s;
582 desc->dpl = segp->dpl;
583 desc->p = segp->present;
584 desc->limit1 = segp->limit >> 16;
585 desc->avl = segp->avl;
586 desc->l = segp->l;
587 desc->db = segp->db;
588 desc->g = segp->g;
589 desc->base2 = segp->base >> 24;
590 if (!segp->s)
591 desc->base3 = segp->base >> 32;
592 }
593
594 static void kvm_seg_set_kernel_code_64bit(struct kvm_segment *segp)
595 {
596 memset(segp, 0, sizeof(*segp));
597 segp->selector = KERNEL_CS;
598 segp->limit = 0xFFFFFFFFu;
599 segp->s = 0x1; /* kTypeCodeData */
600 segp->type = 0x08 | 0x01 | 0x02; /* kFlagCode | kFlagCodeAccessed
601 * | kFlagCodeReadable
602 */
603 segp->g = true;
604 segp->l = true;
605 segp->present = 1;
606 }
607
608 static void kvm_seg_set_kernel_data_64bit(struct kvm_segment *segp)
609 {
610 memset(segp, 0, sizeof(*segp));
611 segp->selector = KERNEL_DS;
612 segp->limit = 0xFFFFFFFFu;
613 segp->s = 0x1; /* kTypeCodeData */
614 segp->type = 0x00 | 0x01 | 0x02; /* kFlagData | kFlagDataAccessed
615 * | kFlagDataWritable
616 */
617 segp->g = true;
618 segp->present = true;
619 }
620
621 vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
622 {
623 int level = PG_LEVEL_NONE;
624 uint64_t *pte = __vm_get_page_table_entry(vm, &vm->mmu, gva, &level);
625
626 TEST_ASSERT(is_present_pte(&vm->mmu, pte),
627 "Leaf PTE not PRESENT for gva: 0x%08lx", gva);
628
629 /*
630 * No need for a hugepage mask on the PTE, x86-64 requires the "unused"
631 * address bits to be zero.
632 */
633 return vm_untag_gpa(vm, PTE_GET_PA(*pte)) | (gva & ~HUGEPAGE_MASK(level));
634 }
635
636 static void kvm_seg_set_tss_64bit(vm_vaddr_t base, struct kvm_segment *segp)
637 {
638 memset(segp, 0, sizeof(*segp));
639 segp->base = base;
640 segp->limit = 0x67;
641 segp->selector = KERNEL_TSS;
642 segp->type = 0xb;
643 segp->present = 1;
644 }
645
646 static void vcpu_init_sregs(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
647 {
648 struct kvm_sregs sregs;
649
650 TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
651 "Unknown or unsupported guest mode: 0x%x", vm->mode);
652
653 /* Set mode specific system register values. */
654 vcpu_sregs_get(vcpu, &sregs);
655
656 sregs.idt.base = vm->arch.idt;
657 sregs.idt.limit = NUM_INTERRUPTS * sizeof(struct idt_entry) - 1;
658 sregs.gdt.base = vm->arch.gdt;
659 sregs.gdt.limit = getpagesize() - 1;
660
661 sregs.cr0 = X86_CR0_PE | X86_CR0_NE | X86_CR0_PG;
662 sregs.cr4 |= X86_CR4_PAE | X86_CR4_OSFXSR;
663 if (kvm_cpu_has(X86_FEATURE_XSAVE))
664 sregs.cr4 |= X86_CR4_OSXSAVE;
665 if (vm->mmu.pgtable_levels == 5)
666 sregs.cr4 |= X86_CR4_LA57;
667 sregs.efer |= (EFER_LME | EFER_LMA | EFER_NX);
668
669 kvm_seg_set_unusable(&sregs.ldt);
670 kvm_seg_set_kernel_code_64bit(&sregs.cs);
671 kvm_seg_set_kernel_data_64bit(&sregs.ds);
672 kvm_seg_set_kernel_data_64bit(&sregs.es);
673 kvm_seg_set_kernel_data_64bit(&sregs.gs);
674 kvm_seg_set_tss_64bit(vm->arch.tss, &sregs.tr);
675
676 sregs.cr3 = vm->mmu.pgd;
677 vcpu_sregs_set(vcpu, &sregs);
678 }
679
680 static void vcpu_init_xcrs(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
681 {
682 struct kvm_xcrs xcrs = {
683 .nr_xcrs = 1,
684 .xcrs[0].xcr = 0,
685 .xcrs[0].value = kvm_cpu_supported_xcr0(),
686 };
687
688 if (!kvm_cpu_has(X86_FEATURE_XSAVE))
689 return;
690
691 vcpu_xcrs_set(vcpu, &xcrs);
692 }
693
694 static void set_idt_entry(struct kvm_vm *vm, int vector, unsigned long addr,
695 int dpl, unsigned short selector)
696 {
697 struct idt_entry *base =
698 (struct idt_entry *)addr_gva2hva(vm, vm->arch.idt);
699 struct idt_entry *e = &base[vector];
700
701 memset(e, 0, sizeof(*e));
702 e->offset0 = addr;
703 e->selector = selector;
704 e->ist = 0;
705 e->type = 14;
706 e->dpl = dpl;
707 e->p = 1;
708 e->offset1 = addr >> 16;
709 e->offset2 = addr >> 32;
710 }
711
712 static bool kvm_fixup_exception(struct ex_regs *regs)
713 {
714 if (regs->r9 != KVM_EXCEPTION_MAGIC || regs->rip != regs->r10)
715 return false;
716
717 if (regs->vector == DE_VECTOR)
718 regs->vector = KVM_MAGIC_DE_VECTOR;
719
720 regs->rip = regs->r11;
721 regs->r9 = regs->vector;
722 regs->r10 = regs->error_code;
723 return true;
724 }
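/*
 * Note (added for clarity): the fixup above implements the guest-side "safe"
 * instruction contract used by the selftests: before executing a potentially
 * faulting instruction, guest code loads KVM_EXCEPTION_MAGIC into r9, the
 * address of the instruction into r10, and the fixup target into r11.  On a
 * matching fault, execution resumes at r11 with the vector in r9 and the
 * error code in r10 (a raw #DE is remapped to KVM_MAGIC_DE_VECTOR so that
 * vector 0 can keep meaning "no exception", see ex_str()).
 */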
725
726 void route_exception(struct ex_regs *regs)
727 {
728 typedef void(*handler)(struct ex_regs *);
729 handler *handlers = (handler *)exception_handlers;
730
731 if (handlers && handlers[regs->vector]) {
732 handlers[regs->vector](regs);
733 return;
734 }
735
736 if (kvm_fixup_exception(regs))
737 return;
738
739 GUEST_FAIL("Unhandled exception '0x%lx' at guest RIP '0x%lx'",
740 regs->vector, regs->rip);
741 }
742
743 static void vm_init_descriptor_tables(struct kvm_vm *vm)
744 {
745 extern void *idt_handlers;
746 struct kvm_segment seg;
747 int i;
748
749 vm->arch.gdt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
750 vm->arch.idt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
751 vm->handlers = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
752 vm->arch.tss = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
753
754 /* Handlers have the same address in both address spaces. */
755 for (i = 0; i < NUM_INTERRUPTS; i++)
756 set_idt_entry(vm, i, (unsigned long)(&idt_handlers)[i], 0, KERNEL_CS);
757
758 *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
759
760 kvm_seg_set_kernel_code_64bit(&seg);
761 kvm_seg_fill_gdt_64bit(vm, &seg);
762
763 kvm_seg_set_kernel_data_64bit(&seg);
764 kvm_seg_fill_gdt_64bit(vm, &seg);
765
766 kvm_seg_set_tss_64bit(vm->arch.tss, &seg);
767 kvm_seg_fill_gdt_64bit(vm, &seg);
768 }
769
770 void vm_install_exception_handler(struct kvm_vm *vm, int vector,
771 void (*handler)(struct ex_regs *))
772 {
773 vm_vaddr_t *handlers = (vm_vaddr_t *)addr_gva2hva(vm, vm->handlers);
774
775 handlers[vector] = (vm_vaddr_t)handler;
776 }
777
778 void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
779 {
780 struct ucall uc;
781
782 if (get_ucall(vcpu, &uc) == UCALL_ABORT)
783 REPORT_GUEST_ASSERT(uc);
784 }
785
786 void kvm_arch_vm_post_create(struct kvm_vm *vm, unsigned int nr_vcpus)
787 {
788 int r;
789
790 TEST_ASSERT(kvm_has_cap(KVM_CAP_GET_TSC_KHZ),
791 "Require KVM_GET_TSC_KHZ to provide udelay() to guest.");
792
793 vm_create_irqchip(vm);
794 vm_init_descriptor_tables(vm);
795
796 sync_global_to_guest(vm, host_cpu_is_intel);
797 sync_global_to_guest(vm, host_cpu_is_amd);
798 sync_global_to_guest(vm, host_cpu_is_hygon);
799 sync_global_to_guest(vm, host_cpu_is_amd_compatible);
800 sync_global_to_guest(vm, is_forced_emulation_enabled);
801 sync_global_to_guest(vm, pmu_errata_mask);
802
803 if (is_sev_vm(vm)) {
804 struct kvm_sev_init init = { 0 };
805
806 vm_sev_ioctl(vm, KVM_SEV_INIT2, &init);
807 }
808
809 r = __vm_ioctl(vm, KVM_GET_TSC_KHZ, NULL);
810 TEST_ASSERT(r > 0, "KVM_GET_TSC_KHZ did not provide a valid TSC frequency.");
811 guest_tsc_khz = r;
812 sync_global_to_guest(vm, guest_tsc_khz);
813 }
814
815 void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
816 {
817 struct kvm_regs regs;
818
819 vcpu_regs_get(vcpu, &regs);
820 regs.rip = (unsigned long) guest_code;
821 vcpu_regs_set(vcpu, &regs);
822 }
823
824 struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
825 {
826 struct kvm_mp_state mp_state;
827 struct kvm_regs regs;
828 vm_vaddr_t stack_vaddr;
829 struct kvm_vcpu *vcpu;
830
831 stack_vaddr = __vm_vaddr_alloc(vm, DEFAULT_STACK_PGS * getpagesize(),
832 DEFAULT_GUEST_STACK_VADDR_MIN,
833 MEM_REGION_DATA);
834
835 stack_vaddr += DEFAULT_STACK_PGS * getpagesize();
836
837 /*
838 * Align stack to match calling sequence requirements in section "The
839 * Stack Frame" of the System V ABI AMD64 Architecture Processor
840 * Supplement, which requires the value (%rsp + 8) to be a multiple of
841 * 16 when control is transferred to the function entry point.
842 *
843 * If this code is ever used to launch a vCPU with 32-bit entry point it
844 * may need to subtract 4 bytes instead of 8 bytes.
845 */
846 TEST_ASSERT(IS_ALIGNED(stack_vaddr, PAGE_SIZE),
847 "__vm_vaddr_alloc() did not provide a page-aligned address");
848 stack_vaddr -= 8;
849
850 vcpu = __vm_vcpu_add(vm, vcpu_id);
851 vcpu_init_cpuid(vcpu, kvm_get_supported_cpuid());
852 vcpu_init_sregs(vm, vcpu);
853 vcpu_init_xcrs(vm, vcpu);
854
855 /* Setup guest general purpose registers */
856 vcpu_regs_get(vcpu, &regs);
857 regs.rflags = regs.rflags | 0x2;
858 regs.rsp = stack_vaddr;
859 vcpu_regs_set(vcpu, &regs);
860
861 /* Setup the MP state */
862 mp_state.mp_state = 0;
863 vcpu_mp_state_set(vcpu, &mp_state);
864
865 /*
866 * Refresh CPUID after setting SREGS and XCR0, so that KVM's "runtime"
867 * updates to guest CPUID, e.g. for OSXSAVE and XSAVE state size, are
868 * reflected into selftests' vCPU CPUID cache, i.e. so that the cache
869 * is consistent with vCPU state.
870 */
871 vcpu_get_cpuid(vcpu);
872 return vcpu;
873 }
874
875 struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id)
876 {
877 struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id);
878
879 vcpu_init_cpuid(vcpu, kvm_get_supported_cpuid());
880
881 return vcpu;
882 }
883
884 void vcpu_arch_free(struct kvm_vcpu *vcpu)
885 {
886 if (vcpu->cpuid)
887 free(vcpu->cpuid);
888 }
889
890 /* Do not use kvm_supported_cpuid directly except for validity checks. */
891 static void *kvm_supported_cpuid;
892
893 const struct kvm_cpuid2 *kvm_get_supported_cpuid(void)
894 {
895 int kvm_fd;
896
897 if (kvm_supported_cpuid)
898 return kvm_supported_cpuid;
899
900 kvm_supported_cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES);
901 kvm_fd = open_kvm_dev_path_or_exit();
902
903 kvm_ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID,
904 (struct kvm_cpuid2 *)kvm_supported_cpuid);
905
906 close(kvm_fd);
907 return kvm_supported_cpuid;
908 }
909
910 static uint32_t __kvm_cpu_has(const struct kvm_cpuid2 *cpuid,
911 uint32_t function, uint32_t index,
912 uint8_t reg, uint8_t lo, uint8_t hi)
913 {
914 const struct kvm_cpuid_entry2 *entry;
915 int i;
916
917 for (i = 0; i < cpuid->nent; i++) {
918 entry = &cpuid->entries[i];
919
920 /*
921 * The output registers in kvm_cpuid_entry2 are in alphabetical
922 * order, but kvm_x86_cpu_feature matches that mess, so yay
923 * pointer shenanigans!
924 */
925 if (entry->function == function && entry->index == index)
926 return ((&entry->eax)[reg] & GENMASK(hi, lo)) >> lo;
927 }
928
929 return 0;
930 }
931
932 bool kvm_cpuid_has(const struct kvm_cpuid2 *cpuid,
933 struct kvm_x86_cpu_feature feature)
934 {
935 return __kvm_cpu_has(cpuid, feature.function, feature.index,
936 feature.reg, feature.bit, feature.bit);
937 }
938
939 uint32_t kvm_cpuid_property(const struct kvm_cpuid2 *cpuid,
940 struct kvm_x86_cpu_property property)
941 {
942 return __kvm_cpu_has(cpuid, property.function, property.index,
943 property.reg, property.lo_bit, property.hi_bit);
944 }
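/*
 * Illustrative usage sketch (added for clarity, not in the original source):
 * single-bit features and multi-bit properties go through the same lookup,
 * e.g.:
 *
 *	kvm_cpuid_has(kvm_get_supported_cpuid(), X86_FEATURE_XSAVE);
 *	kvm_cpuid_property(kvm_get_supported_cpuid(), X86_PROPERTY_MAX_PHY_ADDR);
 *
 * Both funnel into __kvm_cpu_has(), with lo_bit == hi_bit for the boolean
 * case.
 */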
945
946 uint64_t kvm_get_feature_msr(uint64_t msr_index)
947 {
948 struct {
949 struct kvm_msrs header;
950 struct kvm_msr_entry entry;
951 } buffer = {};
952 int r, kvm_fd;
953
954 buffer.header.nmsrs = 1;
955 buffer.entry.index = msr_index;
956 kvm_fd = open_kvm_dev_path_or_exit();
957
958 r = __kvm_ioctl(kvm_fd, KVM_GET_MSRS, &buffer.header);
959 TEST_ASSERT(r == 1, KVM_IOCTL_ERROR(KVM_GET_MSRS, r));
960
961 close(kvm_fd);
962 return buffer.entry.data;
963 }
964
965 void __vm_xsave_require_permission(uint64_t xfeature, const char *name)
966 {
967 int kvm_fd;
968 u64 bitmask;
969 long rc;
970 struct kvm_device_attr attr = {
971 .group = 0,
972 .attr = KVM_X86_XCOMP_GUEST_SUPP,
973 .addr = (unsigned long) &bitmask,
974 };
975
976 TEST_ASSERT(!kvm_supported_cpuid,
977 "kvm_get_supported_cpuid() cannot be used before ARCH_REQ_XCOMP_GUEST_PERM");
978
979 TEST_ASSERT(is_power_of_2(xfeature),
980 "Dynamic XFeatures must be enabled one at a time");
981
982 kvm_fd = open_kvm_dev_path_or_exit();
983 rc = __kvm_ioctl(kvm_fd, KVM_GET_DEVICE_ATTR, &attr);
984 close(kvm_fd);
985
986 if (rc == -1 && (errno == ENXIO || errno == EINVAL))
987 __TEST_REQUIRE(0, "KVM_X86_XCOMP_GUEST_SUPP not supported");
988
989 TEST_ASSERT(rc == 0, "KVM_GET_DEVICE_ATTR(0, KVM_X86_XCOMP_GUEST_SUPP) error: %ld", rc);
990
991 __TEST_REQUIRE(bitmask & xfeature,
992 "Required XSAVE feature '%s' not supported", name);
993
994 TEST_REQUIRE(!syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM, ilog2(xfeature)));
995
996 rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_GUEST_PERM, &bitmask);
997 TEST_ASSERT(rc == 0, "prctl(ARCH_GET_XCOMP_GUEST_PERM) error: %ld", rc);
998 TEST_ASSERT(bitmask & xfeature,
999 "'%s' (0x%lx) not permitted after prctl(ARCH_REQ_XCOMP_GUEST_PERM) permitted=0x%lx",
1000 name, xfeature, bitmask);
1001 }
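/*
 * Illustrative sketch (added for clarity, not in the original source): a test
 * for a dynamically enabled XSAVE feature, e.g. AMX tile data, would request
 * permission before creating any vCPUs, roughly:
 *
 *	__vm_xsave_require_permission(XFEATURE_MASK_XTILE_DATA, "XTILE_DATA");
 *
 * The ordering matters because, per the assert above, KVM_GET_SUPPORTED_CPUID
 * must not be cached before ARCH_REQ_XCOMP_GUEST_PERM has been granted.
 */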
1002
1003 void vcpu_init_cpuid(struct kvm_vcpu *vcpu, const struct kvm_cpuid2 *cpuid)
1004 {
1005 TEST_ASSERT(cpuid != vcpu->cpuid, "@cpuid can't be the vCPU's CPUID");
1006
1007 /* Allow overriding the default CPUID. */
1008 if (vcpu->cpuid && vcpu->cpuid->nent < cpuid->nent) {
1009 free(vcpu->cpuid);
1010 vcpu->cpuid = NULL;
1011 }
1012
1013 if (!vcpu->cpuid)
1014 vcpu->cpuid = allocate_kvm_cpuid2(cpuid->nent);
1015
1016 memcpy(vcpu->cpuid, cpuid, kvm_cpuid2_size(cpuid->nent));
1017 vcpu_set_cpuid(vcpu);
1018 }
1019
1020 void vcpu_set_cpuid_property(struct kvm_vcpu *vcpu,
1021 struct kvm_x86_cpu_property property,
1022 uint32_t value)
1023 {
1024 struct kvm_cpuid_entry2 *entry;
1025
1026 entry = __vcpu_get_cpuid_entry(vcpu, property.function, property.index);
1027
1028 (&entry->eax)[property.reg] &= ~GENMASK(property.hi_bit, property.lo_bit);
1029 (&entry->eax)[property.reg] |= value << property.lo_bit;
1030
1031 vcpu_set_cpuid(vcpu);
1032
1033 /* Sanity check that @value doesn't exceed the bounds in any way. */
1034 TEST_ASSERT_EQ(kvm_cpuid_property(vcpu->cpuid, property), value);
1035 }
1036
1037 void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, uint32_t function)
1038 {
1039 struct kvm_cpuid_entry2 *entry = vcpu_get_cpuid_entry(vcpu, function);
1040
1041 entry->eax = 0;
1042 entry->ebx = 0;
1043 entry->ecx = 0;
1044 entry->edx = 0;
1045 vcpu_set_cpuid(vcpu);
1046 }
1047
1048 void vcpu_set_or_clear_cpuid_feature(struct kvm_vcpu *vcpu,
1049 struct kvm_x86_cpu_feature feature,
1050 bool set)
1051 {
1052 struct kvm_cpuid_entry2 *entry;
1053 u32 *reg;
1054
1055 entry = __vcpu_get_cpuid_entry(vcpu, feature.function, feature.index);
1056 reg = (&entry->eax) + feature.reg;
1057
1058 if (set)
1059 *reg |= BIT(feature.bit);
1060 else
1061 *reg &= ~BIT(feature.bit);
1062
1063 vcpu_set_cpuid(vcpu);
1064 }
1065
1066 uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index)
1067 {
1068 struct {
1069 struct kvm_msrs header;
1070 struct kvm_msr_entry entry;
1071 } buffer = {};
1072
1073 buffer.header.nmsrs = 1;
1074 buffer.entry.index = msr_index;
1075
1076 vcpu_msrs_get(vcpu, &buffer.header);
1077
1078 return buffer.entry.data;
1079 }
1080
1081 int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, uint64_t msr_value)
1082 {
1083 struct {
1084 struct kvm_msrs header;
1085 struct kvm_msr_entry entry;
1086 } buffer = {};
1087
1088 memset(&buffer, 0, sizeof(buffer));
1089 buffer.header.nmsrs = 1;
1090 buffer.entry.index = msr_index;
1091 buffer.entry.data = msr_value;
1092
1093 return __vcpu_ioctl(vcpu, KVM_SET_MSRS, &buffer.header);
1094 }
1095
1096 void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
1097 {
1098 va_list ap;
1099 struct kvm_regs regs;
1100
1101 TEST_ASSERT(num >= 1 && num <= 6, "Unsupported number of args,\n"
1102 " num: %u",
1103 num);
1104
1105 va_start(ap, num);
1106 vcpu_regs_get(vcpu, &regs);
1107
1108 if (num >= 1)
1109 regs.rdi = va_arg(ap, uint64_t);
1110
1111 if (num >= 2)
1112 regs.rsi = va_arg(ap, uint64_t);
1113
1114 if (num >= 3)
1115 regs.rdx = va_arg(ap, uint64_t);
1116
1117 if (num >= 4)
1118 regs.rcx = va_arg(ap, uint64_t);
1119
1120 if (num >= 5)
1121 regs.r8 = va_arg(ap, uint64_t);
1122
1123 if (num >= 6)
1124 regs.r9 = va_arg(ap, uint64_t);
1125
1126 vcpu_regs_set(vcpu, &regs);
1127 va_end(ap);
1128 }
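/*
 * Illustrative usage sketch (added for clarity, not in the original source):
 * arguments are marshalled into the System V AMD64 integer-argument registers
 * in order (rdi, rsi, rdx, rcx, r8, r9), so a guest_code(a, b) entry point is
 * armed with, e.g.:
 *
 *	vcpu_args_set(vcpu, 2, (uint64_t)a, (uint64_t)b);
 */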
1129
1130 void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
1131 {
1132 struct kvm_regs regs;
1133 struct kvm_sregs sregs;
1134
1135 fprintf(stream, "%*svCPU ID: %u\n", indent, "", vcpu->id);
1136
1137 fprintf(stream, "%*sregs:\n", indent + 2, "");
1138 vcpu_regs_get(vcpu, &regs);
1139 regs_dump(stream, ®s, indent + 4);
1140
1141 fprintf(stream, "%*ssregs:\n", indent + 2, "");
1142 vcpu_sregs_get(vcpu, &sregs);
1143 sregs_dump(stream, &sregs, indent + 4);
1144 }
1145
1146 static struct kvm_msr_list *__kvm_get_msr_index_list(bool feature_msrs)
1147 {
1148 struct kvm_msr_list *list;
1149 struct kvm_msr_list nmsrs;
1150 int kvm_fd, r;
1151
1152 kvm_fd = open_kvm_dev_path_or_exit();
1153
1154 nmsrs.nmsrs = 0;
1155 if (!feature_msrs)
1156 r = __kvm_ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, &nmsrs);
1157 else
1158 r = __kvm_ioctl(kvm_fd, KVM_GET_MSR_FEATURE_INDEX_LIST, &nmsrs);
1159
1160 TEST_ASSERT(r == -1 && errno == E2BIG,
1161 "Expected -E2BIG, got rc: %i errno: %i (%s)",
1162 r, errno, strerror(errno));
1163
1164 list = malloc(sizeof(*list) + nmsrs.nmsrs * sizeof(list->indices[0]));
1165 TEST_ASSERT(list, "-ENOMEM when allocating MSR index list");
1166 list->nmsrs = nmsrs.nmsrs;
1167
1168 if (!feature_msrs)
1169 kvm_ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list);
1170 else
1171 kvm_ioctl(kvm_fd, KVM_GET_MSR_FEATURE_INDEX_LIST, list);
1172 close(kvm_fd);
1173
1174 TEST_ASSERT(list->nmsrs == nmsrs.nmsrs,
1175 "Number of MSRs in list changed, was %d, now %d",
1176 nmsrs.nmsrs, list->nmsrs);
1177 return list;
1178 }
1179
1180 const struct kvm_msr_list *kvm_get_msr_index_list(void)
1181 {
1182 static const struct kvm_msr_list *list;
1183
1184 if (!list)
1185 list = __kvm_get_msr_index_list(false);
1186 return list;
1187 }
1188
1189
1190 const struct kvm_msr_list *kvm_get_feature_msr_index_list(void)
1191 {
1192 static const struct kvm_msr_list *list;
1193
1194 if (!list)
1195 list = __kvm_get_msr_index_list(true);
1196 return list;
1197 }
1198
1199 bool kvm_msr_is_in_save_restore_list(uint32_t msr_index)
1200 {
1201 const struct kvm_msr_list *list = kvm_get_msr_index_list();
1202 int i;
1203
1204 for (i = 0; i < list->nmsrs; ++i) {
1205 if (list->indices[i] == msr_index)
1206 return true;
1207 }
1208
1209 return false;
1210 }
1211
1212 static void vcpu_save_xsave_state(struct kvm_vcpu *vcpu,
1213 struct kvm_x86_state *state)
1214 {
1215 int size = vm_check_cap(vcpu->vm, KVM_CAP_XSAVE2);
1216
1217 if (size) {
1218 state->xsave = malloc(size);
1219 vcpu_xsave2_get(vcpu, state->xsave);
1220 } else {
1221 state->xsave = malloc(sizeof(struct kvm_xsave));
1222 vcpu_xsave_get(vcpu, state->xsave);
1223 }
1224 }
1225
1226 struct kvm_x86_state *vcpu_save_state(struct kvm_vcpu *vcpu)
1227 {
1228 const struct kvm_msr_list *msr_list = kvm_get_msr_index_list();
1229 struct kvm_x86_state *state;
1230 int i;
1231
1232 static int nested_size = -1;
1233
1234 if (nested_size == -1) {
1235 nested_size = kvm_check_cap(KVM_CAP_NESTED_STATE);
1236 TEST_ASSERT(nested_size <= sizeof(state->nested_),
1237 "Nested state size too big, %i > %zi",
1238 nested_size, sizeof(state->nested_));
1239 }
1240
1241 /*
1242 * When KVM exits to userspace with KVM_EXIT_IO, KVM guarantees
1243 * guest state is consistent only after userspace re-enters the
1244 * kernel with KVM_RUN. Complete IO prior to migrating state
1245 * to a new VM.
1246 */
1247 vcpu_run_complete_io(vcpu);
1248
1249 state = malloc(sizeof(*state) + msr_list->nmsrs * sizeof(state->msrs.entries[0]));
1250 TEST_ASSERT(state, "-ENOMEM when allocating kvm state");
1251
1252 vcpu_events_get(vcpu, &state->events);
1253 vcpu_mp_state_get(vcpu, &state->mp_state);
1254 vcpu_regs_get(vcpu, &state->regs);
1255 vcpu_save_xsave_state(vcpu, state);
1256
1257 if (kvm_has_cap(KVM_CAP_XCRS))
1258 vcpu_xcrs_get(vcpu, &state->xcrs);
1259
1260 vcpu_sregs_get(vcpu, &state->sregs);
1261
1262 if (nested_size) {
1263 state->nested.size = sizeof(state->nested_);
1264
1265 vcpu_nested_state_get(vcpu, &state->nested);
1266 TEST_ASSERT(state->nested.size <= nested_size,
1267 "Nested state size too big, %i (KVM_CHECK_CAP gave %i)",
1268 state->nested.size, nested_size);
1269 } else {
1270 state->nested.size = 0;
1271 }
1272
1273 state->msrs.nmsrs = msr_list->nmsrs;
1274 for (i = 0; i < msr_list->nmsrs; i++)
1275 state->msrs.entries[i].index = msr_list->indices[i];
1276 vcpu_msrs_get(vcpu, &state->msrs);
1277
1278 vcpu_debugregs_get(vcpu, &state->debugregs);
1279
1280 return state;
1281 }
1282
1283 void vcpu_load_state(struct kvm_vcpu *vcpu, struct kvm_x86_state *state)
1284 {
1285 vcpu_sregs_set(vcpu, &state->sregs);
1286 vcpu_msrs_set(vcpu, &state->msrs);
1287
1288 if (kvm_has_cap(KVM_CAP_XCRS))
1289 vcpu_xcrs_set(vcpu, &state->xcrs);
1290
1291 vcpu_xsave_set(vcpu, state->xsave);
1292 vcpu_events_set(vcpu, &state->events);
1293 vcpu_mp_state_set(vcpu, &state->mp_state);
1294 vcpu_debugregs_set(vcpu, &state->debugregs);
1295 vcpu_regs_set(vcpu, &state->regs);
1296
1297 if (state->nested.size)
1298 vcpu_nested_state_set(vcpu, &state->nested);
1299 }
1300
1301 void kvm_x86_state_cleanup(struct kvm_x86_state *state)
1302 {
1303 free(state->xsave);
1304 free(state);
1305 }
1306
1307 void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits)
1308 {
1309 if (!kvm_cpu_has_p(X86_PROPERTY_MAX_PHY_ADDR)) {
1310 *pa_bits = kvm_cpu_has(X86_FEATURE_PAE) ? 36 : 32;
1311 *va_bits = 32;
1312 } else {
1313 *pa_bits = kvm_cpu_property(X86_PROPERTY_MAX_PHY_ADDR);
1314 *va_bits = kvm_cpu_property(X86_PROPERTY_MAX_VIRT_ADDR);
1315 }
1316 }
1317
1318 void kvm_init_vm_address_properties(struct kvm_vm *vm)
1319 {
1320 if (is_sev_vm(vm)) {
1321 vm->arch.sev_fd = open_sev_dev_path_or_exit();
1322 vm->arch.c_bit = BIT_ULL(this_cpu_property(X86_PROPERTY_SEV_C_BIT));
1323 vm->gpa_tag_mask = vm->arch.c_bit;
1324 } else {
1325 vm->arch.sev_fd = -1;
1326 }
1327 }
1328
1329 const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,
1330 uint32_t function, uint32_t index)
1331 {
1332 int i;
1333
1334 for (i = 0; i < cpuid->nent; i++) {
1335 if (cpuid->entries[i].function == function &&
1336 cpuid->entries[i].index == index)
1337 return &cpuid->entries[i];
1338 }
1339
1340 TEST_FAIL("CPUID function 0x%x index 0x%x not found ", function, index);
1341
1342 return NULL;
1343 }
1344
1345 #define X86_HYPERCALL(inputs...) \
1346 ({ \
1347 uint64_t r; \
1348 \
1349 asm volatile("test %[use_vmmcall], %[use_vmmcall]\n\t" \
1350 "jnz 1f\n\t" \
1351 "vmcall\n\t" \
1352 "jmp 2f\n\t" \
1353 "1: vmmcall\n\t" \
1354 "2:" \
1355 : "=a"(r) \
1356 : [use_vmmcall] "r" (host_cpu_is_amd_compatible), \
1357 inputs); \
1358 \
1359 r; \
1360 })
1361
1362 uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
1363 uint64_t a3)
1364 {
1365 return X86_HYPERCALL("a"(nr), "b"(a0), "c"(a1), "d"(a2), "S"(a3));
1366 }
1367
1368 uint64_t __xen_hypercall(uint64_t nr, uint64_t a0, void *a1)
1369 {
1370 return X86_HYPERCALL("a"(nr), "D"(a0), "S"(a1));
1371 }
1372
1373 void xen_hypercall(uint64_t nr, uint64_t a0, void *a1)
1374 {
1375 GUEST_ASSERT(!__xen_hypercall(nr, a0, a1));
1376 }
1377
1378 unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
1379 {
1380 const unsigned long num_ht_pages = 12 << (30 - vm->page_shift); /* 12 GiB */
1381 unsigned long ht_gfn, max_gfn, max_pfn;
1382 uint8_t maxphyaddr, guest_maxphyaddr;
1383
1384 /*
1385 * Use "guest MAXPHYADDR" from KVM if it's available. Guest MAXPHYADDR
1386 * enumerates the max _mappable_ GPA, which can be less than the raw
1387 * MAXPHYADDR, e.g. if MAXPHYADDR=52, KVM is using TDP, and the CPU
1388 * doesn't support 5-level TDP.
1389 */
1390 guest_maxphyaddr = kvm_cpu_property(X86_PROPERTY_GUEST_MAX_PHY_ADDR);
1391 guest_maxphyaddr = guest_maxphyaddr ?: vm->pa_bits;
1392 TEST_ASSERT(guest_maxphyaddr <= vm->pa_bits,
1393 "Guest MAXPHYADDR should never be greater than raw MAXPHYADDR");
1394
1395 max_gfn = (1ULL << (guest_maxphyaddr - vm->page_shift)) - 1;
1396
1397 /* Avoid reserved HyperTransport region on AMD or Hygon processors. */
1398 if (!host_cpu_is_amd_compatible)
1399 return max_gfn;
1400
1401 /* On parts with <40 physical address bits, the area is fully hidden */
1402 if (vm->pa_bits < 40)
1403 return max_gfn;
1404
1405 /* Before family 17h, the HyperTransport area is just below 1T. */
1406 ht_gfn = (1 << 28) - num_ht_pages;
1407 if (this_cpu_family() < 0x17)
1408 goto done;
1409
1410 /*
1411 * Otherwise it's at the top of the physical address space, possibly
1412 * reduced due to SME or CSV by bits 11:6 of CPUID[0x8000001f].EBX. Use
1413 * the old conservative value if MAXPHYADDR is not enumerated.
1414 */
1415 if (!this_cpu_has_p(X86_PROPERTY_MAX_PHY_ADDR))
1416 goto done;
1417
1418 maxphyaddr = this_cpu_property(X86_PROPERTY_MAX_PHY_ADDR);
1419 max_pfn = (1ULL << (maxphyaddr - vm->page_shift)) - 1;
1420
1421 if (this_cpu_has_p(X86_PROPERTY_PHYS_ADDR_REDUCTION))
1422 max_pfn >>= this_cpu_property(X86_PROPERTY_PHYS_ADDR_REDUCTION);
1423
1424 ht_gfn = max_pfn - num_ht_pages;
1425 done:
1426 return min(max_gfn, ht_gfn - 1);
1427 }
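/*
 * Worked example (added for clarity): with 4KiB pages, num_ht_pages is
 * 12GiB / 4KiB = 0x300000 pages and (1 << 28) is the GFN of the 1TiB
 * boundary, so the pre-Zen path above yields ht_gfn = 0xfd00000, i.e. the
 * HyperTransport hole starting at GPA 0xfd_0000_0000.
 */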
1428
1429 void kvm_selftest_arch_init(void)
1430 {
1431 host_cpu_is_intel = this_cpu_is_intel();
1432 host_cpu_is_amd = this_cpu_is_amd();
1433 host_cpu_is_hygon = this_cpu_is_hygon();
1434 host_cpu_is_amd_compatible = host_cpu_is_amd || host_cpu_is_hygon;
1435 is_forced_emulation_enabled = kvm_is_forced_emulation_enabled();
1436
1437 kvm_init_pmu_errata();
1438 }
1439
1440 bool sys_clocksource_is_based_on_tsc(void)
1441 {
1442 char *clk_name = sys_get_cur_clocksource();
1443 bool ret = !strcmp(clk_name, "tsc\n") ||
1444 !strcmp(clk_name, "hyperv_clocksource_tsc_page\n");
1445
1446 free(clk_name);
1447
1448 return ret;
1449 }
1450
1451 bool kvm_arch_has_default_irqchip(void)
1452 {
1453 return true;
1454 }
1455
1456 void setup_smram(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
1457 uint64_t smram_gpa,
1458 const void *smi_handler, size_t handler_size)
1459 {
1460 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, smram_gpa,
1461 SMRAM_MEMSLOT, SMRAM_PAGES, 0);
1462 TEST_ASSERT(vm_phy_pages_alloc(vm, SMRAM_PAGES, smram_gpa,
1463 SMRAM_MEMSLOT) == smram_gpa,
1464 "Could not allocate guest physical addresses for SMRAM");
1465
1466 memset(addr_gpa2hva(vm, smram_gpa), 0x0, SMRAM_SIZE);
1467 memcpy(addr_gpa2hva(vm, smram_gpa) + 0x8000, smi_handler, handler_size);
1468 vcpu_set_msr(vcpu, MSR_IA32_SMBASE, smram_gpa);
1469 }
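/*
 * Note (added for clarity): the handler is copied to smram_gpa + 0x8000
 * because the x86 SMI entry point is SMBASE + 0x8000, and MSR_IA32_SMBASE is
 * pointed at the start of the region just above.
 */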
1470
1471 void inject_smi(struct kvm_vcpu *vcpu)
1472 {
1473 struct kvm_vcpu_events events;
1474
1475 vcpu_events_get(vcpu, &events);
1476 events.smi.pending = 1;
1477 events.flags |= KVM_VCPUEVENT_VALID_SMM;
1478 vcpu_events_set(vcpu, &events);
1479 }
1480