xref: /linux/tools/testing/selftests/kvm/lib/x86/processor.c (revision 014dfb7b9bf3ff49261b47fbe56b42fc8ed06fc5)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2018, Google LLC.
4  */
5 
6 #include "linux/bitmap.h"
7 #include "test_util.h"
8 #include "kvm_util.h"
9 #include "pmu.h"
10 #include "processor.h"
11 #include "smm.h"
12 #include "svm_util.h"
13 #include "sev.h"
14 #include "vmx.h"
15 
16 #ifndef NUM_INTERRUPTS
17 #define NUM_INTERRUPTS 256
18 #endif
19 
20 #define KERNEL_CS	0x8
21 #define KERNEL_DS	0x10
22 #define KERNEL_TSS	0x18
23 
24 gva_t exception_handlers;
25 bool host_cpu_is_amd;
26 bool host_cpu_is_intel;
27 bool host_cpu_is_hygon;
28 bool host_cpu_is_amd_compatible;
29 bool is_forced_emulation_enabled;
30 u64 guest_tsc_khz;
31 
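/*
 * Translate an exception vector into a human-readable string for assertion
 * messages.  Vector 0 doubles as "no exception" in test expectations; a #DE
 * that was actually delivered (and fixed up) is reported via
 * KVM_MAGIC_DE_VECTOR instead, see kvm_fixup_exception().
 */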
32 const char *ex_str(int vector)
33 {
34 	switch (vector) {
35 #define VEC_STR(v) case v##_VECTOR: return "#" #v
36 	case DE_VECTOR: return "no exception";
37 	case KVM_MAGIC_DE_VECTOR: return "#DE";
38 	VEC_STR(DB);
39 	VEC_STR(NMI);
40 	VEC_STR(BP);
41 	VEC_STR(OF);
42 	VEC_STR(BR);
43 	VEC_STR(UD);
44 	VEC_STR(NM);
45 	VEC_STR(DF);
46 	VEC_STR(TS);
47 	VEC_STR(NP);
48 	VEC_STR(SS);
49 	VEC_STR(GP);
50 	VEC_STR(PF);
51 	VEC_STR(MF);
52 	VEC_STR(AC);
53 	VEC_STR(MC);
54 	VEC_STR(XM);
55 	VEC_STR(VE);
56 	VEC_STR(CP);
57 	VEC_STR(HV);
58 	VEC_STR(VC);
59 	VEC_STR(SX);
60 	default: return "#??";
61 #undef VEC_STR
62 	}
63 }
64 
65 static void regs_dump(FILE *stream, struct kvm_regs *regs, u8 indent)
66 {
67 	fprintf(stream, "%*srax: 0x%.16llx rbx: 0x%.16llx "
68 		"rcx: 0x%.16llx rdx: 0x%.16llx\n",
69 		indent, "",
70 		regs->rax, regs->rbx, regs->rcx, regs->rdx);
71 	fprintf(stream, "%*srsi: 0x%.16llx rdi: 0x%.16llx "
72 		"rsp: 0x%.16llx rbp: 0x%.16llx\n",
73 		indent, "",
74 		regs->rsi, regs->rdi, regs->rsp, regs->rbp);
75 	fprintf(stream, "%*sr8:  0x%.16llx r9:  0x%.16llx "
76 		"r10: 0x%.16llx r11: 0x%.16llx\n",
77 		indent, "",
78 		regs->r8, regs->r9, regs->r10, regs->r11);
79 	fprintf(stream, "%*sr12: 0x%.16llx r13: 0x%.16llx "
80 		"r14: 0x%.16llx r15: 0x%.16llx\n",
81 		indent, "",
82 		regs->r12, regs->r13, regs->r14, regs->r15);
83 	fprintf(stream, "%*srip: 0x%.16llx rfl: 0x%.16llx\n",
84 		indent, "",
85 		regs->rip, regs->rflags);
86 }
87 
88 static void segment_dump(FILE *stream, struct kvm_segment *segment,
89 			 u8 indent)
90 {
91 	fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.8x "
92 		"selector: 0x%.4x type: 0x%.2x\n",
93 		indent, "", segment->base, segment->limit,
94 		segment->selector, segment->type);
95 	fprintf(stream, "%*spresent: 0x%.2x dpl: 0x%.2x "
96 		"db: 0x%.2x s: 0x%.2x l: 0x%.2x\n",
97 		indent, "", segment->present, segment->dpl,
98 		segment->db, segment->s, segment->l);
99 	fprintf(stream, "%*sg: 0x%.2x avl: 0x%.2x "
100 		"unusable: 0x%.2x padding: 0x%.2x\n",
101 		indent, "", segment->g, segment->avl,
102 		segment->unusable, segment->padding);
103 }
104 
105 static void dtable_dump(FILE *stream, struct kvm_dtable *dtable,
106 			u8 indent)
107 {
108 	fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.4x "
109 		"padding: 0x%.4x 0x%.4x 0x%.4x\n",
110 		indent, "", dtable->base, dtable->limit,
111 		dtable->padding[0], dtable->padding[1], dtable->padding[2]);
112 }
113 
114 static void sregs_dump(FILE *stream, struct kvm_sregs *sregs, u8 indent)
115 {
116 	unsigned int i;
117 
118 	fprintf(stream, "%*scs:\n", indent, "");
119 	segment_dump(stream, &sregs->cs, indent + 2);
120 	fprintf(stream, "%*sds:\n", indent, "");
121 	segment_dump(stream, &sregs->ds, indent + 2);
122 	fprintf(stream, "%*ses:\n", indent, "");
123 	segment_dump(stream, &sregs->es, indent + 2);
124 	fprintf(stream, "%*sfs:\n", indent, "");
125 	segment_dump(stream, &sregs->fs, indent + 2);
126 	fprintf(stream, "%*sgs:\n", indent, "");
127 	segment_dump(stream, &sregs->gs, indent + 2);
128 	fprintf(stream, "%*sss:\n", indent, "");
129 	segment_dump(stream, &sregs->ss, indent + 2);
130 	fprintf(stream, "%*str:\n", indent, "");
131 	segment_dump(stream, &sregs->tr, indent + 2);
132 	fprintf(stream, "%*sldt:\n", indent, "");
133 	segment_dump(stream, &sregs->ldt, indent + 2);
134 
135 	fprintf(stream, "%*sgdt:\n", indent, "");
136 	dtable_dump(stream, &sregs->gdt, indent + 2);
137 	fprintf(stream, "%*sidt:\n", indent, "");
138 	dtable_dump(stream, &sregs->idt, indent + 2);
139 
140 	fprintf(stream, "%*scr0: 0x%.16llx cr2: 0x%.16llx "
141 		"cr3: 0x%.16llx cr4: 0x%.16llx\n",
142 		indent, "",
143 		sregs->cr0, sregs->cr2, sregs->cr3, sregs->cr4);
144 	fprintf(stream, "%*scr8: 0x%.16llx efer: 0x%.16llx "
145 		"apic_base: 0x%.16llx\n",
146 		indent, "",
147 		sregs->cr8, sregs->efer, sregs->apic_base);
148 
149 	fprintf(stream, "%*sinterrupt_bitmap:\n", indent, "");
150 	for (i = 0; i < (KVM_NR_INTERRUPTS + 63) / 64; i++) {
151 		fprintf(stream, "%*s%.16llx\n", indent + 2, "",
152 			sregs->interrupt_bitmap[i]);
153 	}
154 }
155 
156 bool kvm_is_tdp_enabled(void)
157 {
158 	if (host_cpu_is_intel)
159 		return get_kvm_intel_param_bool("ept");
160 	else
161 		return get_kvm_amd_param_bool("npt");
162 }
163 
164 static void virt_mmu_init(struct kvm_vm *vm, struct kvm_mmu *mmu,
165 			  struct pte_masks *pte_masks)
166 {
167 	/* If needed, create the top-level page table. */
168 	if (!mmu->pgd_created) {
169 		mmu->pgd = vm_alloc_page_table(vm);
170 		mmu->pgd_created = true;
171 		mmu->arch.pte_masks = *pte_masks;
172 	}
173 
174 	TEST_ASSERT(mmu->pgtable_levels == 4 || mmu->pgtable_levels == 5,
175 		    "Selftests MMU only supports 4-level and 5-level paging, not %u-level paging",
176 		    mmu->pgtable_levels);
177 }
178 
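/*
 * Native x86-64 paging uses the architectural PTE bit layout: Present (bit 0),
 * Writable (bit 1), User (bit 2), Accessed (bit 5), Dirty (bit 6), Huge/PS
 * (bit 7) and NX (bit 63).  There is no dedicated executable bit, hence
 * .executable is zero, and the C (encrypted) and S (shared) bits come from the
 * VM's configuration and are zero for non-protected VMs.
 */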
179 void virt_arch_pgd_alloc(struct kvm_vm *vm)
180 {
181 	TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
182 		    "Unknown or unsupported guest mode: 0x%x", vm->mode);
183 
184 	struct pte_masks pte_masks = (struct pte_masks){
185 		.present	=	BIT_ULL(0),
186 		.writable	=	BIT_ULL(1),
187 		.user		=	BIT_ULL(2),
188 		.accessed	=	BIT_ULL(5),
189 		.dirty		=	BIT_ULL(6),
190 		.huge		=	BIT_ULL(7),
191 		.nx		=	BIT_ULL(63),
192 		.executable	=	0,
193 		.c		=	vm->arch.c_bit,
194 		.s		=	vm->arch.s_bit,
195 	};
196 
197 	virt_mmu_init(vm, &vm->mmu, &pte_masks);
198 }
199 
200 void tdp_mmu_init(struct kvm_vm *vm, int pgtable_levels,
201 		  struct pte_masks *pte_masks)
202 {
203 	TEST_ASSERT(!vm->stage2_mmu.pgtable_levels, "TDP MMU already initialized");
204 
205 	vm->stage2_mmu.pgtable_levels = pgtable_levels;
206 	virt_mmu_init(vm, &vm->stage2_mmu, pte_masks);
207 }
208 
209 static void *virt_get_pte(struct kvm_vm *vm, struct kvm_mmu *mmu,
210 			  u64 *parent_pte, gva_t gva, int level)
211 {
212 	u64 pt_gpa = PTE_GET_PA(*parent_pte);
213 	u64 *page_table = addr_gpa2hva(vm, pt_gpa);
214 	int index = (gva >> PG_LEVEL_SHIFT(level)) & 0x1ffu;
215 
216 	TEST_ASSERT((*parent_pte == mmu->pgd) || is_present_pte(mmu, parent_pte),
217 		    "Parent PTE (level %d) not PRESENT for gva: 0x%08lx",
218 		    level + 1, gva);
219 
220 	return &page_table[index];
221 }
222 
223 static u64 *virt_create_upper_pte(struct kvm_vm *vm,
224 				  struct kvm_mmu *mmu,
225 				  u64 *parent_pte,
226 				  gva_t gva,
227 				  u64 paddr,
228 				  int current_level,
229 				  int target_level)
230 {
231 	u64 *pte = virt_get_pte(vm, mmu, parent_pte, gva, current_level);
232 
233 	paddr = vm_untag_gpa(vm, paddr);
234 
235 	if (!is_present_pte(mmu, pte)) {
236 		*pte = PTE_PRESENT_MASK(mmu) | PTE_READABLE_MASK(mmu) |
237 		       PTE_WRITABLE_MASK(mmu) | PTE_EXECUTABLE_MASK(mmu) |
238 		       PTE_ALWAYS_SET_MASK(mmu);
239 		if (current_level == target_level)
240 			*pte |= PTE_HUGE_MASK(mmu) | (paddr & PHYSICAL_PAGE_MASK);
241 		else
242 			*pte |= vm_alloc_page_table(vm) & PHYSICAL_PAGE_MASK;
243 	} else {
244 		/*
245 		 * Entry already present.  Assert that the caller doesn't want
246 		 * a hugepage at this level, and that there isn't a hugepage at
247 		 * this level.
248 		 */
249 		TEST_ASSERT(current_level != target_level,
250 			    "Cannot create hugepage at level: %u, gva: 0x%lx",
251 			    current_level, gva);
252 		TEST_ASSERT(!is_huge_pte(mmu, pte),
253 			    "Cannot create page table at level: %u, gva: 0x%lx",
254 			    current_level, gva);
255 	}
256 	return pte;
257 }
258 
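/*
 * Map @gva to @paddr in @mmu with a page of PG_LEVEL_SIZE(@level) bytes,
 * allocating any missing intermediate page tables along the way.  A @level
 * above PG_LEVEL_4K yields a hugepage mapping.
 */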
259 void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, gva_t gva,
260 		   u64 paddr, int level)
261 {
262 	const u64 pg_size = PG_LEVEL_SIZE(level);
263 	u64 *pte = &mmu->pgd;
264 	int current_level;
265 
266 	TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
267 		    "Unknown or unsupported guest mode: 0x%x", vm->mode);
268 
269 	TEST_ASSERT((gva % pg_size) == 0,
270 		    "Virtual address not aligned,\n"
271 		    "gva: 0x%lx page size: 0x%lx", gva, pg_size);
272 	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),
273 		    "Invalid virtual address, gva: 0x%lx", gva);
274 	TEST_ASSERT((paddr % pg_size) == 0,
275 		    "Physical address not aligned,\n"
276 		    "  paddr: 0x%lx page size: 0x%lx", paddr, pg_size);
277 	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
278 		    "Physical address beyond maximum supported,\n"
279 		    "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
280 		    paddr, vm->max_gfn, vm->page_size);
281 	TEST_ASSERT(vm_untag_gpa(vm, paddr) == paddr,
282 		    "Unexpected bits in paddr: %lx", paddr);
283 
284 	TEST_ASSERT(!PTE_EXECUTABLE_MASK(mmu) || !PTE_NX_MASK(mmu),
285 		    "X and NX bit masks cannot be used simultaneously");
286 
287 	/*
288 	 * Allocate upper level page tables, if not already present.  Return
289 	 * early if a hugepage was created.
290 	 */
291 	for (current_level = mmu->pgtable_levels;
292 	     current_level > PG_LEVEL_4K;
293 	     current_level--) {
294 		pte = virt_create_upper_pte(vm, mmu, pte, gva, paddr,
295 					    current_level, level);
296 		if (is_huge_pte(mmu, pte))
297 			return;
298 	}
299 
300 	/* Fill in page table entry. */
301 	pte = virt_get_pte(vm, mmu, pte, gva, PG_LEVEL_4K);
302 	TEST_ASSERT(!is_present_pte(mmu, pte),
303 		    "PTE already present for 4k page at gva: 0x%lx", gva);
304 	*pte = PTE_PRESENT_MASK(mmu) | PTE_READABLE_MASK(mmu) |
305 	       PTE_WRITABLE_MASK(mmu) | PTE_EXECUTABLE_MASK(mmu) |
306 	       PTE_ALWAYS_SET_MASK(mmu) | (paddr & PHYSICAL_PAGE_MASK);
307 
308 	/*
309 	 * Neither SEV nor TDX supports shared page tables, so only the final
310 	 * leaf PTE needs the C/S-bit set manually.
311 	 */
312 	if (vm_is_gpa_protected(vm, paddr))
313 		*pte |= PTE_C_BIT_MASK(mmu);
314 	else
315 		*pte |= PTE_S_BIT_MASK(mmu);
316 }
317 
318 void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, u64 paddr)
319 {
320 	__virt_pg_map(vm, &vm->mmu, gva, paddr, PG_LEVEL_4K);
321 }
322 
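/*
 * Map a @nr_bytes region starting at @gva to @paddr using pages of the given
 * @level.  Illustrative usage (sketch, names other than the API are made up):
 * backing a 2MiB-aligned region with a single 2MiB page would look like
 *
 *	virt_map_level(vm, gva, gpa, 0x200000, PG_LEVEL_2M);
 *
 * where both gva and gpa must be 2MiB-aligned.
 */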
323 void virt_map_level(struct kvm_vm *vm, gva_t gva, u64 paddr,
324 		    u64 nr_bytes, int level)
325 {
326 	u64 pg_size = PG_LEVEL_SIZE(level);
327 	u64 nr_pages = nr_bytes / pg_size;
328 	int i;
329 
330 	TEST_ASSERT(nr_bytes % pg_size == 0,
331 		    "Region size not aligned: nr_bytes: 0x%lx, page size: 0x%lx",
332 		    nr_bytes, pg_size);
333 
334 	for (i = 0; i < nr_pages; i++) {
335 		__virt_pg_map(vm, &vm->mmu, gva, paddr, level);
336 		sparsebit_set_num(vm->vpages_mapped, gva >> vm->page_shift,
337 				  nr_bytes / PAGE_SIZE);
338 
339 		gva += pg_size;
340 		paddr += pg_size;
341 	}
342 }
343 
344 static bool vm_is_target_pte(struct kvm_mmu *mmu, u64 *pte,
345 			     int *level, int current_level)
346 {
347 	if (is_huge_pte(mmu, pte)) {
348 		TEST_ASSERT(*level == PG_LEVEL_NONE ||
349 			    *level == current_level,
350 			    "Unexpected hugepage at level %d", current_level);
351 		*level = current_level;
352 	}
353 
354 	return *level == current_level;
355 }
356 
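/*
 * Walk @mmu's page tables and return a host pointer to the entry that maps
 * @gva at *@level (PG_LEVEL_NONE means "walk down to the 4K leaf").  If a
 * hugepage is encountered, the walk stops there and *@level is updated to the
 * hugepage's level.
 */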
357 static u64 *__vm_get_page_table_entry(struct kvm_vm *vm,
358 				      struct kvm_mmu *mmu,
359 				      gva_t gva,
360 				      int *level)
361 {
362 	int va_width = 12 + (mmu->pgtable_levels) * 9;
363 	u64 *pte = &mmu->pgd;
364 	int current_level;
365 
366 	TEST_ASSERT(!vm->arch.is_pt_protected,
367 		    "Walking page tables of protected guests is impossible");
368 
369 	TEST_ASSERT(*level >= PG_LEVEL_NONE && *level <= mmu->pgtable_levels,
370 		    "Invalid PG_LEVEL_* '%d'", *level);
371 
372 	TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
373 		    "Unknown or unsupported guest mode: 0x%x", vm->mode);
374 	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),
375 		    "Invalid virtual address, gva: 0x%lx", gva);
376 	/*
377 	 * Check that the gva is a sign-extended va_width value.
378 	 */
379 	TEST_ASSERT(gva == (((s64)gva << (64 - va_width) >> (64 - va_width))),
380 		    "Canonical check failed.  The virtual address is invalid.");
381 
382 	for (current_level = mmu->pgtable_levels;
383 	     current_level > PG_LEVEL_4K;
384 	     current_level--) {
385 		pte = virt_get_pte(vm, mmu, pte, gva, current_level);
386 		if (vm_is_target_pte(mmu, pte, level, current_level))
387 			return pte;
388 	}
389 
390 	return virt_get_pte(vm, mmu, pte, gva, PG_LEVEL_4K);
391 }
392 
393 u64 *tdp_get_pte(struct kvm_vm *vm, u64 l2_gpa)
394 {
395 	int level = PG_LEVEL_4K;
396 
397 	return __vm_get_page_table_entry(vm, &vm->stage2_mmu, l2_gpa, &level);
398 }
399 
400 u64 *vm_get_pte(struct kvm_vm *vm, gva_t gva)
401 {
402 	int level = PG_LEVEL_4K;
403 
404 	return __vm_get_page_table_entry(vm, &vm->mmu, gva, &level);
405 }
406 
407 void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent)
408 {
409 	struct kvm_mmu *mmu = &vm->mmu;
410 	u64 *pml4e, *pml4e_start;
411 	u64 *pdpe, *pdpe_start;
412 	u64 *pde, *pde_start;
413 	u64 *pte, *pte_start;
414 
415 	if (!mmu->pgd_created)
416 		return;
417 
418 	fprintf(stream, "%*s                                          "
419 		"                no\n", indent, "");
420 	fprintf(stream, "%*s      index hvaddr         gpaddr         "
421 		"addr         w exec dirty\n",
422 		indent, "");
423 	pml4e_start = (u64 *)addr_gpa2hva(vm, mmu->pgd);
424 	for (u16 n1 = 0; n1 <= 0x1ffu; n1++) {
425 		pml4e = &pml4e_start[n1];
426 		if (!is_present_pte(mmu, pml4e))
427 			continue;
428 		fprintf(stream, "%*spml4e 0x%-3zx %p 0x%-12lx 0x%-10llx %u "
429 			" %u\n",
430 			indent, "",
431 			pml4e - pml4e_start, pml4e,
432 			addr_hva2gpa(vm, pml4e), PTE_GET_PFN(*pml4e),
433 			is_writable_pte(mmu, pml4e), is_nx_pte(mmu, pml4e));
434 
435 		pdpe_start = addr_gpa2hva(vm, *pml4e & PHYSICAL_PAGE_MASK);
436 		for (u16 n2 = 0; n2 <= 0x1ffu; n2++) {
437 			pdpe = &pdpe_start[n2];
438 			if (!is_present_pte(mmu, pdpe))
439 				continue;
440 			fprintf(stream, "%*spdpe  0x%-3zx %p 0x%-12lx 0x%-10llx "
441 				"%u  %u\n",
442 				indent, "",
443 				pdpe - pdpe_start, pdpe,
444 				addr_hva2gpa(vm, pdpe),
445 				PTE_GET_PFN(*pdpe), is_writable_pte(mmu, pdpe),
446 				is_nx_pte(mmu, pdpe));
447 
448 			pde_start = addr_gpa2hva(vm, *pdpe & PHYSICAL_PAGE_MASK);
449 			for (u16 n3 = 0; n3 <= 0x1ffu; n3++) {
450 				pde = &pde_start[n3];
451 				if (!is_present_pte(mmu, pde))
452 					continue;
453 				fprintf(stream, "%*spde   0x%-3zx %p "
454 					"0x%-12lx 0x%-10llx %u  %u\n",
455 					indent, "", pde - pde_start, pde,
456 					addr_hva2gpa(vm, pde),
457 					PTE_GET_PFN(*pde), is_writable_pte(mmu, pde),
458 					is_nx_pte(mmu, pde));
459 
460 				pte_start = addr_gpa2hva(vm, *pde & PHYSICAL_PAGE_MASK);
461 				for (u16 n4 = 0; n4 <= 0x1ffu; n4++) {
462 					pte = &pte_start[n4];
463 					if (!is_present_pte(mmu, pte))
464 						continue;
465 					fprintf(stream, "%*spte   0x%-3zx %p "
466 						"0x%-12lx 0x%-10llx %u  %u "
467 						"    %u    0x%-10lx\n",
468 						indent, "",
469 						pte - pte_start, pte,
470 						addr_hva2gpa(vm, pte),
471 						PTE_GET_PFN(*pte),
472 						is_writable_pte(mmu, pte),
473 						is_nx_pte(mmu, pte),
474 						is_dirty_pte(mmu, pte),
475 						((u64)n1 << 27)
476 							| ((u64)n2 << 18)
477 							| ((u64)n3 << 9)
478 							| ((u64)n4));
479 				}
480 			}
481 		}
482 	}
483 }
484 
485 void vm_enable_tdp(struct kvm_vm *vm)
486 {
487 	if (kvm_cpu_has(X86_FEATURE_VMX))
488 		vm_enable_ept(vm);
489 	else
490 		vm_enable_npt(vm);
491 }
492 
493 bool kvm_cpu_has_tdp(void)
494 {
495 	return kvm_cpu_has_ept() || kvm_cpu_has_npt();
496 }
497 
498 void __tdp_map(struct kvm_vm *vm, u64 nested_paddr, u64 paddr,
499 	       u64 size, int level)
500 {
501 	size_t page_size = PG_LEVEL_SIZE(level);
502 	size_t npages = size / page_size;
503 
504 	TEST_ASSERT(nested_paddr + size > nested_paddr, "Vaddr overflow");
505 	TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
506 
507 	while (npages--) {
508 		__virt_pg_map(vm, &vm->stage2_mmu, nested_paddr, paddr, level);
509 		nested_paddr += page_size;
510 		paddr += page_size;
511 	}
512 }
513 
514 void tdp_map(struct kvm_vm *vm, u64 nested_paddr, u64 paddr,
515 	     u64 size)
516 {
517 	__tdp_map(vm, nested_paddr, paddr, size, PG_LEVEL_4K);
518 }
519 
520 /* Prepare identity-mapped TDP (stage-2) page tables that cover all the
521  * physical pages in the VM's default memslot.
522  */
523 void tdp_identity_map_default_memslots(struct kvm_vm *vm)
524 {
525 	u32 s, memslot = 0;
526 	sparsebit_idx_t i, last;
527 	struct userspace_mem_region *region = memslot2region(vm, memslot);
528 
529 	/* Only memslot 0 is mapped here; ensure it's the only one in use. */
530 	for (s = 0; s < NR_MEM_REGIONS; s++)
531 		TEST_ASSERT_EQ(vm->memslots[s], 0);
532 
533 	i = (region->region.guest_phys_addr >> vm->page_shift) - 1;
534 	last = i + (region->region.memory_size >> vm->page_shift);
535 	for (;;) {
536 		i = sparsebit_next_clear(region->unused_phy_pages, i);
537 		if (i > last)
538 			break;
539 
540 		tdp_map(vm, (u64)i << vm->page_shift,
541 			(u64)i << vm->page_shift, 1 << vm->page_shift);
542 	}
543 }
544 
545 /* Identity map a region with 1GiB Pages. */
546 void tdp_identity_map_1g(struct kvm_vm *vm, u64 addr, u64 size)
547 {
548 	__tdp_map(vm, addr, addr, size, PG_LEVEL_1G);
549 }
550 
551 /*
552  * Set Unusable Segment
553  *
554  * Input Args: None
555  *
556  * Output Args:
557  *   segp - Pointer to segment register
558  *
559  * Return: None
560  *
561  * Sets the segment register pointed to by @segp to an unusable state.
562  */
563 static void kvm_seg_set_unusable(struct kvm_segment *segp)
564 {
565 	memset(segp, 0, sizeof(*segp));
566 	segp->unusable = true;
567 }
568 
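/*
 * Write the descriptor described by @segp into the guest GDT at the slot
 * selected by @segp->selector.  Only system descriptors (s == 0), e.g. the
 * TSS, use the expanded 64-bit format with base[63:32], hence base3 is only
 * written for those.
 */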
569 static void kvm_seg_fill_gdt_64bit(struct kvm_vm *vm, struct kvm_segment *segp)
570 {
571 	void *gdt = addr_gva2hva(vm, vm->arch.gdt);
572 	struct desc64 *desc = gdt + (segp->selector >> 3) * 8;
573 
574 	desc->limit0 = segp->limit & 0xFFFF;
575 	desc->base0 = segp->base & 0xFFFF;
576 	desc->base1 = segp->base >> 16;
577 	desc->type = segp->type;
578 	desc->s = segp->s;
579 	desc->dpl = segp->dpl;
580 	desc->p = segp->present;
581 	desc->limit1 = segp->limit >> 16;
582 	desc->avl = segp->avl;
583 	desc->l = segp->l;
584 	desc->db = segp->db;
585 	desc->g = segp->g;
586 	desc->base2 = segp->base >> 24;
587 	if (!segp->s)
588 		desc->base3 = segp->base >> 32;
589 }
590 
591 static void kvm_seg_set_kernel_code_64bit(struct kvm_segment *segp)
592 {
593 	memset(segp, 0, sizeof(*segp));
594 	segp->selector = KERNEL_CS;
595 	segp->limit = 0xFFFFFFFFu;
596 	segp->s = 0x1; /* kTypeCodeData */
597 	segp->type = 0x08 | 0x01 | 0x02; /* kFlagCode | kFlagCodeAccessed
598 					  * | kFlagCodeReadable
599 					  */
600 	segp->g = true;
601 	segp->l = true;
602 	segp->present = 1;
603 }
604 
605 static void kvm_seg_set_kernel_data_64bit(struct kvm_segment *segp)
606 {
607 	memset(segp, 0, sizeof(*segp));
608 	segp->selector = KERNEL_DS;
609 	segp->limit = 0xFFFFFFFFu;
610 	segp->s = 0x1; /* kTypeCodeData */
611 	segp->type = 0x00 | 0x01 | 0x02; /* kFlagData | kFlagDataAccessed
612 					  * | kFlagDataWritable
613 					  */
614 	segp->g = true;
615 	segp->present = true;
616 }
617 
618 gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva)
619 {
620 	int level = PG_LEVEL_NONE;
621 	u64 *pte = __vm_get_page_table_entry(vm, &vm->mmu, gva, &level);
622 
623 	TEST_ASSERT(is_present_pte(&vm->mmu, pte),
624 		    "Leaf PTE not PRESENT for gva: 0x%08lx", gva);
625 
626 	/*
627 	 * No need for a hugepage mask on the PTE, x86-64 requires the "unused"
628 	 * address bits to be zero.
629 	 */
630 	return vm_untag_gpa(vm, PTE_GET_PA(*pte)) | (gva & ~HUGEPAGE_MASK(level));
631 }
632 
633 static void kvm_seg_set_tss_64bit(gva_t base, struct kvm_segment *segp)
634 {
635 	memset(segp, 0, sizeof(*segp));
636 	segp->base = base;
637 	segp->limit = 0x67;
638 	segp->selector = KERNEL_TSS;
639 	segp->type = 0xb;
640 	segp->present = 1;
641 }
642 
643 static void vcpu_init_sregs(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
644 {
645 	struct kvm_sregs sregs;
646 
647 	TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
648 		    "Unknown or unsupported guest mode: 0x%x", vm->mode);
649 
650 	/* Set mode specific system register values. */
651 	vcpu_sregs_get(vcpu, &sregs);
652 
653 	sregs.idt.base = vm->arch.idt;
654 	sregs.idt.limit = NUM_INTERRUPTS * sizeof(struct idt_entry) - 1;
655 	sregs.gdt.base = vm->arch.gdt;
656 	sregs.gdt.limit = getpagesize() - 1;
657 
658 	sregs.cr0 = X86_CR0_PE | X86_CR0_NE | X86_CR0_PG;
659 	sregs.cr4 |= X86_CR4_PAE | X86_CR4_OSFXSR;
660 	if (kvm_cpu_has(X86_FEATURE_XSAVE))
661 		sregs.cr4 |= X86_CR4_OSXSAVE;
662 	if (vm->mmu.pgtable_levels == 5)
663 		sregs.cr4 |= X86_CR4_LA57;
664 	sregs.efer |= (EFER_LME | EFER_LMA | EFER_NX);
665 
666 	kvm_seg_set_unusable(&sregs.ldt);
667 	kvm_seg_set_kernel_code_64bit(&sregs.cs);
668 	kvm_seg_set_kernel_data_64bit(&sregs.ds);
669 	kvm_seg_set_kernel_data_64bit(&sregs.es);
670 	kvm_seg_set_kernel_data_64bit(&sregs.gs);
671 	kvm_seg_set_tss_64bit(vm->arch.tss, &sregs.tr);
672 
673 	sregs.cr3 = vm->mmu.pgd;
674 	vcpu_sregs_set(vcpu, &sregs);
675 }
676 
677 static void vcpu_init_xcrs(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
678 {
679 	struct kvm_xcrs xcrs = {
680 		.nr_xcrs = 1,
681 		.xcrs[0].xcr = 0,
682 		.xcrs[0].value = kvm_cpu_supported_xcr0(),
683 	};
684 
685 	if (!kvm_cpu_has(X86_FEATURE_XSAVE))
686 		return;
687 
688 	vcpu_xcrs_set(vcpu, &xcrs);
689 }
690 
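/*
 * Install a 64-bit interrupt gate (type 14) for @vector that transfers
 * control to @addr through @selector at the given descriptor privilege level.
 * IST is left at zero, so handlers run on the current stack.
 */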
691 static void set_idt_entry(struct kvm_vm *vm, int vector, unsigned long addr,
692 			  int dpl, unsigned short selector)
693 {
694 	struct idt_entry *base =
695 		(struct idt_entry *)addr_gva2hva(vm, vm->arch.idt);
696 	struct idt_entry *e = &base[vector];
697 
698 	memset(e, 0, sizeof(*e));
699 	e->offset0 = addr;
700 	e->selector = selector;
701 	e->ist = 0;
702 	e->type = 14;
703 	e->dpl = dpl;
704 	e->p = 1;
705 	e->offset1 = addr >> 16;
706 	e->offset2 = addr >> 32;
707 }
708 
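/*
 * Exception fixup contract used by guest code that expects to fault, e.g. the
 * KVM_ASM_SAFE() machinery: the guest puts KVM_EXCEPTION_MAGIC in r9, the
 * address of the potentially-faulting instruction in r10, and the address to
 * resume at in r11.  On a match, the vector and error code are reported back
 * to the guest in r9 and r10.
 */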
709 static bool kvm_fixup_exception(struct ex_regs *regs)
710 {
711 	if (regs->r9 != KVM_EXCEPTION_MAGIC || regs->rip != regs->r10)
712 		return false;
713 
714 	if (regs->vector == DE_VECTOR)
715 		regs->vector = KVM_MAGIC_DE_VECTOR;
716 
717 	regs->rip = regs->r11;
718 	regs->r9 = regs->vector;
719 	regs->r10 = regs->error_code;
720 	return true;
721 }
722 
723 void route_exception(struct ex_regs *regs)
724 {
725 	typedef void(*handler)(struct ex_regs *);
726 	handler *handlers = (handler *)exception_handlers;
727 
728 	if (handlers && handlers[regs->vector]) {
729 		handlers[regs->vector](regs);
730 		return;
731 	}
732 
733 	if (kvm_fixup_exception(regs))
734 		return;
735 
736 	GUEST_FAIL("Unhandled exception '0x%lx' at guest RIP '0x%lx'",
737 		   regs->vector, regs->rip);
738 }
739 
740 static void vm_init_descriptor_tables(struct kvm_vm *vm)
741 {
742 	extern void *idt_handlers;
743 	struct kvm_segment seg;
744 	int i;
745 
746 	vm->arch.gdt = __vm_alloc_page(vm, MEM_REGION_DATA);
747 	vm->arch.idt = __vm_alloc_page(vm, MEM_REGION_DATA);
748 	vm->handlers = __vm_alloc_page(vm, MEM_REGION_DATA);
749 	vm->arch.tss = __vm_alloc_page(vm, MEM_REGION_DATA);
750 
751 	/* Handlers have the same address in both address spaces. */
752 	for (i = 0; i < NUM_INTERRUPTS; i++)
753 		set_idt_entry(vm, i, (unsigned long)(&idt_handlers)[i], 0, KERNEL_CS);
754 
755 	*(gva_t *)addr_gva2hva(vm, (gva_t)(&exception_handlers)) = vm->handlers;
756 
757 	kvm_seg_set_kernel_code_64bit(&seg);
758 	kvm_seg_fill_gdt_64bit(vm, &seg);
759 
760 	kvm_seg_set_kernel_data_64bit(&seg);
761 	kvm_seg_fill_gdt_64bit(vm, &seg);
762 
763 	kvm_seg_set_tss_64bit(vm->arch.tss, &seg);
764 	kvm_seg_fill_gdt_64bit(vm, &seg);
765 }
766 
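/*
 * Register a guest-side handler for @vector.  Illustrative usage (sketch,
 * the handler body is test-specific):
 *
 *	static void guest_gp_handler(struct ex_regs *regs)
 *	{
 *		regs->rip += 2;		// skip the faulting instruction,
 *					// assuming it is two bytes long
 *	}
 *
 *	vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);
 */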
767 void vm_install_exception_handler(struct kvm_vm *vm, int vector,
768 			       void (*handler)(struct ex_regs *))
769 {
770 	gva_t *handlers = (gva_t *)addr_gva2hva(vm, vm->handlers);
771 
772 	handlers[vector] = (gva_t)handler;
773 }
774 
775 void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
776 {
777 	struct ucall uc;
778 
779 	if (get_ucall(vcpu, &uc) == UCALL_ABORT)
780 		REPORT_GUEST_ASSERT(uc);
781 }
782 
783 void kvm_arch_vm_post_create(struct kvm_vm *vm, unsigned int nr_vcpus)
784 {
785 	int r;
786 
787 	TEST_ASSERT(kvm_has_cap(KVM_CAP_GET_TSC_KHZ),
788 		    "Require KVM_GET_TSC_KHZ to provide udelay() to guest.");
789 
790 	vm_create_irqchip(vm);
791 	vm_init_descriptor_tables(vm);
792 
793 	sync_global_to_guest(vm, host_cpu_is_intel);
794 	sync_global_to_guest(vm, host_cpu_is_amd);
795 	sync_global_to_guest(vm, host_cpu_is_hygon);
796 	sync_global_to_guest(vm, host_cpu_is_amd_compatible);
797 	sync_global_to_guest(vm, is_forced_emulation_enabled);
798 	sync_global_to_guest(vm, pmu_errata_mask);
799 
800 	if (is_sev_vm(vm)) {
801 		struct kvm_sev_init init = { 0 };
802 
803 		vm_sev_ioctl(vm, KVM_SEV_INIT2, &init);
804 	}
805 
806 	r = __vm_ioctl(vm, KVM_GET_TSC_KHZ, NULL);
807 	TEST_ASSERT(r > 0, "KVM_GET_TSC_KHZ did not provide a valid TSC frequency.");
808 	guest_tsc_khz = r;
809 	sync_global_to_guest(vm, guest_tsc_khz);
810 }
811 
812 void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
813 {
814 	struct kvm_regs regs;
815 
816 	vcpu_regs_get(vcpu, &regs);
817 	regs.rip = (unsigned long) guest_code;
818 	vcpu_regs_set(vcpu, &regs);
819 }
820 
821 struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id)
822 {
823 	struct kvm_mp_state mp_state;
824 	struct kvm_regs regs;
825 	gva_t stack_gva;
826 	struct kvm_vcpu *vcpu;
827 
828 	stack_gva = __vm_alloc(vm, DEFAULT_STACK_PGS * getpagesize(),
829 			       DEFAULT_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA);
830 
831 	stack_gva += DEFAULT_STACK_PGS * getpagesize();
832 
833 	/*
834 	 * Align stack to match calling sequence requirements in section "The
835 	 * Stack Frame" of the System V ABI AMD64 Architecture Processor
836 	 * Supplement, which requires the value (%rsp + 8) to be a multiple of
837 	 * 16 when control is transferred to the function entry point.
838 	 *
839 	 * If this code is ever used to launch a vCPU with a 32-bit entry point,
840 	 * it may need to subtract 4 bytes instead of 8 bytes.
841 	 */
842 	TEST_ASSERT(IS_ALIGNED(stack_gva, PAGE_SIZE),
843 		    "__vm_alloc() did not provide a page-aligned address");
844 	stack_gva -= 8;
845 
846 	vcpu = __vm_vcpu_add(vm, vcpu_id);
847 	vcpu_init_cpuid(vcpu, kvm_get_supported_cpuid());
848 	vcpu_init_sregs(vm, vcpu);
849 	vcpu_init_xcrs(vm, vcpu);
850 
851 	/* Setup guest general purpose registers */
852 	vcpu_regs_get(vcpu, &regs);
853 	regs.rflags = regs.rflags | 0x2;
854 	regs.rsp = stack_gva;
855 	vcpu_regs_set(vcpu, &regs);
856 
857 	/* Setup the MP state */
858 	mp_state.mp_state = 0;
859 	vcpu_mp_state_set(vcpu, &mp_state);
860 
861 	/*
862 	 * Refresh CPUID after setting SREGS and XCR0, so that KVM's "runtime"
863 	 * updates to guest CPUID, e.g. for OSXSAVE and XSAVE state size, are
864 	 * reflected into selftests' vCPU CPUID cache, i.e. so that the cache
865 	 * is consistent with vCPU state.
866 	 */
867 	vcpu_get_cpuid(vcpu);
868 	return vcpu;
869 }
870 
871 struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, u32 vcpu_id)
872 {
873 	struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id);
874 
875 	vcpu_init_cpuid(vcpu, kvm_get_supported_cpuid());
876 
877 	return vcpu;
878 }
879 
880 void vcpu_arch_free(struct kvm_vcpu *vcpu)
881 {
882 	if (vcpu->cpuid)
883 		free(vcpu->cpuid);
884 }
885 
886 /* Do not use kvm_supported_cpuid directly except for validity checks. */
887 static void *kvm_supported_cpuid;
888 
889 const struct kvm_cpuid2 *kvm_get_supported_cpuid(void)
890 {
891 	int kvm_fd;
892 
893 	if (kvm_supported_cpuid)
894 		return kvm_supported_cpuid;
895 
896 	kvm_supported_cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES);
897 	kvm_fd = open_kvm_dev_path_or_exit();
898 
899 	kvm_ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID,
900 		  (struct kvm_cpuid2 *)kvm_supported_cpuid);
901 
902 	close(kvm_fd);
903 	return kvm_supported_cpuid;
904 }
905 
906 static u32 __kvm_cpu_has(const struct kvm_cpuid2 *cpuid,
907 			 u32 function, u32 index,
908 			 u8 reg, u8 lo, u8 hi)
909 {
910 	const struct kvm_cpuid_entry2 *entry;
911 	int i;
912 
913 	for (i = 0; i < cpuid->nent; i++) {
914 		entry = &cpuid->entries[i];
915 
916 		/*
917 		 * The output registers in kvm_cpuid_entry2 are in alphabetical
918 		 * order, but kvm_x86_cpu_feature matches that mess, so yay
919 		 * pointer shenanigans!
920 		 */
921 		if (entry->function == function && entry->index == index)
922 			return ((&entry->eax)[reg] & GENMASK(hi, lo)) >> lo;
923 	}
924 
925 	return 0;
926 }
927 
928 bool kvm_cpuid_has(const struct kvm_cpuid2 *cpuid,
929 		   struct kvm_x86_cpu_feature feature)
930 {
931 	return __kvm_cpu_has(cpuid, feature.function, feature.index,
932 			     feature.reg, feature.bit, feature.bit);
933 }
934 
935 u32 kvm_cpuid_property(const struct kvm_cpuid2 *cpuid,
936 		       struct kvm_x86_cpu_property property)
937 {
938 	return __kvm_cpu_has(cpuid, property.function, property.index,
939 			     property.reg, property.lo_bit, property.hi_bit);
940 }
941 
942 u64 kvm_get_feature_msr(u64 msr_index)
943 {
944 	struct {
945 		struct kvm_msrs header;
946 		struct kvm_msr_entry entry;
947 	} buffer = {};
948 	int r, kvm_fd;
949 
950 	buffer.header.nmsrs = 1;
951 	buffer.entry.index = msr_index;
952 	kvm_fd = open_kvm_dev_path_or_exit();
953 
954 	r = __kvm_ioctl(kvm_fd, KVM_GET_MSRS, &buffer.header);
955 	TEST_ASSERT(r == 1, KVM_IOCTL_ERROR(KVM_GET_MSRS, r));
956 
957 	close(kvm_fd);
958 	return buffer.entry.data;
959 }
960 
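/*
 * Dynamically-enabled XSAVE features must be requested from the kernel via
 * arch_prctl(ARCH_REQ_XCOMP_GUEST_PERM) before any supported-CPUID state is
 * cached, hence the assertion that kvm_get_supported_cpuid() hasn't been
 * called yet.  @xfeature is a single XSAVE feature bit (enforced by the
 * power-of-two check); @name is only used in the skip/assert messages.
 */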
961 void __vm_xsave_require_permission(u64 xfeature, const char *name)
962 {
963 	int kvm_fd;
964 	u64 bitmask;
965 	long rc;
966 	struct kvm_device_attr attr = {
967 		.group = 0,
968 		.attr = KVM_X86_XCOMP_GUEST_SUPP,
969 		.addr = (unsigned long) &bitmask,
970 	};
971 
972 	TEST_ASSERT(!kvm_supported_cpuid,
973 		    "kvm_get_supported_cpuid() cannot be used before ARCH_REQ_XCOMP_GUEST_PERM");
974 
975 	TEST_ASSERT(is_power_of_2(xfeature),
976 		    "Dynamic XFeatures must be enabled one at a time");
977 
978 	kvm_fd = open_kvm_dev_path_or_exit();
979 	rc = __kvm_ioctl(kvm_fd, KVM_GET_DEVICE_ATTR, &attr);
980 	close(kvm_fd);
981 
982 	if (rc == -1 && (errno == ENXIO || errno == EINVAL))
983 		__TEST_REQUIRE(0, "KVM_X86_XCOMP_GUEST_SUPP not supported");
984 
985 	TEST_ASSERT(rc == 0, "KVM_GET_DEVICE_ATTR(0, KVM_X86_XCOMP_GUEST_SUPP) error: %ld", rc);
986 
987 	__TEST_REQUIRE(bitmask & xfeature,
988 		       "Required XSAVE feature '%s' not supported", name);
989 
990 	TEST_REQUIRE(!syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM, ilog2(xfeature)));
991 
992 	rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_GUEST_PERM, &bitmask);
993 	TEST_ASSERT(rc == 0, "prctl(ARCH_GET_XCOMP_GUEST_PERM) error: %ld", rc);
994 	TEST_ASSERT(bitmask & xfeature,
995 		    "'%s' (0x%lx) not permitted after prctl(ARCH_REQ_XCOMP_GUEST_PERM) permitted=0x%lx",
996 		    name, xfeature, bitmask);
997 }
998 
999 void vcpu_init_cpuid(struct kvm_vcpu *vcpu, const struct kvm_cpuid2 *cpuid)
1000 {
1001 	TEST_ASSERT(cpuid != vcpu->cpuid, "@cpuid can't be the vCPU's CPUID");
1002 
1003 	/* Allow overriding the default CPUID. */
1004 	if (vcpu->cpuid && vcpu->cpuid->nent < cpuid->nent) {
1005 		free(vcpu->cpuid);
1006 		vcpu->cpuid = NULL;
1007 	}
1008 
1009 	if (!vcpu->cpuid)
1010 		vcpu->cpuid = allocate_kvm_cpuid2(cpuid->nent);
1011 
1012 	memcpy(vcpu->cpuid, cpuid, kvm_cpuid2_size(cpuid->nent));
1013 	vcpu_set_cpuid(vcpu);
1014 }
1015 
1016 void vcpu_set_cpuid_property(struct kvm_vcpu *vcpu,
1017 			     struct kvm_x86_cpu_property property,
1018 			     u32 value)
1019 {
1020 	struct kvm_cpuid_entry2 *entry;
1021 
1022 	entry = __vcpu_get_cpuid_entry(vcpu, property.function, property.index);
1023 
1024 	(&entry->eax)[property.reg] &= ~GENMASK(property.hi_bit, property.lo_bit);
1025 	(&entry->eax)[property.reg] |= value << property.lo_bit;
1026 
1027 	vcpu_set_cpuid(vcpu);
1028 
1029 	/* Sanity check that @value doesn't exceed the bounds in any way. */
1030 	TEST_ASSERT_EQ(kvm_cpuid_property(vcpu->cpuid, property), value);
1031 }
1032 
1033 void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, u32 function)
1034 {
1035 	struct kvm_cpuid_entry2 *entry = vcpu_get_cpuid_entry(vcpu, function);
1036 
1037 	entry->eax = 0;
1038 	entry->ebx = 0;
1039 	entry->ecx = 0;
1040 	entry->edx = 0;
1041 	vcpu_set_cpuid(vcpu);
1042 }
1043 
1044 void vcpu_set_or_clear_cpuid_feature(struct kvm_vcpu *vcpu,
1045 				     struct kvm_x86_cpu_feature feature,
1046 				     bool set)
1047 {
1048 	struct kvm_cpuid_entry2 *entry;
1049 	u32 *reg;
1050 
1051 	entry = __vcpu_get_cpuid_entry(vcpu, feature.function, feature.index);
1052 	reg = (&entry->eax) + feature.reg;
1053 
1054 	if (set)
1055 		*reg |= BIT(feature.bit);
1056 	else
1057 		*reg &= ~BIT(feature.bit);
1058 
1059 	vcpu_set_cpuid(vcpu);
1060 }
1061 
1062 u64 vcpu_get_msr(struct kvm_vcpu *vcpu, u64 msr_index)
1063 {
1064 	struct {
1065 		struct kvm_msrs header;
1066 		struct kvm_msr_entry entry;
1067 	} buffer = {};
1068 
1069 	buffer.header.nmsrs = 1;
1070 	buffer.entry.index = msr_index;
1071 
1072 	vcpu_msrs_get(vcpu, &buffer.header);
1073 
1074 	return buffer.entry.data;
1075 }
1076 
1077 int _vcpu_set_msr(struct kvm_vcpu *vcpu, u64 msr_index, u64 msr_value)
1078 {
1079 	struct {
1080 		struct kvm_msrs header;
1081 		struct kvm_msr_entry entry;
1082 	} buffer = {};
1083 
1084 	memset(&buffer, 0, sizeof(buffer));
1085 	buffer.header.nmsrs = 1;
1086 	buffer.entry.index = msr_index;
1087 	buffer.entry.data = msr_value;
1088 
1089 	return __vcpu_ioctl(vcpu, KVM_SET_MSRS, &buffer.header);
1090 }
1091 
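/*
 * Pass up to six arguments to the guest's entry point by loading the
 * registers the System V AMD64 calling convention uses for integer arguments
 * (rdi, rsi, rdx, rcx, r8, r9).  Illustrative usage (sketch): for a guest
 * function declared as
 *
 *	static void guest_code(u64 a, u64 b);
 *
 * the host would do
 *
 *	vcpu_args_set(vcpu, 2, a, b);
 */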
1092 void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
1093 {
1094 	va_list ap;
1095 	struct kvm_regs regs;
1096 
1097 	TEST_ASSERT(num >= 1 && num <= 6, "Unsupported number of args,\n"
1098 		    "  num: %u",
1099 		    num);
1100 
1101 	va_start(ap, num);
1102 	vcpu_regs_get(vcpu, &regs);
1103 
1104 	if (num >= 1)
1105 		regs.rdi = va_arg(ap, u64);
1106 
1107 	if (num >= 2)
1108 		regs.rsi = va_arg(ap, u64);
1109 
1110 	if (num >= 3)
1111 		regs.rdx = va_arg(ap, u64);
1112 
1113 	if (num >= 4)
1114 		regs.rcx = va_arg(ap, u64);
1115 
1116 	if (num >= 5)
1117 		regs.r8 = va_arg(ap, u64);
1118 
1119 	if (num >= 6)
1120 		regs.r9 = va_arg(ap, u64);
1121 
1122 	vcpu_regs_set(vcpu, &regs);
1123 	va_end(ap);
1124 }
1125 
1126 void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, u8 indent)
1127 {
1128 	struct kvm_regs regs;
1129 	struct kvm_sregs sregs;
1130 
1131 	fprintf(stream, "%*svCPU ID: %u\n", indent, "", vcpu->id);
1132 
1133 	fprintf(stream, "%*sregs:\n", indent + 2, "");
1134 	vcpu_regs_get(vcpu, &regs);
1135 	regs_dump(stream, &regs, indent + 4);
1136 
1137 	fprintf(stream, "%*ssregs:\n", indent + 2, "");
1138 	vcpu_sregs_get(vcpu, &sregs);
1139 	sregs_dump(stream, &sregs, indent + 4);
1140 }
1141 
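/*
 * Retrieve KVM's list of save/restore MSRs, or its list of feature MSRs if
 * @feature_msrs is true, using the usual two-call pattern: the first ioctl
 * with nmsrs = 0 is expected to fail with E2BIG while reporting the number of
 * MSRs, the second call fills an appropriately sized buffer with the indices.
 */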
1142 static struct kvm_msr_list *__kvm_get_msr_index_list(bool feature_msrs)
1143 {
1144 	struct kvm_msr_list *list;
1145 	struct kvm_msr_list nmsrs;
1146 	int kvm_fd, r;
1147 
1148 	kvm_fd = open_kvm_dev_path_or_exit();
1149 
1150 	nmsrs.nmsrs = 0;
1151 	if (!feature_msrs)
1152 		r = __kvm_ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, &nmsrs);
1153 	else
1154 		r = __kvm_ioctl(kvm_fd, KVM_GET_MSR_FEATURE_INDEX_LIST, &nmsrs);
1155 
1156 	TEST_ASSERT(r == -1 && errno == E2BIG,
1157 		    "Expected -E2BIG, got rc: %i errno: %i (%s)",
1158 		    r, errno, strerror(errno));
1159 
1160 	list = malloc(sizeof(*list) + nmsrs.nmsrs * sizeof(list->indices[0]));
1161 	TEST_ASSERT(list, "-ENOMEM when allocating MSR index list");
1162 	list->nmsrs = nmsrs.nmsrs;
1163 
1164 	if (!feature_msrs)
1165 		kvm_ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list);
1166 	else
1167 		kvm_ioctl(kvm_fd, KVM_GET_MSR_FEATURE_INDEX_LIST, list);
1168 	close(kvm_fd);
1169 
1170 	TEST_ASSERT(list->nmsrs == nmsrs.nmsrs,
1171 		    "Number of MSRs in list changed, was %d, now %d",
1172 		    nmsrs.nmsrs, list->nmsrs);
1173 	return list;
1174 }
1175 
1176 const struct kvm_msr_list *kvm_get_msr_index_list(void)
1177 {
1178 	static const struct kvm_msr_list *list;
1179 
1180 	if (!list)
1181 		list = __kvm_get_msr_index_list(false);
1182 	return list;
1183 }
1184 
1185 
1186 const struct kvm_msr_list *kvm_get_feature_msr_index_list(void)
1187 {
1188 	static const struct kvm_msr_list *list;
1189 
1190 	if (!list)
1191 		list = __kvm_get_msr_index_list(true);
1192 	return list;
1193 }
1194 
1195 bool kvm_msr_is_in_save_restore_list(u32 msr_index)
1196 {
1197 	const struct kvm_msr_list *list = kvm_get_msr_index_list();
1198 	int i;
1199 
1200 	for (i = 0; i < list->nmsrs; ++i) {
1201 		if (list->indices[i] == msr_index)
1202 			return true;
1203 	}
1204 
1205 	return false;
1206 }
1207 
1208 static void vcpu_save_xsave_state(struct kvm_vcpu *vcpu,
1209 				  struct kvm_x86_state *state)
1210 {
1211 	int size = vm_check_cap(vcpu->vm, KVM_CAP_XSAVE2);
1212 
1213 	if (size) {
1214 		state->xsave = malloc(size);
1215 		vcpu_xsave2_get(vcpu, state->xsave);
1216 	} else {
1217 		state->xsave = malloc(sizeof(struct kvm_xsave));
1218 		vcpu_xsave_get(vcpu, state->xsave);
1219 	}
1220 }
1221 
1222 struct kvm_x86_state *vcpu_save_state(struct kvm_vcpu *vcpu)
1223 {
1224 	const struct kvm_msr_list *msr_list = kvm_get_msr_index_list();
1225 	struct kvm_x86_state *state;
1226 	int i;
1227 
1228 	static int nested_size = -1;
1229 
1230 	if (nested_size == -1) {
1231 		nested_size = kvm_check_cap(KVM_CAP_NESTED_STATE);
1232 		TEST_ASSERT(nested_size <= sizeof(state->nested_),
1233 			    "Nested state size too big, %i > %zi",
1234 			    nested_size, sizeof(state->nested_));
1235 	}
1236 
1237 	/*
1238 	 * When KVM exits to userspace with KVM_EXIT_IO, KVM guarantees
1239 	 * guest state is consistent only after userspace re-enters the
1240 	 * kernel with KVM_RUN.  Complete IO prior to migrating state
1241 	 * to a new VM.
1242 	 */
1243 	vcpu_run_complete_io(vcpu);
1244 
1245 	state = malloc(sizeof(*state) + msr_list->nmsrs * sizeof(state->msrs.entries[0]));
1246 	TEST_ASSERT(state, "-ENOMEM when allocating kvm state");
1247 
1248 	vcpu_events_get(vcpu, &state->events);
1249 	vcpu_mp_state_get(vcpu, &state->mp_state);
1250 	vcpu_regs_get(vcpu, &state->regs);
1251 	vcpu_save_xsave_state(vcpu, state);
1252 
1253 	if (kvm_has_cap(KVM_CAP_XCRS))
1254 		vcpu_xcrs_get(vcpu, &state->xcrs);
1255 
1256 	vcpu_sregs_get(vcpu, &state->sregs);
1257 
1258 	if (nested_size) {
1259 		state->nested.size = sizeof(state->nested_);
1260 
1261 		vcpu_nested_state_get(vcpu, &state->nested);
1262 		TEST_ASSERT(state->nested.size <= nested_size,
1263 			    "Nested state size too big, %i (KVM_CHECK_CAP gave %i)",
1264 			    state->nested.size, nested_size);
1265 	} else {
1266 		state->nested.size = 0;
1267 	}
1268 
1269 	state->msrs.nmsrs = msr_list->nmsrs;
1270 	for (i = 0; i < msr_list->nmsrs; i++)
1271 		state->msrs.entries[i].index = msr_list->indices[i];
1272 	vcpu_msrs_get(vcpu, &state->msrs);
1273 
1274 	vcpu_debugregs_get(vcpu, &state->debugregs);
1275 
1276 	return state;
1277 }
1278 
1279 void vcpu_load_state(struct kvm_vcpu *vcpu, struct kvm_x86_state *state)
1280 {
1281 	vcpu_sregs_set(vcpu, &state->sregs);
1282 	vcpu_msrs_set(vcpu, &state->msrs);
1283 
1284 	if (kvm_has_cap(KVM_CAP_XCRS))
1285 		vcpu_xcrs_set(vcpu, &state->xcrs);
1286 
1287 	vcpu_xsave_set(vcpu,  state->xsave);
1288 	vcpu_events_set(vcpu, &state->events);
1289 	vcpu_mp_state_set(vcpu, &state->mp_state);
1290 	vcpu_debugregs_set(vcpu, &state->debugregs);
1291 	vcpu_regs_set(vcpu, &state->regs);
1292 
1293 	if (state->nested.size)
1294 		vcpu_nested_state_set(vcpu, &state->nested);
1295 }
1296 
1297 void kvm_x86_state_cleanup(struct kvm_x86_state *state)
1298 {
1299 	free(state->xsave);
1300 	free(state);
1301 }
1302 
1303 void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits)
1304 {
1305 	if (!kvm_cpu_has_p(X86_PROPERTY_MAX_PHY_ADDR)) {
1306 		*pa_bits = kvm_cpu_has(X86_FEATURE_PAE) ? 36 : 32;
1307 		*va_bits = 32;
1308 	} else {
1309 		*pa_bits = kvm_cpu_property(X86_PROPERTY_MAX_PHY_ADDR);
1310 		*va_bits = kvm_cpu_property(X86_PROPERTY_MAX_VIRT_ADDR);
1311 	}
1312 }
1313 
1314 void kvm_init_vm_address_properties(struct kvm_vm *vm)
1315 {
1316 	if (is_sev_vm(vm)) {
1317 		vm->arch.sev_fd = open_sev_dev_path_or_exit();
1318 		vm->arch.c_bit = BIT_ULL(this_cpu_property(X86_PROPERTY_SEV_C_BIT));
1319 		vm->gpa_tag_mask = vm->arch.c_bit;
1320 	} else {
1321 		vm->arch.sev_fd = -1;
1322 	}
1323 }
1324 
1325 const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,
1326 					       u32 function, u32 index)
1327 {
1328 	int i;
1329 
1330 	for (i = 0; i < cpuid->nent; i++) {
1331 		if (cpuid->entries[i].function == function &&
1332 		    cpuid->entries[i].index == index)
1333 			return &cpuid->entries[i];
1334 	}
1335 
1336 	TEST_FAIL("CPUID function 0x%x index 0x%x not found", function, index);
1337 
1338 	return NULL;
1339 }
1340 
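/*
 * Issue a hypercall from the guest using the vendor-appropriate instruction:
 * VMMCALL on AMD-compatible CPUs, VMCALL otherwise.  Inputs are supplied by
 * the callers below via inline-asm constraints; the result is returned in RAX.
 */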
1341 #define X86_HYPERCALL(inputs...)					\
1342 ({									\
1343 	u64 r;							\
1344 									\
1345 	asm volatile("test %[use_vmmcall], %[use_vmmcall]\n\t"		\
1346 		     "jnz 1f\n\t"					\
1347 		     "vmcall\n\t"					\
1348 		     "jmp 2f\n\t"					\
1349 		     "1: vmmcall\n\t"					\
1350 		     "2:"						\
1351 		     : "=a"(r)						\
1352 		     : [use_vmmcall] "r" (host_cpu_is_amd_compatible),	\
1353 		       inputs);						\
1354 									\
1355 	r;								\
1356 })
1357 
1358 u64 kvm_hypercall(u64 nr, u64 a0, u64 a1, u64 a2, u64 a3)
1359 {
1360 	return X86_HYPERCALL("a"(nr), "b"(a0), "c"(a1), "d"(a2), "S"(a3));
1361 }
1362 
1363 u64 __xen_hypercall(u64 nr, u64 a0, void *a1)
1364 {
1365 	return X86_HYPERCALL("a"(nr), "D"(a0), "S"(a1));
1366 }
1367 
1368 void xen_hypercall(u64 nr, u64 a0, void *a1)
1369 {
1370 	GUEST_ASSERT(!__xen_hypercall(nr, a0, a1));
1371 }
1372 
1373 unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
1374 {
1375 	const unsigned long num_ht_pages = 12 << (30 - vm->page_shift); /* 12 GiB */
1376 	unsigned long ht_gfn, max_gfn, max_pfn;
1377 	u8 maxphyaddr, guest_maxphyaddr;
1378 
1379 	/*
1380 	 * Use "guest MAXPHYADDR" from KVM if it's available.  Guest MAXPHYADDR
1381 	 * enumerates the max _mappable_ GPA, which can be less than the raw
1382 	 * MAXPHYADDR, e.g. if MAXPHYADDR=52, KVM is using TDP, and the CPU
1383 	 * doesn't support 5-level TDP.
1384 	 */
1385 	guest_maxphyaddr = kvm_cpu_property(X86_PROPERTY_GUEST_MAX_PHY_ADDR);
1386 	guest_maxphyaddr = guest_maxphyaddr ?: vm->pa_bits;
1387 	TEST_ASSERT(guest_maxphyaddr <= vm->pa_bits,
1388 		    "Guest MAXPHYADDR should never be greater than raw MAXPHYADDR");
1389 
1390 	max_gfn = (1ULL << (guest_maxphyaddr - vm->page_shift)) - 1;
1391 
1392 	/* Avoid reserved HyperTransport region on AMD or Hygon processors. */
1393 	if (!host_cpu_is_amd_compatible)
1394 		return max_gfn;
1395 
1396 	/* On parts with <40 physical address bits, the area is fully hidden */
1397 	if (vm->pa_bits < 40)
1398 		return max_gfn;
1399 
1400 	/* Before family 17h, the HyperTransport area is just below 1T.  */
1401 	ht_gfn = (1 << 28) - num_ht_pages;
1402 	if (this_cpu_family() < 0x17)
1403 		goto done;
1404 
1405 	/*
1406 	 * Otherwise it's at the top of the physical address space, possibly
1407 	 * reduced due to SME or CSV by bits 11:6 of CPUID[0x8000001f].EBX.  Use
1408 	 * the old conservative value if MAXPHYADDR is not enumerated.
1409 	 */
1410 	if (!this_cpu_has_p(X86_PROPERTY_MAX_PHY_ADDR))
1411 		goto done;
1412 
1413 	maxphyaddr = this_cpu_property(X86_PROPERTY_MAX_PHY_ADDR);
1414 	max_pfn = (1ULL << (maxphyaddr - vm->page_shift)) - 1;
1415 
1416 	if (this_cpu_has_p(X86_PROPERTY_PHYS_ADDR_REDUCTION))
1417 		max_pfn >>= this_cpu_property(X86_PROPERTY_PHYS_ADDR_REDUCTION);
1418 
1419 	ht_gfn = max_pfn - num_ht_pages;
1420 done:
1421 	return min(max_gfn, ht_gfn - 1);
1422 }
1423 
1424 void kvm_selftest_arch_init(void)
1425 {
1426 	host_cpu_is_intel = this_cpu_is_intel();
1427 	host_cpu_is_amd = this_cpu_is_amd();
1428 	host_cpu_is_hygon = this_cpu_is_hygon();
1429 	host_cpu_is_amd_compatible = host_cpu_is_amd || host_cpu_is_hygon;
1430 	is_forced_emulation_enabled = kvm_is_forced_emulation_enabled();
1431 
1432 	kvm_init_pmu_errata();
1433 }
1434 
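/*
 * sys_get_cur_clocksource() appears to return the raw sysfs contents,
 * trailing newline included, hence the "\n" in the comparisons below.
 */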
1435 bool sys_clocksource_is_based_on_tsc(void)
1436 {
1437 	char *clk_name = sys_get_cur_clocksource();
1438 	bool ret = !strcmp(clk_name, "tsc\n") ||
1439 		   !strcmp(clk_name, "hyperv_clocksource_tsc_page\n");
1440 
1441 	free(clk_name);
1442 
1443 	return ret;
1444 }
1445 
1446 bool kvm_arch_has_default_irqchip(void)
1447 {
1448 	return true;
1449 }
1450 
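/*
 * Carve out SMRAM at @smram_gpa in its own memslot, copy @smi_handler to the
 * architectural SMI entry point at offset 0x8000 from SMBASE, and point the
 * vCPU's SMBASE at the region.
 */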
1451 void setup_smram(struct kvm_vm *vm, struct kvm_vcpu *vcpu, u64 smram_gpa,
1452 		 const void *smi_handler, size_t handler_size)
1453 {
1454 	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, smram_gpa,
1455 				    SMRAM_MEMSLOT, SMRAM_PAGES, 0);
1456 	TEST_ASSERT(vm_phy_pages_alloc(vm, SMRAM_PAGES, smram_gpa,
1457 				       SMRAM_MEMSLOT) == smram_gpa,
1458 		    "Could not allocate guest physical addresses for SMRAM");
1459 
1460 	memset(addr_gpa2hva(vm, smram_gpa), 0x0, SMRAM_SIZE);
1461 	memcpy(addr_gpa2hva(vm, smram_gpa) + 0x8000, smi_handler, handler_size);
1462 	vcpu_set_msr(vcpu, MSR_IA32_SMBASE, smram_gpa);
1463 }
1464 
1465 void inject_smi(struct kvm_vcpu *vcpu)
1466 {
1467 	struct kvm_vcpu_events events;
1468 
1469 	vcpu_events_get(vcpu, &events);
1470 	events.smi.pending = 1;
1471 	events.flags |= KVM_VCPUEVENT_VALID_SMM;
1472 	vcpu_events_set(vcpu, &events);
1473 }
1474