xref: /linux/tools/testing/selftests/kvm/lib/x86/processor.c (revision 11e8c7e9471cf8e6ae6ec7324a3174191cd965e3)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2018, Google LLC.
4  */
5 
6 #include "linux/bitmap.h"
7 #include "test_util.h"
8 #include "kvm_util.h"
9 #include "pmu.h"
10 #include "processor.h"
11 #include "smm.h"
12 #include "svm_util.h"
13 #include "sev.h"
14 #include "vmx.h"
15 
16 #ifndef NUM_INTERRUPTS
17 #define NUM_INTERRUPTS 256
18 #endif
19 
20 #define KERNEL_CS	0x8
21 #define KERNEL_DS	0x10
22 #define KERNEL_TSS	0x18
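/*
 * These selectors index the GDT built by vm_init_descriptor_tables(): a
 * selector is a byte offset into the GDT (entry = selector >> 3), so
 * KERNEL_CS and KERNEL_DS land in entries 1 and 2, and KERNEL_TSS in
 * entry 3 (the 64-bit TSS descriptor is 16 bytes, spanning two 8-byte
 * slots).  Entry 0 is the architecturally-required null descriptor.
 */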
23 
24 vm_vaddr_t exception_handlers;
25 bool host_cpu_is_amd;
26 bool host_cpu_is_intel;
27 bool is_forced_emulation_enabled;
28 uint64_t guest_tsc_khz;
29 
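/*
 * Note, vector 0 (#DE) does double duty: selftests use '0' to mean "no
 * exception", so a #DE that is reported via the fixup protocol (see
 * kvm_fixup_exception()) is remapped to the magic KVM_MAGIC_DE_VECTOR.
 */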
30 const char *ex_str(int vector)
31 {
32 	switch (vector) {
33 #define VEC_STR(v) case v##_VECTOR: return "#" #v
34 	case DE_VECTOR: return "no exception";
35 	case KVM_MAGIC_DE_VECTOR: return "#DE";
36 	VEC_STR(DB);
37 	VEC_STR(NMI);
38 	VEC_STR(BP);
39 	VEC_STR(OF);
40 	VEC_STR(BR);
41 	VEC_STR(UD);
42 	VEC_STR(NM);
43 	VEC_STR(DF);
44 	VEC_STR(TS);
45 	VEC_STR(NP);
46 	VEC_STR(SS);
47 	VEC_STR(GP);
48 	VEC_STR(PF);
49 	VEC_STR(MF);
50 	VEC_STR(AC);
51 	VEC_STR(MC);
52 	VEC_STR(XM);
53 	VEC_STR(VE);
54 	VEC_STR(CP);
55 	VEC_STR(HV);
56 	VEC_STR(VC);
57 	VEC_STR(SX);
58 	default: return "#??";
59 #undef VEC_STR
60 	}
61 }
62 
63 static void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent)
64 {
65 	fprintf(stream, "%*srax: 0x%.16llx rbx: 0x%.16llx "
66 		"rcx: 0x%.16llx rdx: 0x%.16llx\n",
67 		indent, "",
68 		regs->rax, regs->rbx, regs->rcx, regs->rdx);
69 	fprintf(stream, "%*srsi: 0x%.16llx rdi: 0x%.16llx "
70 		"rsp: 0x%.16llx rbp: 0x%.16llx\n",
71 		indent, "",
72 		regs->rsi, regs->rdi, regs->rsp, regs->rbp);
73 	fprintf(stream, "%*sr8:  0x%.16llx r9:  0x%.16llx "
74 		"r10: 0x%.16llx r11: 0x%.16llx\n",
75 		indent, "",
76 		regs->r8, regs->r9, regs->r10, regs->r11);
77 	fprintf(stream, "%*sr12: 0x%.16llx r13: 0x%.16llx "
78 		"r14: 0x%.16llx r15: 0x%.16llx\n",
79 		indent, "",
80 		regs->r12, regs->r13, regs->r14, regs->r15);
81 	fprintf(stream, "%*srip: 0x%.16llx rfl: 0x%.16llx\n",
82 		indent, "",
83 		regs->rip, regs->rflags);
84 }
85 
86 static void segment_dump(FILE *stream, struct kvm_segment *segment,
87 			 uint8_t indent)
88 {
89 	fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.8x "
90 		"selector: 0x%.4x type: 0x%.2x\n",
91 		indent, "", segment->base, segment->limit,
92 		segment->selector, segment->type);
93 	fprintf(stream, "%*spresent: 0x%.2x dpl: 0x%.2x "
94 		"db: 0x%.2x s: 0x%.2x l: 0x%.2x\n",
95 		indent, "", segment->present, segment->dpl,
96 		segment->db, segment->s, segment->l);
97 	fprintf(stream, "%*sg: 0x%.2x avl: 0x%.2x "
98 		"unusable: 0x%.2x padding: 0x%.2x\n",
99 		indent, "", segment->g, segment->avl,
100 		segment->unusable, segment->padding);
101 }
102 
103 static void dtable_dump(FILE *stream, struct kvm_dtable *dtable,
104 			uint8_t indent)
105 {
106 	fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.4x "
107 		"padding: 0x%.4x 0x%.4x 0x%.4x\n",
108 		indent, "", dtable->base, dtable->limit,
109 		dtable->padding[0], dtable->padding[1], dtable->padding[2]);
110 }
111 
112 static void sregs_dump(FILE *stream, struct kvm_sregs *sregs, uint8_t indent)
113 {
114 	unsigned int i;
115 
116 	fprintf(stream, "%*scs:\n", indent, "");
117 	segment_dump(stream, &sregs->cs, indent + 2);
118 	fprintf(stream, "%*sds:\n", indent, "");
119 	segment_dump(stream, &sregs->ds, indent + 2);
120 	fprintf(stream, "%*ses:\n", indent, "");
121 	segment_dump(stream, &sregs->es, indent + 2);
122 	fprintf(stream, "%*sfs:\n", indent, "");
123 	segment_dump(stream, &sregs->fs, indent + 2);
124 	fprintf(stream, "%*sgs:\n", indent, "");
125 	segment_dump(stream, &sregs->gs, indent + 2);
126 	fprintf(stream, "%*sss:\n", indent, "");
127 	segment_dump(stream, &sregs->ss, indent + 2);
128 	fprintf(stream, "%*str:\n", indent, "");
129 	segment_dump(stream, &sregs->tr, indent + 2);
130 	fprintf(stream, "%*sldt:\n", indent, "");
131 	segment_dump(stream, &sregs->ldt, indent + 2);
132 
133 	fprintf(stream, "%*sgdt:\n", indent, "");
134 	dtable_dump(stream, &sregs->gdt, indent + 2);
135 	fprintf(stream, "%*sidt:\n", indent, "");
136 	dtable_dump(stream, &sregs->idt, indent + 2);
137 
138 	fprintf(stream, "%*scr0: 0x%.16llx cr2: 0x%.16llx "
139 		"cr3: 0x%.16llx cr4: 0x%.16llx\n",
140 		indent, "",
141 		sregs->cr0, sregs->cr2, sregs->cr3, sregs->cr4);
142 	fprintf(stream, "%*scr8: 0x%.16llx efer: 0x%.16llx "
143 		"apic_base: 0x%.16llx\n",
144 		indent, "",
145 		sregs->cr8, sregs->efer, sregs->apic_base);
146 
147 	fprintf(stream, "%*sinterrupt_bitmap:\n", indent, "");
148 	for (i = 0; i < (KVM_NR_INTERRUPTS + 63) / 64; i++) {
149 		fprintf(stream, "%*s%.16llx\n", indent + 2, "",
150 			sregs->interrupt_bitmap[i]);
151 	}
152 }
153 
154 bool kvm_is_tdp_enabled(void)
155 {
156 	if (host_cpu_is_intel)
157 		return get_kvm_intel_param_bool("ept");
158 	else
159 		return get_kvm_amd_param_bool("npt");
160 }
161 
162 static void virt_mmu_init(struct kvm_vm *vm, struct kvm_mmu *mmu,
163 			  struct pte_masks *pte_masks)
164 {
165 	/* If needed, create the top-level page table. */
166 	if (!mmu->pgd_created) {
167 		mmu->pgd = vm_alloc_page_table(vm);
168 		mmu->pgd_created = true;
169 		mmu->arch.pte_masks = *pte_masks;
170 	}
171 
172 	TEST_ASSERT(mmu->pgtable_levels == 4 || mmu->pgtable_levels == 5,
173 		    "Selftests MMU only supports 4-level and 5-level paging, not %u-level paging",
174 		    mmu->pgtable_levels);
175 }
176 
177 void virt_arch_pgd_alloc(struct kvm_vm *vm)
178 {
179 	TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
180 		    "Unknown or unsupported guest mode: 0x%x", vm->mode);
181 
182 	struct pte_masks pte_masks = (struct pte_masks){
183 		.present	=	BIT_ULL(0),
184 		.writable	=	BIT_ULL(1),
185 		.user		=	BIT_ULL(2),
186 		.accessed	=	BIT_ULL(5),
187 		.dirty		=	BIT_ULL(6),
188 		.huge		=	BIT_ULL(7),
189 		.nx		=	BIT_ULL(63),
190 		.executable	=	0,
191 		.c		=	vm->arch.c_bit,
192 		.s		=	vm->arch.s_bit,
193 	};
194 
195 	virt_mmu_init(vm, &vm->mmu, &pte_masks);
196 }
197 
198 void tdp_mmu_init(struct kvm_vm *vm, int pgtable_levels,
199 		  struct pte_masks *pte_masks)
200 {
201 	TEST_ASSERT(!vm->stage2_mmu.pgtable_levels, "TDP MMU already initialized");
202 
203 	vm->stage2_mmu.pgtable_levels = pgtable_levels;
204 	virt_mmu_init(vm, &vm->stage2_mmu, pte_masks);
205 }
206 
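/*
 * Each paging level translates 9 bits of the virtual address, i.e. each
 * page table holds 512 (0x1ff + 1) entries, yielding 48-bit virtual
 * addresses with 4-level paging and 57-bit with 5-level (LA57) paging.
 */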
207 static void *virt_get_pte(struct kvm_vm *vm, struct kvm_mmu *mmu,
208 			  uint64_t *parent_pte, uint64_t vaddr, int level)
209 {
210 	uint64_t pt_gpa = PTE_GET_PA(*parent_pte);
211 	uint64_t *page_table = addr_gpa2hva(vm, pt_gpa);
212 	int index = (vaddr >> PG_LEVEL_SHIFT(level)) & 0x1ffu;
213 
214 	TEST_ASSERT((*parent_pte == mmu->pgd) || is_present_pte(mmu, parent_pte),
215 		    "Parent PTE (level %d) not PRESENT for gva: 0x%08lx",
216 		    level + 1, vaddr);
217 
218 	return &page_table[index];
219 }
220 
221 static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
222 				       struct kvm_mmu *mmu,
223 				       uint64_t *parent_pte,
224 				       uint64_t vaddr,
225 				       uint64_t paddr,
226 				       int current_level,
227 				       int target_level)
228 {
229 	uint64_t *pte = virt_get_pte(vm, mmu, parent_pte, vaddr, current_level);
230 
231 	paddr = vm_untag_gpa(vm, paddr);
232 
233 	if (!is_present_pte(mmu, pte)) {
234 		*pte = PTE_PRESENT_MASK(mmu) | PTE_READABLE_MASK(mmu) |
235 		       PTE_WRITABLE_MASK(mmu) | PTE_EXECUTABLE_MASK(mmu) |
236 		       PTE_ALWAYS_SET_MASK(mmu);
237 		if (current_level == target_level)
238 			*pte |= PTE_HUGE_MASK(mmu) | (paddr & PHYSICAL_PAGE_MASK);
239 		else
240 			*pte |= vm_alloc_page_table(vm) & PHYSICAL_PAGE_MASK;
241 	} else {
242 		/*
243 		 * Entry already present.  Assert that the caller doesn't want
244 		 * a hugepage at this level, and that there isn't a hugepage at
245 		 * this level.
246 		 */
247 		TEST_ASSERT(current_level != target_level,
248 			    "Cannot create hugepage at level: %u, vaddr: 0x%lx",
249 			    current_level, vaddr);
250 		TEST_ASSERT(!is_huge_pte(mmu, pte),
251 			    "Cannot create page table at level: %u, vaddr: 0x%lx",
252 			    current_level, vaddr);
253 	}
254 	return pte;
255 }
256 
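/*
 * Map a single page at @vaddr to @paddr at the given @level, e.g. pass
 * PG_LEVEL_2M (with 2MiB-aligned addresses) to install one 2MiB hugepage
 * instead of 512 discrete 4KiB mappings.
 */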
257 void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, uint64_t vaddr,
258 		   uint64_t paddr, int level)
259 {
260 	const uint64_t pg_size = PG_LEVEL_SIZE(level);
261 	uint64_t *pte = &mmu->pgd;
262 	int current_level;
263 
264 	TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
265 		    "Unknown or unsupported guest mode: 0x%x", vm->mode);
266 
267 	TEST_ASSERT((vaddr % pg_size) == 0,
268 		    "Virtual address not aligned,\n"
269 		    "vaddr: 0x%lx page size: 0x%lx", vaddr, pg_size);
270 	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (vaddr >> vm->page_shift)),
271 		    "Invalid virtual address, vaddr: 0x%lx", vaddr);
272 	TEST_ASSERT((paddr % pg_size) == 0,
273 		    "Physical address not aligned,\n"
274 		    "  paddr: 0x%lx page size: 0x%lx", paddr, pg_size);
275 	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
276 		    "Physical address beyond maximum supported,\n"
277 		    "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
278 		    paddr, vm->max_gfn, vm->page_size);
279 	TEST_ASSERT(vm_untag_gpa(vm, paddr) == paddr,
280 		    "Unexpected bits in paddr: %lx", paddr);
281 
282 	TEST_ASSERT(!PTE_EXECUTABLE_MASK(mmu) || !PTE_NX_MASK(mmu),
283 		    "X and NX bit masks cannot be used simultaneously");
284 
285 	/*
286 	 * Allocate upper level page tables, if not already present.  Return
287 	 * early if a hugepage was created.
288 	 */
289 	for (current_level = mmu->pgtable_levels;
290 	     current_level > PG_LEVEL_4K;
291 	     current_level--) {
292 		pte = virt_create_upper_pte(vm, mmu, pte, vaddr, paddr,
293 					    current_level, level);
294 		if (is_huge_pte(mmu, pte))
295 			return;
296 	}
297 
298 	/* Fill in page table entry. */
299 	pte = virt_get_pte(vm, mmu, pte, vaddr, PG_LEVEL_4K);
300 	TEST_ASSERT(!is_present_pte(mmu, pte),
301 		    "PTE already present for 4k page at vaddr: 0x%lx", vaddr);
302 	*pte = PTE_PRESENT_MASK(mmu) | PTE_READABLE_MASK(mmu) |
303 	       PTE_WRITABLE_MASK(mmu) | PTE_EXECUTABLE_MASK(mmu) |
304 	       PTE_ALWAYS_SET_MASK(mmu) | (paddr & PHYSICAL_PAGE_MASK);
305 
306 	/*
307 	 * Neither SEV nor TDX supports shared page tables, so only the final
308 	 * leaf PTE needs to have the C/S-bit set manually.
309 	 */
310 	if (vm_is_gpa_protected(vm, paddr))
311 		*pte |= PTE_C_BIT_MASK(mmu);
312 	else
313 		*pte |= PTE_S_BIT_MASK(mmu);
314 }
315 
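/* Arch hook used by generic selftests code to map a single 4KiB page. */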
316 void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
317 {
318 	__virt_pg_map(vm, &vm->mmu, vaddr, paddr, PG_LEVEL_4K);
319 }
320 
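/*
 * Map a range of sequential pages at @level.  Example (illustrative only):
 * identity map a 4MiB region using two 2MiB hugepages, i.e. two loop
 * iterations:
 *
 *	virt_map_level(vm, gva, gva, 4 * 1024 * 1024, PG_LEVEL_2M);
 */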
321 void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
322 		    uint64_t nr_bytes, int level)
323 {
324 	uint64_t pg_size = PG_LEVEL_SIZE(level);
325 	uint64_t nr_pages = nr_bytes / pg_size;
326 	int i;
327 
328 	TEST_ASSERT(nr_bytes % pg_size == 0,
329 		    "Region size not aligned: nr_bytes: 0x%lx, page size: 0x%lx",
330 		    nr_bytes, pg_size);
331 
332 	for (i = 0; i < nr_pages; i++) {
333 		__virt_pg_map(vm, &vm->mmu, vaddr, paddr, level);
334 		sparsebit_set_num(vm->vpages_mapped, vaddr >> vm->page_shift,
335 				  pg_size / PAGE_SIZE);
336 
337 		vaddr += pg_size;
338 		paddr += pg_size;
339 	}
340 }
341 
342 static bool vm_is_target_pte(struct kvm_mmu *mmu, uint64_t *pte,
343 			     int *level, int current_level)
344 {
345 	if (is_huge_pte(mmu, pte)) {
346 		TEST_ASSERT(*level == PG_LEVEL_NONE ||
347 			    *level == current_level,
348 			    "Unexpected hugepage at level %d", current_level);
349 		*level = current_level;
350 	}
351 
352 	return *level == current_level;
353 }
354 
355 static uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm,
356 					   struct kvm_mmu *mmu,
357 					   uint64_t vaddr,
358 					   int *level)
359 {
360 	int va_width = 12 + (mmu->pgtable_levels) * 9;
361 	uint64_t *pte = &mmu->pgd;
362 	int current_level;
363 
364 	TEST_ASSERT(!vm->arch.is_pt_protected,
365 		    "Walking page tables of protected guests is impossible");
366 
367 	TEST_ASSERT(*level >= PG_LEVEL_NONE && *level <= mmu->pgtable_levels,
368 		    "Invalid PG_LEVEL_* '%d'", *level);
369 
370 	TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
371 		    "Unknown or unsupported guest mode: 0x%x", vm->mode);
372 	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
373 		(vaddr >> vm->page_shift)),
374 		"Invalid virtual address, vaddr: 0x%lx",
375 		vaddr);
376 	/*
377 	 * Check that the vaddr is a sign-extended va_width value, i.e. canonical.
378 	 */
379 	TEST_ASSERT(vaddr ==
380 		    (((int64_t)vaddr << (64 - va_width) >> (64 - va_width))),
381 		    "Canonical check failed.  The virtual address is invalid.");
382 
383 	for (current_level = mmu->pgtable_levels;
384 	     current_level > PG_LEVEL_4K;
385 	     current_level--) {
386 		pte = virt_get_pte(vm, mmu, pte, vaddr, current_level);
387 		if (vm_is_target_pte(mmu, pte, level, current_level))
388 			return pte;
389 	}
390 
391 	return virt_get_pte(vm, mmu, pte, vaddr, PG_LEVEL_4K);
392 }
393 
394 uint64_t *tdp_get_pte(struct kvm_vm *vm, uint64_t l2_gpa)
395 {
396 	int level = PG_LEVEL_4K;
397 
398 	return __vm_get_page_table_entry(vm, &vm->stage2_mmu, l2_gpa, &level);
399 }
400 
401 uint64_t *vm_get_pte(struct kvm_vm *vm, uint64_t vaddr)
402 {
403 	int level = PG_LEVEL_4K;
404 
405 	return __vm_get_page_table_entry(vm, &vm->mmu, vaddr, &level);
406 }
407 
408 void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
409 {
410 	struct kvm_mmu *mmu = &vm->mmu;
411 	uint64_t *pml4e, *pml4e_start;
412 	uint64_t *pdpe, *pdpe_start;
413 	uint64_t *pde, *pde_start;
414 	uint64_t *pte, *pte_start;
415 
416 	if (!mmu->pgd_created)
417 		return;
418 
419 	fprintf(stream, "%*s                                          "
420 		"                no\n", indent, "");
421 	fprintf(stream, "%*s      index hvaddr         gpaddr         "
422 		"addr         w exec dirty\n",
423 		indent, "");
424 	pml4e_start = (uint64_t *) addr_gpa2hva(vm, mmu->pgd);
425 	for (uint16_t n1 = 0; n1 <= 0x1ffu; n1++) {
426 		pml4e = &pml4e_start[n1];
427 		if (!is_present_pte(mmu, pml4e))
428 			continue;
429 		fprintf(stream, "%*spml4e 0x%-3zx %p 0x%-12lx 0x%-10llx %u "
430 			" %u\n",
431 			indent, "",
432 			pml4e - pml4e_start, pml4e,
433 			addr_hva2gpa(vm, pml4e), PTE_GET_PFN(*pml4e),
434 			is_writable_pte(mmu, pml4e), is_nx_pte(mmu, pml4e));
435 
436 		pdpe_start = addr_gpa2hva(vm, *pml4e & PHYSICAL_PAGE_MASK);
437 		for (uint16_t n2 = 0; n2 <= 0x1ffu; n2++) {
438 			pdpe = &pdpe_start[n2];
439 			if (!is_present_pte(mmu, pdpe))
440 				continue;
441 			fprintf(stream, "%*spdpe  0x%-3zx %p 0x%-12lx 0x%-10llx "
442 				"%u  %u\n",
443 				indent, "",
444 				pdpe - pdpe_start, pdpe,
445 				addr_hva2gpa(vm, pdpe),
446 				PTE_GET_PFN(*pdpe), is_writable_pte(mmu, pdpe),
447 				is_nx_pte(mmu, pdpe));
448 
449 			pde_start = addr_gpa2hva(vm, *pdpe & PHYSICAL_PAGE_MASK);
450 			for (uint16_t n3 = 0; n3 <= 0x1ffu; n3++) {
451 				pde = &pde_start[n3];
452 				if (!is_present_pte(mmu, pde))
453 					continue;
454 				fprintf(stream, "%*spde   0x%-3zx %p "
455 					"0x%-12lx 0x%-10llx %u  %u\n",
456 					indent, "", pde - pde_start, pde,
457 					addr_hva2gpa(vm, pde),
458 					PTE_GET_PFN(*pde), is_writable_pte(mmu, pde),
459 					is_nx_pte(mmu, pde));
460 
461 				pte_start = addr_gpa2hva(vm, *pde & PHYSICAL_PAGE_MASK);
462 				for (uint16_t n4 = 0; n4 <= 0x1ffu; n4++) {
463 					pte = &pte_start[n4];
464 					if (!is_present_pte(mmu, pte))
465 						continue;
466 					fprintf(stream, "%*spte   0x%-3zx %p "
467 						"0x%-12lx 0x%-10llx %u  %u "
468 						"    %u    0x%-10lx\n",
469 						indent, "",
470 						pte - pte_start, pte,
471 						addr_hva2gpa(vm, pte),
472 						PTE_GET_PFN(*pte),
473 						is_writable_pte(mmu, pte),
474 						is_nx_pte(mmu, pte),
475 						is_dirty_pte(mmu, pte),
476 						((uint64_t) n1 << 27)
477 							| ((uint64_t) n2 << 18)
478 							| ((uint64_t) n3 << 9)
479 							| ((uint64_t) n4));
480 				}
481 			}
482 		}
483 	}
484 }
485 
486 void vm_enable_tdp(struct kvm_vm *vm)
487 {
488 	if (kvm_cpu_has(X86_FEATURE_VMX))
489 		vm_enable_ept(vm);
490 	else
491 		vm_enable_npt(vm);
492 }
493 
494 bool kvm_cpu_has_tdp(void)
495 {
496 	return kvm_cpu_has_ept() || kvm_cpu_has_npt();
497 }
498 
499 void __tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr,
500 	       uint64_t size, int level)
501 {
502 	size_t page_size = PG_LEVEL_SIZE(level);
503 	size_t npages = size / page_size;
504 
505 	TEST_ASSERT(nested_paddr + size > nested_paddr, "Nested paddr overflow");
506 	TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
507 
508 	while (npages--) {
509 		__virt_pg_map(vm, &vm->stage2_mmu, nested_paddr, paddr, level);
510 		nested_paddr += page_size;
511 		paddr += page_size;
512 	}
513 }
514 
515 void tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr,
516 	     uint64_t size)
517 {
518 	__tdp_map(vm, nested_paddr, paddr, size, PG_LEVEL_4K);
519 }
520 
521 /* Prepare an identity extended page table that maps all the
522  * physical pages in the VM.
523  */
524 void tdp_identity_map_default_memslots(struct kvm_vm *vm)
525 {
526 	uint32_t s, memslot = 0;
527 	sparsebit_idx_t i, last;
528 	struct userspace_mem_region *region = memslot2region(vm, memslot);
529 
530 	/* Only memslot 0 is mapped here; ensure it's the only one in use. */
531 	for (s = 0; s < NR_MEM_REGIONS; s++)
532 		TEST_ASSERT_EQ(vm->memslots[s], 0);
533 
534 	i = (region->region.guest_phys_addr >> vm->page_shift) - 1;
535 	last = i + (region->region.memory_size >> vm->page_shift);
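	/*
	 * unused_phy_pages has a bit set for every free page, so walk the
	 * clear bits to visit only pages that have actually been allocated,
	 * and identity map each one into the stage-2 MMU.
	 */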
536 	for (;;) {
537 		i = sparsebit_next_clear(region->unused_phy_pages, i);
538 		if (i > last)
539 			break;
540 
541 		tdp_map(vm, (uint64_t)i << vm->page_shift,
542 			(uint64_t)i << vm->page_shift, 1 << vm->page_shift);
543 	}
544 }
545 
546 /* Identity map a region with 1GiB Pages. */
547 void tdp_identity_map_1g(struct kvm_vm *vm, uint64_t addr, uint64_t size)
548 {
549 	__tdp_map(vm, addr, addr, size, PG_LEVEL_1G);
550 }
551 
552 /*
553  * Set Unusable Segment
554  *
555  * Input Args: None
556  *
557  * Output Args:
558  *   segp - Pointer to segment register
559  *
560  * Return: None
561  *
562  * Sets the segment register pointed to by @segp to an unusable state.
563  */
564 static void kvm_seg_set_unusable(struct kvm_segment *segp)
565 {
566 	memset(segp, 0, sizeof(*segp));
567 	segp->unusable = true;
568 }
569 
570 static void kvm_seg_fill_gdt_64bit(struct kvm_vm *vm, struct kvm_segment *segp)
571 {
572 	void *gdt = addr_gva2hva(vm, vm->arch.gdt);
573 	struct desc64 *desc = gdt + (segp->selector >> 3) * 8;
574 
575 	desc->limit0 = segp->limit & 0xFFFF;
576 	desc->base0 = segp->base & 0xFFFF;
577 	desc->base1 = segp->base >> 16;
578 	desc->type = segp->type;
579 	desc->s = segp->s;
580 	desc->dpl = segp->dpl;
581 	desc->p = segp->present;
582 	desc->limit1 = segp->limit >> 16;
583 	desc->avl = segp->avl;
584 	desc->l = segp->l;
585 	desc->db = segp->db;
586 	desc->g = segp->g;
587 	desc->base2 = segp->base >> 24;
588 	if (!segp->s)
589 		desc->base3 = segp->base >> 32;
590 }
591 
592 static void kvm_seg_set_kernel_code_64bit(struct kvm_segment *segp)
593 {
594 	memset(segp, 0, sizeof(*segp));
595 	segp->selector = KERNEL_CS;
596 	segp->limit = 0xFFFFFFFFu;
597 	segp->s = 0x1; /* kTypeCodeData */
598 	segp->type = 0x08 | 0x01 | 0x02; /* kFlagCode | kFlagCodeAccessed
599 					  * | kFlagCodeReadable
600 					  */
601 	segp->g = true;
602 	segp->l = true;
603 	segp->present = 1;
604 }
605 
606 static void kvm_seg_set_kernel_data_64bit(struct kvm_segment *segp)
607 {
608 	memset(segp, 0, sizeof(*segp));
609 	segp->selector = KERNEL_DS;
610 	segp->limit = 0xFFFFFFFFu;
611 	segp->s = 0x1; /* kTypeCodeData */
612 	segp->type = 0x00 | 0x01 | 0x02; /* kFlagData | kFlagDataAccessed
613 					  * | kFlagDataWritable
614 					  */
615 	segp->g = true;
616 	segp->present = true;
617 }
618 
619 vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
620 {
621 	int level = PG_LEVEL_NONE;
622 	uint64_t *pte = __vm_get_page_table_entry(vm, &vm->mmu, gva, &level);
623 
624 	TEST_ASSERT(is_present_pte(&vm->mmu, pte),
625 		    "Leaf PTE not PRESENT for gva: 0x%08lx", gva);
626 
627 	/*
628 	 * No need for a hugepage mask on the PTE, x86-64 requires the "unused"
629 	 * address bits to be zero.
630 	 */
631 	return vm_untag_gpa(vm, PTE_GET_PA(*pte)) | (gva & ~HUGEPAGE_MASK(level));
632 }
633 
634 static void kvm_seg_set_tss_64bit(vm_vaddr_t base, struct kvm_segment *segp)
635 {
636 	memset(segp, 0, sizeof(*segp));
637 	segp->base = base;
638 	segp->limit = 0x67;
639 	segp->selector = KERNEL_TSS;
640 	segp->type = 0xb;
641 	segp->present = 1;
642 }
643 
644 static void vcpu_init_sregs(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
645 {
646 	struct kvm_sregs sregs;
647 
648 	TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
649 		    "Unknown or unsupported guest mode: 0x%x", vm->mode);
650 
651 	/* Set mode specific system register values. */
652 	vcpu_sregs_get(vcpu, &sregs);
653 
654 	sregs.idt.base = vm->arch.idt;
655 	sregs.idt.limit = NUM_INTERRUPTS * sizeof(struct idt_entry) - 1;
656 	sregs.gdt.base = vm->arch.gdt;
657 	sregs.gdt.limit = getpagesize() - 1;
658 
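	/*
	 * Run the guest directly in 64-bit mode: long mode requires CR0.PE
	 * and CR0.PG, CR4.PAE, and EFER.LME/LMA; 5-level paging additionally
	 * requires CR4.LA57.
	 */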
659 	sregs.cr0 = X86_CR0_PE | X86_CR0_NE | X86_CR0_PG;
660 	sregs.cr4 |= X86_CR4_PAE | X86_CR4_OSFXSR;
661 	if (kvm_cpu_has(X86_FEATURE_XSAVE))
662 		sregs.cr4 |= X86_CR4_OSXSAVE;
663 	if (vm->mmu.pgtable_levels == 5)
664 		sregs.cr4 |= X86_CR4_LA57;
665 	sregs.efer |= (EFER_LME | EFER_LMA | EFER_NX);
666 
667 	kvm_seg_set_unusable(&sregs.ldt);
668 	kvm_seg_set_kernel_code_64bit(&sregs.cs);
669 	kvm_seg_set_kernel_data_64bit(&sregs.ds);
670 	kvm_seg_set_kernel_data_64bit(&sregs.es);
671 	kvm_seg_set_kernel_data_64bit(&sregs.gs);
672 	kvm_seg_set_tss_64bit(vm->arch.tss, &sregs.tr);
673 
674 	sregs.cr3 = vm->mmu.pgd;
675 	vcpu_sregs_set(vcpu, &sregs);
676 }
677 
678 static void vcpu_init_xcrs(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
679 {
680 	struct kvm_xcrs xcrs = {
681 		.nr_xcrs = 1,
682 		.xcrs[0].xcr = 0,
683 		.xcrs[0].value = kvm_cpu_supported_xcr0(),
684 	};
685 
686 	if (!kvm_cpu_has(X86_FEATURE_XSAVE))
687 		return;
688 
689 	vcpu_xcrs_set(vcpu, &xcrs);
690 }
691 
692 static void set_idt_entry(struct kvm_vm *vm, int vector, unsigned long addr,
693 			  int dpl, unsigned short selector)
694 {
695 	struct idt_entry *base =
696 		(struct idt_entry *)addr_gva2hva(vm, vm->arch.idt);
697 	struct idt_entry *e = &base[vector];
698 
699 	memset(e, 0, sizeof(*e));
700 	e->offset0 = addr;
701 	e->selector = selector;
702 	e->ist = 0;
703 	e->type = 14;
704 	e->dpl = dpl;
705 	e->p = 1;
706 	e->offset1 = addr >> 16;
707 	e->offset2 = addr >> 32;
708 }
709 
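/*
 * Fixup protocol: guest code that expects an exception (see the
 * KVM_ASM_SAFE() machinery in processor.h) loads KVM_EXCEPTION_MAGIC into
 * R9, the faulting RIP into R10, and the post-fault resume RIP into R11.
 * On a match, skip the faulting instruction and report the vector and
 * error code back to the guest in R9/R10.
 */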
710 static bool kvm_fixup_exception(struct ex_regs *regs)
711 {
712 	if (regs->r9 != KVM_EXCEPTION_MAGIC || regs->rip != regs->r10)
713 		return false;
714 
715 	if (regs->vector == DE_VECTOR)
716 		regs->vector = KVM_MAGIC_DE_VECTOR;
717 
718 	regs->rip = regs->r11;
719 	regs->r9 = regs->vector;
720 	regs->r10 = regs->error_code;
721 	return true;
722 }
723 
724 void route_exception(struct ex_regs *regs)
725 {
726 	typedef void(*handler)(struct ex_regs *);
727 	handler *handlers = (handler *)exception_handlers;
728 
729 	if (handlers && handlers[regs->vector]) {
730 		handlers[regs->vector](regs);
731 		return;
732 	}
733 
734 	if (kvm_fixup_exception(regs))
735 		return;
736 
737 	GUEST_FAIL("Unhandled exception '0x%lx' at guest RIP '0x%lx'",
738 		   regs->vector, regs->rip);
739 }
740 
741 static void vm_init_descriptor_tables(struct kvm_vm *vm)
742 {
743 	extern void *idt_handlers;
744 	struct kvm_segment seg;
745 	int i;
746 
747 	vm->arch.gdt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
748 	vm->arch.idt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
749 	vm->handlers = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
750 	vm->arch.tss = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
751 
752 	/* Handlers have the same address in both address spaces. */
753 	for (i = 0; i < NUM_INTERRUPTS; i++)
754 		set_idt_entry(vm, i, (unsigned long)(&idt_handlers)[i], 0, KERNEL_CS);
755 
756 	*(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
757 
758 	kvm_seg_set_kernel_code_64bit(&seg);
759 	kvm_seg_fill_gdt_64bit(vm, &seg);
760 
761 	kvm_seg_set_kernel_data_64bit(&seg);
762 	kvm_seg_fill_gdt_64bit(vm, &seg);
763 
764 	kvm_seg_set_tss_64bit(vm->arch.tss, &seg);
765 	kvm_seg_fill_gdt_64bit(vm, &seg);
766 }
767 
768 void vm_install_exception_handler(struct kvm_vm *vm, int vector,
769 			       void (*handler)(struct ex_regs *))
770 {
771 	vm_vaddr_t *handlers = (vm_vaddr_t *)addr_gva2hva(vm, vm->handlers);
772 
773 	handlers[vector] = (vm_vaddr_t)handler;
774 }
775 
776 void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
777 {
778 	struct ucall uc;
779 
780 	if (get_ucall(vcpu, &uc) == UCALL_ABORT)
781 		REPORT_GUEST_ASSERT(uc);
782 }
783 
784 void kvm_arch_vm_post_create(struct kvm_vm *vm, unsigned int nr_vcpus)
785 {
786 	int r;
787 
788 	TEST_ASSERT(kvm_has_cap(KVM_CAP_GET_TSC_KHZ),
789 		    "Require KVM_GET_TSC_KHZ to provide udelay() to guest.");
790 
791 	vm_create_irqchip(vm);
792 	vm_init_descriptor_tables(vm);
793 
794 	sync_global_to_guest(vm, host_cpu_is_intel);
795 	sync_global_to_guest(vm, host_cpu_is_amd);
796 	sync_global_to_guest(vm, is_forced_emulation_enabled);
797 	sync_global_to_guest(vm, pmu_errata_mask);
798 
799 	if (is_sev_vm(vm)) {
800 		struct kvm_sev_init init = { 0 };
801 
802 		vm_sev_ioctl(vm, KVM_SEV_INIT2, &init);
803 	}
804 
805 	r = __vm_ioctl(vm, KVM_GET_TSC_KHZ, NULL);
806 	TEST_ASSERT(r > 0, "KVM_GET_TSC_KHZ did not provide a valid TSC frequency.");
807 	guest_tsc_khz = r;
808 	sync_global_to_guest(vm, guest_tsc_khz);
809 }
810 
811 void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
812 {
813 	struct kvm_regs regs;
814 
815 	vcpu_regs_get(vcpu, &regs);
816 	regs.rip = (unsigned long) guest_code;
817 	vcpu_regs_set(vcpu, &regs);
818 }
819 
820 struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
821 {
822 	struct kvm_mp_state mp_state;
823 	struct kvm_regs regs;
824 	vm_vaddr_t stack_vaddr;
825 	struct kvm_vcpu *vcpu;
826 
827 	stack_vaddr = __vm_vaddr_alloc(vm, DEFAULT_STACK_PGS * getpagesize(),
828 				       DEFAULT_GUEST_STACK_VADDR_MIN,
829 				       MEM_REGION_DATA);
830 
831 	stack_vaddr += DEFAULT_STACK_PGS * getpagesize();
832 
833 	/*
834 	 * Align stack to match calling sequence requirements in section "The
835 	 * Stack Frame" of the System V ABI AMD64 Architecture Processor
836 	 * Supplement, which requires the value (%rsp + 8) to be a multiple of
837 	 * 16 when control is transferred to the function entry point.
838 	 *
839 	 * If this code is ever used to launch a vCPU with 32-bit entry point it
840 	 * may need to subtract 4 bytes instead of 8 bytes.
841 	 */
842 	TEST_ASSERT(IS_ALIGNED(stack_vaddr, PAGE_SIZE),
843 		    "__vm_vaddr_alloc() did not provide a page-aligned address");
844 	stack_vaddr -= 8;
845 
846 	vcpu = __vm_vcpu_add(vm, vcpu_id);
847 	vcpu_init_cpuid(vcpu, kvm_get_supported_cpuid());
848 	vcpu_init_sregs(vm, vcpu);
849 	vcpu_init_xcrs(vm, vcpu);
850 
851 	/* Setup guest general purpose registers */
852 	vcpu_regs_get(vcpu, &regs);
853 	regs.rflags = regs.rflags | 0x2;
854 	regs.rsp = stack_vaddr;
855 	vcpu_regs_set(vcpu, &regs);
856 
857 	/* Setup the MP state */
858 	mp_state.mp_state = 0;
859 	vcpu_mp_state_set(vcpu, &mp_state);
860 
861 	/*
862 	 * Refresh CPUID after setting SREGS and XCR0, so that KVM's "runtime"
863 	 * updates to guest CPUID, e.g. for OSXSAVE and XSAVE state size, are
864 	 * reflected into selftests' vCPU CPUID cache, i.e. so that the cache
865 	 * is consistent with vCPU state.
866 	 */
867 	vcpu_get_cpuid(vcpu);
868 	return vcpu;
869 }
870 
871 struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id)
872 {
873 	struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id);
874 
875 	vcpu_init_cpuid(vcpu, kvm_get_supported_cpuid());
876 
877 	return vcpu;
878 }
879 
880 void vcpu_arch_free(struct kvm_vcpu *vcpu)
881 {
882 	if (vcpu->cpuid)
883 		free(vcpu->cpuid);
884 }
885 
886 /* Do not use kvm_supported_cpuid directly except for validity checks. */
887 static void *kvm_supported_cpuid;
888 
889 const struct kvm_cpuid2 *kvm_get_supported_cpuid(void)
890 {
891 	int kvm_fd;
892 
893 	if (kvm_supported_cpuid)
894 		return kvm_supported_cpuid;
895 
896 	kvm_supported_cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES);
897 	kvm_fd = open_kvm_dev_path_or_exit();
898 
899 	kvm_ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID,
900 		  (struct kvm_cpuid2 *)kvm_supported_cpuid);
901 
902 	close(kvm_fd);
903 	return kvm_supported_cpuid;
904 }
905 
906 static uint32_t __kvm_cpu_has(const struct kvm_cpuid2 *cpuid,
907 			      uint32_t function, uint32_t index,
908 			      uint8_t reg, uint8_t lo, uint8_t hi)
909 {
910 	const struct kvm_cpuid_entry2 *entry;
911 	int i;
912 
913 	for (i = 0; i < cpuid->nent; i++) {
914 		entry = &cpuid->entries[i];
915 
916 		/*
917 		 * The output registers in kvm_cpuid_entry2 are in alphabetical
918 		 * order, but kvm_x86_cpu_feature matches that mess, so yay
919 		 * pointer shenanigans!
920 		 */
921 		if (entry->function == function && entry->index == index)
922 			return ((&entry->eax)[reg] & GENMASK(hi, lo)) >> lo;
923 	}
924 
925 	return 0;
926 }
927 
928 bool kvm_cpuid_has(const struct kvm_cpuid2 *cpuid,
929 		   struct kvm_x86_cpu_feature feature)
930 {
931 	return __kvm_cpu_has(cpuid, feature.function, feature.index,
932 			     feature.reg, feature.bit, feature.bit);
933 }
934 
935 uint32_t kvm_cpuid_property(const struct kvm_cpuid2 *cpuid,
936 			    struct kvm_x86_cpu_property property)
937 {
938 	return __kvm_cpu_has(cpuid, property.function, property.index,
939 			     property.reg, property.lo_bit, property.hi_bit);
940 }
941 
942 uint64_t kvm_get_feature_msr(uint64_t msr_index)
943 {
944 	struct {
945 		struct kvm_msrs header;
946 		struct kvm_msr_entry entry;
947 	} buffer = {};
948 	int r, kvm_fd;
949 
950 	buffer.header.nmsrs = 1;
951 	buffer.entry.index = msr_index;
952 	kvm_fd = open_kvm_dev_path_or_exit();
953 
954 	r = __kvm_ioctl(kvm_fd, KVM_GET_MSRS, &buffer.header);
955 	TEST_ASSERT(r == 1, KVM_IOCTL_ERROR(KVM_GET_MSRS, r));
956 
957 	close(kvm_fd);
958 	return buffer.entry.data;
959 }
960 
961 void __vm_xsave_require_permission(uint64_t xfeature, const char *name)
962 {
963 	int kvm_fd;
964 	u64 bitmask;
965 	long rc;
966 	struct kvm_device_attr attr = {
967 		.group = 0,
968 		.attr = KVM_X86_XCOMP_GUEST_SUPP,
969 		.addr = (unsigned long) &bitmask,
970 	};
971 
972 	TEST_ASSERT(!kvm_supported_cpuid,
973 		    "kvm_get_supported_cpuid() cannot be used before ARCH_REQ_XCOMP_GUEST_PERM");
974 
975 	TEST_ASSERT(is_power_of_2(xfeature),
976 		    "Dynamic XFeatures must be enabled one at a time");
977 
978 	kvm_fd = open_kvm_dev_path_or_exit();
979 	rc = __kvm_ioctl(kvm_fd, KVM_GET_DEVICE_ATTR, &attr);
980 	close(kvm_fd);
981 
982 	if (rc == -1 && (errno == ENXIO || errno == EINVAL))
983 		__TEST_REQUIRE(0, "KVM_X86_XCOMP_GUEST_SUPP not supported");
984 
985 	TEST_ASSERT(rc == 0, "KVM_GET_DEVICE_ATTR(0, KVM_X86_XCOMP_GUEST_SUPP) error: %ld", rc);
986 
987 	__TEST_REQUIRE(bitmask & xfeature,
988 		       "Required XSAVE feature '%s' not supported", name);
989 
990 	TEST_REQUIRE(!syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM, ilog2(xfeature)));
991 
992 	rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_GUEST_PERM, &bitmask);
993 	TEST_ASSERT(rc == 0, "prctl(ARCH_GET_XCOMP_GUEST_PERM) error: %ld", rc);
994 	TEST_ASSERT(bitmask & xfeature,
995 		    "'%s' (0x%lx) not permitted after prctl(ARCH_REQ_XCOMP_GUEST_PERM) permitted=0x%lx",
996 		    name, xfeature, bitmask);
997 }
998 
999 void vcpu_init_cpuid(struct kvm_vcpu *vcpu, const struct kvm_cpuid2 *cpuid)
1000 {
1001 	TEST_ASSERT(cpuid != vcpu->cpuid, "@cpuid can't be the vCPU's CPUID");
1002 
1003 	/* Allow overriding the default CPUID. */
1004 	if (vcpu->cpuid && vcpu->cpuid->nent < cpuid->nent) {
1005 		free(vcpu->cpuid);
1006 		vcpu->cpuid = NULL;
1007 	}
1008 
1009 	if (!vcpu->cpuid)
1010 		vcpu->cpuid = allocate_kvm_cpuid2(cpuid->nent);
1011 
1012 	memcpy(vcpu->cpuid, cpuid, kvm_cpuid2_size(cpuid->nent));
1013 	vcpu_set_cpuid(vcpu);
1014 }
1015 
1016 void vcpu_set_cpuid_property(struct kvm_vcpu *vcpu,
1017 			     struct kvm_x86_cpu_property property,
1018 			     uint32_t value)
1019 {
1020 	struct kvm_cpuid_entry2 *entry;
1021 
1022 	entry = __vcpu_get_cpuid_entry(vcpu, property.function, property.index);
1023 
1024 	(&entry->eax)[property.reg] &= ~GENMASK(property.hi_bit, property.lo_bit);
1025 	(&entry->eax)[property.reg] |= value << property.lo_bit;
1026 
1027 	vcpu_set_cpuid(vcpu);
1028 
1029 	/* Sanity check that @value doesn't exceed the bounds in any way. */
1030 	TEST_ASSERT_EQ(kvm_cpuid_property(vcpu->cpuid, property), value);
1031 }
1032 
1033 void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, uint32_t function)
1034 {
1035 	struct kvm_cpuid_entry2 *entry = vcpu_get_cpuid_entry(vcpu, function);
1036 
1037 	entry->eax = 0;
1038 	entry->ebx = 0;
1039 	entry->ecx = 0;
1040 	entry->edx = 0;
1041 	vcpu_set_cpuid(vcpu);
1042 }
1043 
1044 void vcpu_set_or_clear_cpuid_feature(struct kvm_vcpu *vcpu,
1045 				     struct kvm_x86_cpu_feature feature,
1046 				     bool set)
1047 {
1048 	struct kvm_cpuid_entry2 *entry;
1049 	u32 *reg;
1050 
1051 	entry = __vcpu_get_cpuid_entry(vcpu, feature.function, feature.index);
1052 	reg = (&entry->eax) + feature.reg;
1053 
1054 	if (set)
1055 		*reg |= BIT(feature.bit);
1056 	else
1057 		*reg &= ~BIT(feature.bit);
1058 
1059 	vcpu_set_cpuid(vcpu);
1060 }
1061 
1062 uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index)
1063 {
1064 	struct {
1065 		struct kvm_msrs header;
1066 		struct kvm_msr_entry entry;
1067 	} buffer = {};
1068 
1069 	buffer.header.nmsrs = 1;
1070 	buffer.entry.index = msr_index;
1071 
1072 	vcpu_msrs_get(vcpu, &buffer.header);
1073 
1074 	return buffer.entry.data;
1075 }
1076 
1077 int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, uint64_t msr_value)
1078 {
1079 	struct {
1080 		struct kvm_msrs header;
1081 		struct kvm_msr_entry entry;
1082 	} buffer = {};
1083 
1084 	memset(&buffer, 0, sizeof(buffer));
1085 	buffer.header.nmsrs = 1;
1086 	buffer.entry.index = msr_index;
1087 	buffer.entry.data = msr_value;
1088 
1089 	return __vcpu_ioctl(vcpu, KVM_SET_MSRS, &buffer.header);
1090 }
1091 
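/*
 * Example (illustrative only): pass two arguments to a guest_main(a, b)
 * entry point; per the System V x86-64 calling convention they are
 * delivered in RDI and RSI:
 *
 *	vcpu_args_set(vcpu, 2, (uint64_t)a, (uint64_t)b);
 */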
1092 void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
1093 {
1094 	va_list ap;
1095 	struct kvm_regs regs;
1096 
1097 	TEST_ASSERT(num >= 1 && num <= 6, "Unsupported number of args,\n"
1098 		    "  num: %u",
1099 		    num);
1100 
1101 	va_start(ap, num);
1102 	vcpu_regs_get(vcpu, &regs);
1103 
1104 	if (num >= 1)
1105 		regs.rdi = va_arg(ap, uint64_t);
1106 
1107 	if (num >= 2)
1108 		regs.rsi = va_arg(ap, uint64_t);
1109 
1110 	if (num >= 3)
1111 		regs.rdx = va_arg(ap, uint64_t);
1112 
1113 	if (num >= 4)
1114 		regs.rcx = va_arg(ap, uint64_t);
1115 
1116 	if (num >= 5)
1117 		regs.r8 = va_arg(ap, uint64_t);
1118 
1119 	if (num >= 6)
1120 		regs.r9 = va_arg(ap, uint64_t);
1121 
1122 	vcpu_regs_set(vcpu, &regs);
1123 	va_end(ap);
1124 }
1125 
1126 void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
1127 {
1128 	struct kvm_regs regs;
1129 	struct kvm_sregs sregs;
1130 
1131 	fprintf(stream, "%*svCPU ID: %u\n", indent, "", vcpu->id);
1132 
1133 	fprintf(stream, "%*sregs:\n", indent + 2, "");
1134 	vcpu_regs_get(vcpu, &regs);
1135 	regs_dump(stream, &regs, indent + 4);
1136 
1137 	fprintf(stream, "%*ssregs:\n", indent + 2, "");
1138 	vcpu_sregs_get(vcpu, &sregs);
1139 	sregs_dump(stream, &sregs, indent + 4);
1140 }
1141 
1142 static struct kvm_msr_list *__kvm_get_msr_index_list(bool feature_msrs)
1143 {
1144 	struct kvm_msr_list *list;
1145 	struct kvm_msr_list nmsrs;
1146 	int kvm_fd, r;
1147 
1148 	kvm_fd = open_kvm_dev_path_or_exit();
1149 
1150 	nmsrs.nmsrs = 0;
1151 	if (!feature_msrs)
1152 		r = __kvm_ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, &nmsrs);
1153 	else
1154 		r = __kvm_ioctl(kvm_fd, KVM_GET_MSR_FEATURE_INDEX_LIST, &nmsrs);
1155 
1156 	TEST_ASSERT(r == -1 && errno == E2BIG,
1157 		    "Expected -E2BIG, got rc: %i errno: %i (%s)",
1158 		    r, errno, strerror(errno));
1159 
1160 	list = malloc(sizeof(*list) + nmsrs.nmsrs * sizeof(list->indices[0]));
1161 	TEST_ASSERT(list, "-ENOMEM when allocating MSR index list");
1162 	list->nmsrs = nmsrs.nmsrs;
1163 
1164 	if (!feature_msrs)
1165 		kvm_ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list);
1166 	else
1167 		kvm_ioctl(kvm_fd, KVM_GET_MSR_FEATURE_INDEX_LIST, list);
1168 	close(kvm_fd);
1169 
1170 	TEST_ASSERT(list->nmsrs == nmsrs.nmsrs,
1171 		    "Number of MSRs in list changed, was %d, now %d",
1172 		    nmsrs.nmsrs, list->nmsrs);
1173 	return list;
1174 }
1175 
1176 const struct kvm_msr_list *kvm_get_msr_index_list(void)
1177 {
1178 	static const struct kvm_msr_list *list;
1179 
1180 	if (!list)
1181 		list = __kvm_get_msr_index_list(false);
1182 	return list;
1183 }
1184 
1185 
1186 const struct kvm_msr_list *kvm_get_feature_msr_index_list(void)
1187 {
1188 	static const struct kvm_msr_list *list;
1189 
1190 	if (!list)
1191 		list = __kvm_get_msr_index_list(true);
1192 	return list;
1193 }
1194 
1195 bool kvm_msr_is_in_save_restore_list(uint32_t msr_index)
1196 {
1197 	const struct kvm_msr_list *list = kvm_get_msr_index_list();
1198 	int i;
1199 
1200 	for (i = 0; i < list->nmsrs; ++i) {
1201 		if (list->indices[i] == msr_index)
1202 			return true;
1203 	}
1204 
1205 	return false;
1206 }
1207 
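/*
 * KVM_CAP_XSAVE2 advertises the size of the vCPU's XSAVE buffer, which can
 * exceed the legacy 4KiB "struct kvm_xsave" when dynamic XSTATE features,
 * e.g. AMX, are in play; fall back to the fixed-size ioctl only when the
 * capability is unsupported.
 */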
1208 static void vcpu_save_xsave_state(struct kvm_vcpu *vcpu,
1209 				  struct kvm_x86_state *state)
1210 {
1211 	int size = vm_check_cap(vcpu->vm, KVM_CAP_XSAVE2);
1212 
1213 	if (size) {
1214 		state->xsave = malloc(size);
1215 		vcpu_xsave2_get(vcpu, state->xsave);
1216 	} else {
1217 		state->xsave = malloc(sizeof(struct kvm_xsave));
1218 		vcpu_xsave_get(vcpu, state->xsave);
1219 	}
1220 }
1221 
1222 struct kvm_x86_state *vcpu_save_state(struct kvm_vcpu *vcpu)
1223 {
1224 	const struct kvm_msr_list *msr_list = kvm_get_msr_index_list();
1225 	struct kvm_x86_state *state;
1226 	int i;
1227 
1228 	static int nested_size = -1;
1229 
1230 	if (nested_size == -1) {
1231 		nested_size = kvm_check_cap(KVM_CAP_NESTED_STATE);
1232 		TEST_ASSERT(nested_size <= sizeof(state->nested_),
1233 			    "Nested state size too big, %i > %zi",
1234 			    nested_size, sizeof(state->nested_));
1235 	}
1236 
1237 	/*
1238 	 * When KVM exits to userspace with KVM_EXIT_IO, KVM guarantees
1239 	 * guest state is consistent only after userspace re-enters the
1240 	 * kernel with KVM_RUN.  Complete IO prior to migrating state
1241 	 * to a new VM.
1242 	 */
1243 	vcpu_run_complete_io(vcpu);
1244 
1245 	state = malloc(sizeof(*state) + msr_list->nmsrs * sizeof(state->msrs.entries[0]));
1246 	TEST_ASSERT(state, "-ENOMEM when allocating kvm state");
1247 
1248 	vcpu_events_get(vcpu, &state->events);
1249 	vcpu_mp_state_get(vcpu, &state->mp_state);
1250 	vcpu_regs_get(vcpu, &state->regs);
1251 	vcpu_save_xsave_state(vcpu, state);
1252 
1253 	if (kvm_has_cap(KVM_CAP_XCRS))
1254 		vcpu_xcrs_get(vcpu, &state->xcrs);
1255 
1256 	vcpu_sregs_get(vcpu, &state->sregs);
1257 
1258 	if (nested_size) {
1259 		state->nested.size = sizeof(state->nested_);
1260 
1261 		vcpu_nested_state_get(vcpu, &state->nested);
1262 		TEST_ASSERT(state->nested.size <= nested_size,
1263 			    "Nested state size too big, %i (KVM_CHECK_CAP gave %i)",
1264 			    state->nested.size, nested_size);
1265 	} else {
1266 		state->nested.size = 0;
1267 	}
1268 
1269 	state->msrs.nmsrs = msr_list->nmsrs;
1270 	for (i = 0; i < msr_list->nmsrs; i++)
1271 		state->msrs.entries[i].index = msr_list->indices[i];
1272 	vcpu_msrs_get(vcpu, &state->msrs);
1273 
1274 	vcpu_debugregs_get(vcpu, &state->debugregs);
1275 
1276 	return state;
1277 }
1278 
1279 void vcpu_load_state(struct kvm_vcpu *vcpu, struct kvm_x86_state *state)
1280 {
1281 	vcpu_sregs_set(vcpu, &state->sregs);
1282 	vcpu_msrs_set(vcpu, &state->msrs);
1283 
1284 	if (kvm_has_cap(KVM_CAP_XCRS))
1285 		vcpu_xcrs_set(vcpu, &state->xcrs);
1286 
1287 	vcpu_xsave_set(vcpu,  state->xsave);
1288 	vcpu_events_set(vcpu, &state->events);
1289 	vcpu_mp_state_set(vcpu, &state->mp_state);
1290 	vcpu_debugregs_set(vcpu, &state->debugregs);
1291 	vcpu_regs_set(vcpu, &state->regs);
1292 
1293 	if (state->nested.size)
1294 		vcpu_nested_state_set(vcpu, &state->nested);
1295 }
1296 
1297 void kvm_x86_state_cleanup(struct kvm_x86_state *state)
1298 {
1299 	free(state->xsave);
1300 	free(state);
1301 }
1302 
1303 void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits)
1304 {
1305 	if (!kvm_cpu_has_p(X86_PROPERTY_MAX_PHY_ADDR)) {
1306 		*pa_bits = kvm_cpu_has(X86_FEATURE_PAE) ? 36 : 32;
1307 		*va_bits = 32;
1308 	} else {
1309 		*pa_bits = kvm_cpu_property(X86_PROPERTY_MAX_PHY_ADDR);
1310 		*va_bits = kvm_cpu_property(X86_PROPERTY_MAX_VIRT_ADDR);
1311 	}
1312 }
1313 
1314 void kvm_init_vm_address_properties(struct kvm_vm *vm)
1315 {
1316 	if (is_sev_vm(vm)) {
1317 		vm->arch.sev_fd = open_sev_dev_path_or_exit();
1318 		vm->arch.c_bit = BIT_ULL(this_cpu_property(X86_PROPERTY_SEV_C_BIT));
1319 		vm->gpa_tag_mask = vm->arch.c_bit;
1320 	} else {
1321 		vm->arch.sev_fd = -1;
1322 	}
1323 }
1324 
1325 const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,
1326 					       uint32_t function, uint32_t index)
1327 {
1328 	int i;
1329 
1330 	for (i = 0; i < cpuid->nent; i++) {
1331 		if (cpuid->entries[i].function == function &&
1332 		    cpuid->entries[i].index == index)
1333 			return &cpuid->entries[i];
1334 	}
1335 
1336 	TEST_FAIL("CPUID function 0x%x index 0x%x not found", function, index);
1337 
1338 	return NULL;
1339 }
1340 
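/*
 * Intel CPUs accept VMCALL while AMD CPUs accept VMMCALL, so select the
 * vendor-appropriate instruction at runtime (host_cpu_is_amd is synced
 * into the guest) so that the same guest code runs on both.
 */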
1341 #define X86_HYPERCALL(inputs...)					\
1342 ({									\
1343 	uint64_t r;							\
1344 									\
1345 	asm volatile("test %[use_vmmcall], %[use_vmmcall]\n\t"		\
1346 		     "jnz 1f\n\t"					\
1347 		     "vmcall\n\t"					\
1348 		     "jmp 2f\n\t"					\
1349 		     "1: vmmcall\n\t"					\
1350 		     "2:"						\
1351 		     : "=a"(r)						\
1352 		     : [use_vmmcall] "r" (host_cpu_is_amd), inputs);	\
1353 									\
1354 	r;								\
1355 })
1356 
1357 uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
1358 		       uint64_t a3)
1359 {
1360 	return X86_HYPERCALL("a"(nr), "b"(a0), "c"(a1), "d"(a2), "S"(a3));
1361 }
1362 
1363 uint64_t __xen_hypercall(uint64_t nr, uint64_t a0, void *a1)
1364 {
1365 	return X86_HYPERCALL("a"(nr), "D"(a0), "S"(a1));
1366 }
1367 
1368 void xen_hypercall(uint64_t nr, uint64_t a0, void *a1)
1369 {
1370 	GUEST_ASSERT(!__xen_hypercall(nr, a0, a1));
1371 }
1372 
1373 unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
1374 {
1375 	const unsigned long num_ht_pages = 12 << (30 - vm->page_shift); /* 12 GiB */
1376 	unsigned long ht_gfn, max_gfn, max_pfn;
1377 	uint8_t maxphyaddr, guest_maxphyaddr;
1378 
1379 	/*
1380 	 * Use "guest MAXPHYADDR" from KVM if it's available.  Guest MAXPHYADDR
1381 	 * enumerates the max _mappable_ GPA, which can be less than the raw
1382 	 * MAXPHYADDR, e.g. if MAXPHYADDR=52, KVM is using TDP, and the CPU
1383 	 * doesn't support 5-level TDP.
1384 	 */
1385 	guest_maxphyaddr = kvm_cpu_property(X86_PROPERTY_GUEST_MAX_PHY_ADDR);
1386 	guest_maxphyaddr = guest_maxphyaddr ?: vm->pa_bits;
1387 	TEST_ASSERT(guest_maxphyaddr <= vm->pa_bits,
1388 		    "Guest MAXPHYADDR should never be greater than raw MAXPHYADDR");
1389 
1390 	max_gfn = (1ULL << (guest_maxphyaddr - vm->page_shift)) - 1;
1391 
1392 	/* Avoid reserved HyperTransport region on AMD processors.  */
1393 	if (!host_cpu_is_amd)
1394 		return max_gfn;
1395 
1396 	/* On parts with <40 physical address bits, the area is fully hidden */
1397 	if (vm->pa_bits < 40)
1398 		return max_gfn;
1399 
1400 	/* Before family 17h, the HyperTransport area is just below 1T.  */
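	/* 1TiB == 2^28 4KiB pages; selftests' x86 VMs always use 4KiB pages. */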
1401 	ht_gfn = (1 << 28) - num_ht_pages;
1402 	if (this_cpu_family() < 0x17)
1403 		goto done;
1404 
1405 	/*
1406 	 * Otherwise it's at the top of the physical address space, possibly
1407 	 * reduced due to SME by bits 11:6 of CPUID[0x8000001f].EBX.  Use
1408 	 * the old conservative value if MAXPHYADDR is not enumerated.
1409 	 */
1410 	if (!this_cpu_has_p(X86_PROPERTY_MAX_PHY_ADDR))
1411 		goto done;
1412 
1413 	maxphyaddr = this_cpu_property(X86_PROPERTY_MAX_PHY_ADDR);
1414 	max_pfn = (1ULL << (maxphyaddr - vm->page_shift)) - 1;
1415 
1416 	if (this_cpu_has_p(X86_PROPERTY_PHYS_ADDR_REDUCTION))
1417 		max_pfn >>= this_cpu_property(X86_PROPERTY_PHYS_ADDR_REDUCTION);
1418 
1419 	ht_gfn = max_pfn - num_ht_pages;
1420 done:
1421 	return min(max_gfn, ht_gfn - 1);
1422 }
1423 
1424 void kvm_selftest_arch_init(void)
1425 {
1426 	host_cpu_is_intel = this_cpu_is_intel();
1427 	host_cpu_is_amd = this_cpu_is_amd();
1428 	is_forced_emulation_enabled = kvm_is_forced_emulation_enabled();
1429 
1430 	kvm_init_pmu_errata();
1431 }
1432 
1433 bool sys_clocksource_is_based_on_tsc(void)
1434 {
1435 	char *clk_name = sys_get_cur_clocksource();
1436 	bool ret = !strcmp(clk_name, "tsc\n") ||
1437 		   !strcmp(clk_name, "hyperv_clocksource_tsc_page\n");
1438 
1439 	free(clk_name);
1440 
1441 	return ret;
1442 }
1443 
1444 bool kvm_arch_has_default_irqchip(void)
1445 {
1446 	return true;
1447 }
1448 
1449 void setup_smram(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
1450 		 uint64_t smram_gpa,
1451 		 const void *smi_handler, size_t handler_size)
1452 {
1453 	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, smram_gpa,
1454 				    SMRAM_MEMSLOT, SMRAM_PAGES, 0);
1455 	TEST_ASSERT(vm_phy_pages_alloc(vm, SMRAM_PAGES, smram_gpa,
1456 				       SMRAM_MEMSLOT) == smram_gpa,
1457 		    "Could not allocate guest physical addresses for SMRAM");
1458 
1459 	memset(addr_gpa2hva(vm, smram_gpa), 0x0, SMRAM_SIZE);
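	/* SMIs vector to the handler at SMBASE + 0x8000, per the SDM's SMRAM layout. */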
1460 	memcpy(addr_gpa2hva(vm, smram_gpa) + 0x8000, smi_handler, handler_size);
1461 	vcpu_set_msr(vcpu, MSR_IA32_SMBASE, smram_gpa);
1462 }
1463 
1464 void inject_smi(struct kvm_vcpu *vcpu)
1465 {
1466 	struct kvm_vcpu_events events;
1467 
1468 	vcpu_events_get(vcpu, &events);
1469 	events.smi.pending = 1;
1470 	events.flags |= KVM_VCPUEVENT_VALID_SMM;
1471 	vcpu_events_set(vcpu, &events);
1472 }
1473