xref: /linux/tools/testing/selftests/kvm/lib/loongarch/processor.c (revision df41742343fad11fde06e085096003d64599785f)
// SPDX-License-Identifier: GPL-2.0

#include <assert.h>
#include <linux/compiler.h>

#include "kvm_util.h"
#include "processor.h"
#include "ucall_common.h"

#define LOONGARCH_PAGE_TABLE_PHYS_MIN		0x200000
#define LOONGARCH_GUEST_STACK_VADDR_MIN		0x200000

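/*
 * invalid_pgtable[i] holds the "empty" entry value for page table level i:
 * each level's dummy table is filled with the dummy table of the level
 * below, so an unpopulated entry still points at a valid lower-level table.
 * exception_handlers mirrors vm->handlers into the guest so that
 * route_exception() can locate the per-vector handler table.
 */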
static vm_paddr_t invalid_pgtable[4];
static vm_vaddr_t exception_handlers;

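/*
 * Index of the page table entry covering @gva at the given @level.  Each
 * level indexes (page_shift - 3) bits of the VA; e.g. with 16KiB pages,
 * level 0 uses VA bits [24:14], level 1 uses bits [35:25], and so on.
 */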
static uint64_t virt_pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level)
{
	unsigned int shift;
	uint64_t mask;

	shift = level * (vm->page_shift - 3) + vm->page_shift;
	mask = (1UL << (vm->page_shift - 3)) - 1;
	return (gva >> shift) & mask;
}

static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry)
{
	return entry & ~((0x1UL << vm->page_shift) - 1);
}

static uint64_t ptrs_per_pte(struct kvm_vm *vm)
{
	return 1 << (vm->page_shift - 3);
}

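/*
 * Fill every entry of the table at @table with the value @child; used both
 * to point a freshly allocated table at the dummy table one level down and
 * to build the dummy tables themselves.
 */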
static void virt_set_pgtable(struct kvm_vm *vm, vm_paddr_t table, vm_paddr_t child)
{
	uint64_t *ptep;
	int i, ptrs_per_pte;

	ptep = addr_gpa2hva(vm, table);
	ptrs_per_pte = 1 << (vm->page_shift - 3);
	for (i = 0; i < ptrs_per_pte; i++)
		WRITE_ONCE(*(ptep + i), child);
}

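/*
 * Allocate one dummy table per level, bottom up, recording each level's
 * "empty" entry value in invalid_pgtable[]; the top-most table becomes the
 * guest PGD.
 */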
void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
	int i;
	vm_paddr_t child, table;

	if (vm->pgd_created)
		return;

	child = table = 0;
	for (i = 0; i < vm->pgtable_levels; i++) {
		invalid_pgtable[i] = child;
		table = vm_phy_page_alloc(vm, LOONGARCH_PAGE_TABLE_PHYS_MIN,
				vm->memslots[MEM_REGION_PT]);
		TEST_ASSERT(table, "Failed to allocate page table at level %d\n", i);
		virt_set_pgtable(vm, table, child);
		child = table;
	}
	vm->pgd = table;
	vm->pgd_created = true;
}

static int virt_pte_none(uint64_t *ptep, int level)
{
	return *ptep == invalid_pgtable[level];
}

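/*
 * Walk the guest page tables for @gva and return a host pointer to the leaf
 * PTE slot.  When @alloc is set, missing intermediate tables are allocated
 * and initialized with the next level's "empty" entry value; otherwise an
 * unmapped address is a test failure.
 */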
static uint64_t *virt_populate_pte(struct kvm_vm *vm, vm_vaddr_t gva, int alloc)
{
	int level;
	uint64_t *ptep;
	vm_paddr_t child;

	if (!vm->pgd_created)
		goto unmapped_gva;

	child = vm->pgd;
	level = vm->pgtable_levels - 1;
	while (level > 0) {
		ptep = addr_gpa2hva(vm, child) + virt_pte_index(vm, gva, level) * 8;
		if (virt_pte_none(ptep, level)) {
			if (alloc) {
				child = vm_alloc_page_table(vm);
				virt_set_pgtable(vm, child, invalid_pgtable[level - 1]);
				WRITE_ONCE(*ptep, child);
			} else
				goto unmapped_gva;

		} else
			child = pte_addr(vm, *ptep);
		level--;
	}

	ptep = addr_gpa2hva(vm, child) + virt_pte_index(vm, gva, level) * 8;
	return ptep;

unmapped_gva:
	TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva);
	exit(EXIT_FAILURE);
}

vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint64_t *ptep;

	ptep = virt_populate_pte(vm, gva, 0);
	TEST_ASSERT(*ptep != 0, "Virtual address vaddr: 0x%lx not mapped\n", gva);

	return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));
}

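/*
 * Map one guest page: install a leaf PTE marking the page present, readable,
 * writable, coherent-cached (CC) and user-accessible.
 */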
void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
	uint32_t prot_bits;
	uint64_t *ptep;

	TEST_ASSERT((vaddr % vm->page_size) == 0,
			"Virtual address not on page boundary,\n"
			"vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
			(vaddr >> vm->page_shift)),
			"Invalid virtual address, vaddr: 0x%lx", vaddr);
	TEST_ASSERT((paddr % vm->page_size) == 0,
			"Physical address not on page boundary,\n"
			"paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
			"Physical address beyond maximum supported,\n"
			"paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
			paddr, vm->max_gfn, vm->page_size);

	ptep = virt_populate_pte(vm, vaddr, 1);
	prot_bits = _PAGE_PRESENT | __READABLE | __WRITEABLE | _CACHE_CC | _PAGE_USER;
	WRITE_ONCE(*ptep, paddr | prot_bits);
}

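/*
 * Recursively dump every populated entry of the table at @page, descending
 * into child tables one level at a time.
 */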
static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level)
{
	uint64_t pte, *ptep;
	static const char * const type[] = { "pte", "pmd", "pud", "pgd" };

	if (level < 0)
		return;

	for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) {
		ptep = addr_gpa2hva(vm, pte);
		if (virt_pte_none(ptep, level))
			continue;
		fprintf(stream, "%*s%s: %lx: %lx at %p\n",
				indent, "", type[level], pte, *ptep, ptep);
		pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level - 1);
	}
}

void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	int level;

	if (!vm->pgd_created)
		return;

	level = vm->pgtable_levels - 1;
	pte_dump(stream, vm, indent, vm->pgd, level);
}

void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
{
}

void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	if (get_ucall(vcpu, &uc) != UCALL_UNHANDLED)
		return;

	TEST_FAIL("Unexpected exception (pc:0x%lx, estat:0x%lx, badv:0x%lx)",
			uc.args[0], uc.args[1], uc.args[2]);
}

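/*
 * Common guest-side exception dispatcher: decode the exception code from
 * CSR.ESTAT, invoke the handler registered for that vector if there is one,
 * otherwise report PC/ESTAT/BADV to the host via UCALL_UNHANDLED and spin.
 */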
void route_exception(struct ex_regs *regs)
{
	int vector;
	unsigned long pc, estat, badv;
	struct handlers *handlers;

	handlers = (struct handlers *)exception_handlers;
	vector = (regs->estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
	if (handlers && handlers->exception_handlers[vector])
		return handlers->exception_handlers[vector](regs);

	pc = regs->pc;
	badv  = regs->badv;
	estat = regs->estat;
	ucall(UCALL_UNHANDLED, 3, pc, estat, badv);
	while (1)
		;
}

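/*
 * Allocate the guest-side struct handlers and publish its address to the
 * guest through the exception_handlers global, so route_exception() can
 * find it.
 */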
void vm_init_descriptor_tables(struct kvm_vm *vm)
{
	void *addr;

	vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers),
			LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA);

	addr = addr_gva2hva(vm, vm->handlers);
	memset(addr, 0, vm->page_size);
	exception_handlers = vm->handlers;
	sync_global_to_guest(vm, exception_handlers);
}

void vm_install_exception_handler(struct kvm_vm *vm, int vector, handler_fn handler)
{
	struct handlers *handlers = addr_gva2hva(vm, vm->handlers);

	assert(vector < VECTOR_NUM);
	handlers->exception_handlers[vector] = handler;
}

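/* CSR.CPUID is programmed with vcpu->id in loongarch_vcpu_setup(). */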
uint32_t guest_get_vcpuid(void)
{
	return csr_read(LOONGARCH_CSR_CPUID);
}

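/*
 * Guest-code arguments are passed in the argument registers $a0-$a7,
 * i.e. GPRs 4-11 in struct kvm_regs.
 */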
void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
{
	int i;
	va_list ap;
	struct kvm_regs regs;

	TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args,\n"
		    "num: %u\n", num);

	vcpu_regs_get(vcpu, &regs);

	va_start(ap, num);
	for (i = 0; i < num; i++)
		regs.gpr[i + 4] = va_arg(ap, uint64_t);
	va_end(ap);

	vcpu_regs_set(vcpu, &regs);
}

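/*
 * CSRs are accessed through the KVM ONE_REG interface; the register id is
 * built from KVM_REG_LOONGARCH_CSR, the 64-bit size flag and 8 * the CSR
 * number.
 */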
static void loongarch_get_csr(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
{
	uint64_t csrid;

	csrid = KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | 8 * id;
	__vcpu_get_reg(vcpu, csrid, addr);
}

static void loongarch_set_csr(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
	uint64_t csrid;

	csrid = KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | 8 * id;
	__vcpu_set_reg(vcpu, csrid, val);
}

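/*
 * Bring a vCPU to a state where it can run the tests' guest code: kernel
 * privilege level with paging enabled, page walker registers (PWCTL0,
 * PWCTL1, PGDL) matching the page tables built above, TLB refill and
 * general exception entry points, a default page size, and a dedicated
 * exception stack in KScratch1.
 */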
static void loongarch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int width;
	unsigned long val;
	struct kvm_vm *vm = vcpu->vm;

	switch (vm->mode) {
	case VM_MODE_P36V47_16K:
	case VM_MODE_P47V47_16K:
		break;

	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
	}

	/* Kernel privilege level, with paging enabled */
	val = PLV_KERN | CSR_CRMD_PG;
	loongarch_set_csr(vcpu, LOONGARCH_CSR_CRMD, val);
	loongarch_set_csr(vcpu, LOONGARCH_CSR_PRMD, val);
	loongarch_set_csr(vcpu, LOONGARCH_CSR_EUEN, 1);
	loongarch_set_csr(vcpu, LOONGARCH_CSR_ECFG, 0);
	loongarch_set_csr(vcpu, LOONGARCH_CSR_TCFG, 0);
	loongarch_set_csr(vcpu, LOONGARCH_CSR_ASID, 1);

	val = 0;
	width = vm->page_shift - 3;

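	/*
	 * PWCTL0 describes the lower page-table levels to the hardware page
	 * walker: pte base/width in bits 0/5, Dir1 (pmd) base/width in bits
	 * 10/15 and Dir2 (pud) base/width in bits 20/25, as programmed below.
	 */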
	switch (vm->pgtable_levels) {
	case 4:
		/* pud page shift and width */
		val = (vm->page_shift + width * 2) << 20 | (width << 25);
		/* fall through */
	case 3:
		/* pmd page shift and width */
		val |= (vm->page_shift + width) << 10 | (width << 15);
		/* pte page shift and width */
		val |= vm->page_shift | width << 5;
		break;
	default:
		TEST_FAIL("Got %u page table levels, expected 3 or 4", vm->pgtable_levels);
	}

	loongarch_set_csr(vcpu, LOONGARCH_CSR_PWCTL0, val);

	/* PGD page shift and width */
	val = (vm->page_shift + width * (vm->pgtable_levels - 1)) | width << 6;
	loongarch_set_csr(vcpu, LOONGARCH_CSR_PWCTL1, val);
	loongarch_set_csr(vcpu, LOONGARCH_CSR_PGDL, vm->pgd);

	/*
	 * The TLB refill exception runs with paging disabled (direct address
	 * mode), so its entry point must be a physical address.
	 */
	val = addr_gva2gpa(vm, (unsigned long)handle_tlb_refill);
	loongarch_set_csr(vcpu, LOONGARCH_CSR_TLBRENTRY, val);

	/*
	 * General exceptions run with paging enabled, so their entry point
	 * must be a virtual address.
	 */
	val = (unsigned long)handle_exception;
	loongarch_set_csr(vcpu, LOONGARCH_CSR_EENTRY, val);

	loongarch_get_csr(vcpu, LOONGARCH_CSR_TLBIDX, &val);
	val &= ~CSR_TLBIDX_SIZEM;
	val |= PS_DEFAULT_SIZE << CSR_TLBIDX_SIZE;
	loongarch_set_csr(vcpu, LOONGARCH_CSR_TLBIDX, val);

	loongarch_set_csr(vcpu, LOONGARCH_CSR_STLBPGSIZE, PS_DEFAULT_SIZE);

	/* LOONGARCH_CSR_KS1 is used for the exception stack */
	val = __vm_vaddr_alloc(vm, vm->page_size,
			LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA);
	TEST_ASSERT(val != 0, "No memory for exception stack");
	val = val + vm->page_size;
	loongarch_set_csr(vcpu, LOONGARCH_CSR_KS1, val);

	loongarch_get_csr(vcpu, LOONGARCH_CSR_TLBREHI, &val);
	val &= ~CSR_TLBREHI_PS;
	val |= PS_DEFAULT_SIZE << CSR_TLBREHI_PS_SHIFT;
	loongarch_set_csr(vcpu, LOONGARCH_CSR_TLBREHI, val);

	loongarch_set_csr(vcpu, LOONGARCH_CSR_CPUID, vcpu->id);
	loongarch_set_csr(vcpu, LOONGARCH_CSR_TMID,  vcpu->id);
}

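/*
 * Create a vCPU, give it a one-page stack, and point $sp (GPR 3) at the
 * top of that stack.
 */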
struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
{
	size_t stack_size;
	uint64_t stack_vaddr;
	struct kvm_regs regs;
	struct kvm_vcpu *vcpu;

	vcpu = __vm_vcpu_add(vm, vcpu_id);
	stack_size = vm->page_size;
	stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
			LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA);
	TEST_ASSERT(stack_vaddr != 0, "No memory for vm stack");

	loongarch_vcpu_setup(vcpu);
	/* Set up the guest general-purpose registers */
	vcpu_regs_get(vcpu, &regs);
	regs.gpr[3] = stack_vaddr + stack_size;
	vcpu_regs_set(vcpu, &regs);

	return vcpu;
}

void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
{
	struct kvm_regs regs;

	/* Set up the guest PC register */
	vcpu_regs_get(vcpu, &regs);
	regs.pc = (uint64_t)guest_code;
	vcpu_regs_set(vcpu, &regs);
}
381