xref: /linux/tools/testing/selftests/kvm/lib/loongarch/processor.c (revision 11c840192768a5a63b6aed75273c5e8e416230ee)
// SPDX-License-Identifier: GPL-2.0

#include <assert.h>
#include <linux/compiler.h>

#include <asm/kvm.h>
#include "kvm_util.h"
#include "pmu.h"
#include "processor.h"
#include "ucall_common.h"

#define LOONGARCH_PAGE_TABLE_PHYS_MIN		0x200000
#define LOONGARCH_GUEST_STACK_VADDR_MIN		0x200000

static vm_paddr_t invalid_pgtable[4];
static vm_vaddr_t exception_handlers;

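/*
 * Index into the page table at @level for @gva.  Entries are 8 bytes,
 * so each level translates (page_shift - 3) bits of the address.
 */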
static uint64_t virt_pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level)
{
	unsigned int shift;
	uint64_t mask;

	shift = level * (vm->page_shift - 3) + vm->page_shift;
	mask = (1UL << (vm->page_shift - 3)) - 1;
	return (gva >> shift) & mask;
}

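/* Strip the low attribute bits from an entry to get the physical address. */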
static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry)
{
	return entry & ~((0x1UL << vm->page_shift) - 1);
}

static uint64_t ptrs_per_pte(struct kvm_vm *vm)
{
	return 1 << (vm->page_shift - 3);
}

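/* Fill every entry of the page table at @table with the value @child. */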
static void virt_set_pgtable(struct kvm_vm *vm, vm_paddr_t table, vm_paddr_t child)
{
	uint64_t *ptep;
	int i, entries;

	ptep = addr_gpa2hva(vm, table);
	entries = ptrs_per_pte(vm);
	for (i = 0; i < entries; i++)
		WRITE_ONCE(*(ptep + i), child);
}

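/*
 * Allocate the guest page tables.  One "invalid" table is kept per
 * level: an empty entry at level N points at the invalid table for
 * level N - 1 rather than being zero, so a page walk of an unmapped
 * address terminates at a zero PTE instead of dereferencing a junk
 * pointer.  This presumably mirrors the kernel's preallocated
 * invalid_pg_dir scheme; emptiness is therefore tested against
 * invalid_pgtable[] rather than against zero.
 */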
void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
	int i;
	vm_paddr_t child, table;

	if (vm->mmu.pgd_created)
		return;

	child = table = 0;
	for (i = 0; i < vm->mmu.pgtable_levels; i++) {
		invalid_pgtable[i] = child;
		table = vm_phy_page_alloc(vm, LOONGARCH_PAGE_TABLE_PHYS_MIN,
				vm->memslots[MEM_REGION_PT]);
		TEST_ASSERT(table, "Failed to allocate page table at level %d", i);
		virt_set_pgtable(vm, table, child);
		child = table;
	}
	vm->mmu.pgd = table;
	vm->mmu.pgd_created = true;
}

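/* An entry is "none" if it still holds its level's initial (invalid) value. */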
static int virt_pte_none(uint64_t *ptep, int level)
{
	return *ptep == invalid_pgtable[level];
}

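/*
 * Walk the page tables for @gva and return a host pointer to the leaf
 * PTE.  If @alloc is set, missing intermediate tables are allocated on
 * the way down; otherwise an unmapped address is a test failure.
 */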
static uint64_t *virt_populate_pte(struct kvm_vm *vm, vm_vaddr_t gva, int alloc)
{
	int level;
	uint64_t *ptep;
	vm_paddr_t child;

	if (!vm->mmu.pgd_created)
		goto unmapped_gva;

	child = vm->mmu.pgd;
	level = vm->mmu.pgtable_levels - 1;
	while (level > 0) {
		ptep = addr_gpa2hva(vm, child) + virt_pte_index(vm, gva, level) * 8;
		if (virt_pte_none(ptep, level)) {
			if (!alloc)
				goto unmapped_gva;

			child = vm_alloc_page_table(vm);
			virt_set_pgtable(vm, child, invalid_pgtable[level - 1]);
			WRITE_ONCE(*ptep, child);
		} else {
			child = pte_addr(vm, *ptep);
		}
		level--;
	}

	ptep = addr_gpa2hva(vm, child) + virt_pte_index(vm, gva, level) * 8;
	return ptep;

unmapped_gva:
	TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva);
	exit(EXIT_FAILURE);
}

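/* Translate a mapped guest virtual address to its guest physical address. */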
vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint64_t *ptep;

	ptep = virt_populate_pte(vm, gva, 0);
	TEST_ASSERT(*ptep != 0, "Virtual address gva: 0x%lx not mapped", gva);

	return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));
}

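/*
 * Map one page: install a leaf PTE mapping @vaddr to @paddr with
 * present, readable, writeable, cached (coherent) and user attributes.
 */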
void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
	uint32_t prot_bits;
	uint64_t *ptep;

	TEST_ASSERT((vaddr % vm->page_size) == 0,
			"Virtual address not on page boundary,\n"
			"vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
			(vaddr >> vm->page_shift)),
			"Invalid virtual address, vaddr: 0x%lx", vaddr);
	TEST_ASSERT((paddr % vm->page_size) == 0,
			"Physical address not on page boundary,\n"
			"paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
			"Physical address beyond maximum supported,\n"
			"paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
			paddr, vm->max_gfn, vm->page_size);

	ptep = virt_populate_pte(vm, vaddr, 1);
	prot_bits = _PAGE_PRESENT | __READABLE | __WRITEABLE | _CACHE_CC | _PAGE_USER;
	WRITE_ONCE(*ptep, paddr | prot_bits);
}

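/*
 * Recursively dump the page table rooted at guest physical address
 * @page to @stream, descending one level per recursion.
 */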
static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level)
{
	uint64_t pte, *ptep;
	static const char * const type[] = { "pte", "pmd", "pud", "pgd" };

	if (level < 0)
		return;

	for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) {
		ptep = addr_gpa2hva(vm, pte);
		if (virt_pte_none(ptep, level))
			continue;
		fprintf(stream, "%*s%s: %lx: %lx at %p\n",
				indent, "", type[level], pte, *ptep, ptep);
		pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level - 1);
	}
}

void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	int level;

	if (!vm->mmu.pgd_created)
		return;

	level = vm->mmu.pgtable_levels - 1;
	pte_dump(stream, vm, indent, vm->mmu.pgd, level);
}

void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
{
}

void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	if (get_ucall(vcpu, &uc) != UCALL_UNHANDLED)
		return;

	TEST_FAIL("Unexpected exception (pc:0x%lx, estat:0x%lx, badv:0x%lx)",
			uc.args[0], uc.args[1], uc.args[2]);
}

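/*
 * Guest-side exception dispatcher: decode the exception code from the
 * ESTAT CSR and invoke the registered handler, if any.  Unhandled
 * exceptions are reported to the host via UCALL_UNHANDLED, after which
 * the vCPU spins.
 */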
void route_exception(struct ex_regs *regs)
{
	int vector;
	unsigned long pc, estat, badv;
	struct handlers *handlers;

	handlers = (struct handlers *)exception_handlers;
	vector = (regs->estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
	if (handlers && handlers->exception_handlers[vector])
		return handlers->exception_handlers[vector](regs);

	pc = regs->pc;
	badv = regs->badv;
	estat = regs->estat;
	ucall(UCALL_UNHANDLED, 3, pc, estat, badv);
	while (1)
		;
}

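/*
 * Allocate and zero the guest-visible handler table, then publish its
 * guest virtual address so that route_exception() can find it.
 */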
void vm_init_descriptor_tables(struct kvm_vm *vm)
{
	void *addr;

	vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers),
			LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA);

	addr = addr_gva2hva(vm, vm->handlers);
	memset(addr, 0, vm->page_size);
	exception_handlers = vm->handlers;
	sync_global_to_guest(vm, exception_handlers);
}

void vm_install_exception_handler(struct kvm_vm *vm, int vector, handler_fn handler)
{
	struct handlers *handlers = addr_gva2hva(vm, vm->handlers);

	assert(vector < VECTOR_NUM);
	handlers->exception_handlers[vector] = handler;
}

uint32_t guest_get_vcpuid(void)
{
	return csr_read(LOONGARCH_CSR_CPUID);
}

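/*
 * Pass up to eight arguments to the guest in the argument registers
 * a0 - a7, i.e. gpr[4] - gpr[11] in the LoongArch calling convention.
 */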
void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
{
	int i;
	va_list ap;
	struct kvm_regs regs;

	TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args,\n"
		    "num: %u", num);

	vcpu_regs_get(vcpu, &regs);

	va_start(ap, num);
	for (i = 0; i < num; i++)
		regs.gpr[i + 4] = va_arg(ap, uint64_t);
	va_end(ap);

	vcpu_regs_set(vcpu, &regs);
}

static void loongarch_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
	__vcpu_set_reg(vcpu, id, val);
}

static void loongarch_set_cpucfg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
	uint64_t cfgid;

	cfgid = KVM_REG_LOONGARCH_CPUCFG | KVM_REG_SIZE_U64 | 8 * id;
	__vcpu_set_reg(vcpu, cfgid, val);
}

static void loongarch_get_csr(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
{
	uint64_t csrid;

	csrid = KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | 8 * id;
	__vcpu_get_reg(vcpu, csrid, addr);
}

static void loongarch_set_csr(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
	uint64_t csrid;

	csrid = KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | 8 * id;
	__vcpu_set_reg(vcpu, csrid, val);
}

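/*
 * Bring the vCPU to a runnable state: mirror the host's CPUCFG6 word
 * (PMU capabilities, going by the pmu.h include) into the guest, enable
 * paging in kernel mode, program the page walker to match the selftest
 * page tables, and install the TLB refill and general exception entry
 * points.
 */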
void loongarch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int width;
	unsigned int cfg;
	unsigned long val;
	struct kvm_vm *vm = vcpu->vm;

	switch (vm->mode) {
	case VM_MODE_P36V47_16K:
	case VM_MODE_P47V47_16K:
		break;

	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
	}

	cfg = read_cpucfg(LOONGARCH_CPUCFG6);
	loongarch_set_cpucfg(vcpu, LOONGARCH_CPUCFG6, cfg);

	/* Kernel privilege level with paging enabled */
	val = PLV_KERN | CSR_CRMD_PG;
	loongarch_set_csr(vcpu, LOONGARCH_CSR_CRMD, val);
	loongarch_set_csr(vcpu, LOONGARCH_CSR_PRMD, val);
	loongarch_set_csr(vcpu, LOONGARCH_CSR_EUEN, 1);
	loongarch_set_csr(vcpu, LOONGARCH_CSR_ECFG, 0);
	loongarch_set_csr(vcpu, LOONGARCH_CSR_TCFG, 0);
	loongarch_set_csr(vcpu, LOONGARCH_CSR_ASID, 1);

	/* Start the time counter from 0 */
	val = 0;
	loongarch_set_reg(vcpu, KVM_REG_LOONGARCH_COUNTER, val);

	width = vm->page_shift - 3;

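	/*
	 * PWCTL0 packs {base, width} pairs for the PTE, PMD and (with
	 * four levels) PUD directories; PWCTL1 holds the pair for the
	 * PGD.  The bit positions below follow the hardware PWCL/PWCH
	 * register layout.  Note that when the switch is entered at the
	 * three-level case, val is still 0 from the counter setup above,
	 * so the |= operations start from a clean value.
	 */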
	switch (vm->mmu.pgtable_levels) {
	case 4:
		/* pud page shift and width */
		val = (vm->page_shift + width * 2) << 20 | (width << 25);
		/* fall through */
	case 3:
		/* pmd page shift and width */
		val |= (vm->page_shift + width) << 10 | (width << 15);
		/* pte page shift and width */
		val |= vm->page_shift | width << 5;
		break;
	default:
		TEST_FAIL("Got %u page table levels, expected 3 or 4", vm->mmu.pgtable_levels);
	}

	loongarch_set_csr(vcpu, LOONGARCH_CSR_PWCTL0, val);

	/* PGD page shift and width */
	val = (vm->page_shift + width * (vm->mmu.pgtable_levels - 1)) | width << 6;
	loongarch_set_csr(vcpu, LOONGARCH_CSR_PWCTL1, val);
	loongarch_set_csr(vcpu, LOONGARCH_CSR_PGDL, vm->mmu.pgd);

	/*
	 * The TLB refill exception runs in real (direct address) mode,
	 * so its entry point must be a physical address.
	 */
	val = addr_gva2gpa(vm, (unsigned long)handle_tlb_refill);
	loongarch_set_csr(vcpu, LOONGARCH_CSR_TLBRENTRY, val);

	/*
	 * General exceptions run with paging enabled, so their entry
	 * point must be a virtual address.
	 */
	val = (unsigned long)handle_exception;
	loongarch_set_csr(vcpu, LOONGARCH_CSR_EENTRY, val);

	loongarch_get_csr(vcpu, LOONGARCH_CSR_TLBIDX, &val);
	val &= ~CSR_TLBIDX_SIZEM;
	val |= PS_DEFAULT_SIZE << CSR_TLBIDX_SIZE;
	loongarch_set_csr(vcpu, LOONGARCH_CSR_TLBIDX, val);

	loongarch_set_csr(vcpu, LOONGARCH_CSR_STLBPGSIZE, PS_DEFAULT_SIZE);

	/* LOONGARCH_CSR_KS1 holds the top of the exception stack */
	val = __vm_vaddr_alloc(vm, vm->page_size,
			LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA);
	TEST_ASSERT(val != 0, "No memory for exception stack");
	val = val + vm->page_size;
	loongarch_set_csr(vcpu, LOONGARCH_CSR_KS1, val);

	loongarch_get_csr(vcpu, LOONGARCH_CSR_TLBREHI, &val);
	val &= ~CSR_TLBREHI_PS;
	val |= PS_DEFAULT_SIZE << CSR_TLBREHI_PS_SHIFT;
	loongarch_set_csr(vcpu, LOONGARCH_CSR_TLBREHI, val);

	loongarch_set_csr(vcpu, LOONGARCH_CSR_CPUID, vcpu->id);
	loongarch_set_csr(vcpu, LOONGARCH_CSR_TMID,  vcpu->id);
}

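/*
 * Add a vCPU: allocate a one-page guest stack, run the CSR setup above,
 * and point the stack pointer ($sp, gpr[3]) at the top of the stack.
 */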
struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
{
	size_t stack_size;
	uint64_t stack_vaddr;
	struct kvm_regs regs;
	struct kvm_vcpu *vcpu;

	vcpu = __vm_vcpu_add(vm, vcpu_id);
	stack_size = vm->page_size;
	stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
			LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA);
	TEST_ASSERT(stack_vaddr != 0, "No memory for vm stack");

	loongarch_vcpu_setup(vcpu);
	/* Setup guest general purpose registers */
	vcpu_regs_get(vcpu, &regs);
	regs.gpr[3] = stack_vaddr + stack_size;
	vcpu_regs_set(vcpu, &regs);

	return vcpu;
}

void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
{
	struct kvm_regs regs;

	/* Setup guest PC register */
	vcpu_regs_get(vcpu, &regs);
	regs.pc = (uint64_t)guest_code;
	vcpu_regs_set(vcpu, &regs);
}
403