// SPDX-License-Identifier: GPL-2.0

#include <assert.h>
#include <linux/compiler.h>

#include "kvm_util.h"
#include "processor.h"
#include "ucall_common.h"

#define LOONGARCH_PAGE_TABLE_PHYS_MIN		0x200000
#define LOONGARCH_GUEST_STACK_VADDR_MIN		0x200000

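/*
 * invalid_pgtable[i] is the value found in a not-yet-populated entry at
 * page-table level i.  For i > 0 it is the physical address of a shared
 * dummy table whose entries all hold invalid_pgtable[i - 1], bottoming
 * out at 0, so a walk through unmapped space always lands on a zero PTE.
 */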
static vm_paddr_t invalid_pgtable[4];

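/*
 * Index of the entry covering @gva at the given page-table level: each
 * level resolves (page_shift - 3) bits, i.e. page_size / 8 entries per
 * table.  With 16K pages (page_shift = 14) that is 11 bits per level,
 * so level 0 indexes gva bits [24:14], level 1 bits [35:25], and so on.
 */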
static uint64_t virt_pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level)
{
	unsigned int shift;
	uint64_t mask;

	shift = level * (vm->page_shift - 3) + vm->page_shift;
	mask = (1UL << (vm->page_shift - 3)) - 1;
	return (gva >> shift) & mask;
}

static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry)
{
	return entry & ~((0x1UL << vm->page_shift) - 1);
}

static uint64_t ptrs_per_pte(struct kvm_vm *vm)
{
	return 1 << (vm->page_shift - 3);
}

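/* Fill every entry of the page table at @table with the value @child. */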
static void virt_set_pgtable(struct kvm_vm *vm, vm_paddr_t table, vm_paddr_t child)
{
	uint64_t *ptep;
	int i, entries;

	ptep = addr_gpa2hva(vm, table);
	entries = ptrs_per_pte(vm);
	for (i = 0; i < entries; i++)
		WRITE_ONCE(*(ptep + i), child);
}

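/*
 * Allocate the root page table plus one shared dummy table per lower
 * level, chained so that every entry of a dummy table points at the
 * dummy table of the level below (see invalid_pgtable[] above).
 */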
void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
	int i;
	vm_paddr_t child, table;

	if (vm->pgd_created)
		return;

	child = table = 0;
	for (i = 0; i < vm->pgtable_levels; i++) {
		invalid_pgtable[i] = child;
		table = vm_phy_page_alloc(vm, LOONGARCH_PAGE_TABLE_PHYS_MIN,
				vm->memslots[MEM_REGION_PT]);
		TEST_ASSERT(table, "Failed to allocate page table at level %d", i);
		virt_set_pgtable(vm, table, child);
		child = table;
	}
	vm->pgd = table;
	vm->pgd_created = true;
}

static int virt_pte_none(uint64_t *ptep, int level)
{
	return *ptep == invalid_pgtable[level];
}

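/*
 * Walk the page tables from the root down to level 0 and return a host
 * pointer to the PTE covering @gva.  With @alloc set, missing
 * intermediate tables are allocated on the way down; without it,
 * hitting an invalid entry is a fatal test failure.
 */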
static uint64_t *virt_populate_pte(struct kvm_vm *vm, vm_vaddr_t gva, int alloc)
{
	int level;
	uint64_t *ptep;
	vm_paddr_t child;

	if (!vm->pgd_created)
		goto unmapped_gva;

	child = vm->pgd;
	level = vm->pgtable_levels - 1;
	while (level > 0) {
		ptep = addr_gpa2hva(vm, child) + virt_pte_index(vm, gva, level) * 8;
		if (virt_pte_none(ptep, level)) {
			if (alloc) {
				child = vm_alloc_page_table(vm);
				virt_set_pgtable(vm, child, invalid_pgtable[level - 1]);
				WRITE_ONCE(*ptep, child);
			} else
				goto unmapped_gva;

		} else
			child = pte_addr(vm, *ptep);
		level--;
	}

	ptep = addr_gpa2hva(vm, child) + virt_pte_index(vm, gva, level) * 8;
	return ptep;

unmapped_gva:
	TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva);
	exit(EXIT_FAILURE);
}

vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint64_t *ptep;

	ptep = virt_populate_pte(vm, gva, 0);
	TEST_ASSERT(*ptep != 0, "Virtual address vaddr: 0x%lx not mapped", gva);

	return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));
}

void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
	uint32_t prot_bits;
	uint64_t *ptep;

	TEST_ASSERT((vaddr % vm->page_size) == 0,
			"Virtual address not on page boundary,\n"
			"vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
			(vaddr >> vm->page_shift)),
			"Invalid virtual address, vaddr: 0x%lx", vaddr);
	TEST_ASSERT((paddr % vm->page_size) == 0,
			"Physical address not on page boundary,\n"
			"paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
			"Physical address beyond maximum supported,\n"
			"paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
			paddr, vm->max_gfn, vm->page_size);

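	/* Valid, coherent-cached, user-accessible read/write mapping */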
	ptep = virt_populate_pte(vm, vaddr, 1);
	prot_bits = _PAGE_PRESENT | __READABLE | __WRITEABLE | _CACHE_CC | _PAGE_USER;
	WRITE_ONCE(*ptep, paddr | prot_bits);
}

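/*
 * Recursively print every populated entry under @page, the physical
 * address of a page table at the given level (0 = PTE).
 */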
static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level)
{
	uint64_t pte, *ptep;
	static const char * const type[] = { "pte", "pmd", "pud", "pgd"};

	if (level < 0)
		return;

	for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) {
		ptep = addr_gpa2hva(vm, pte);
		if (virt_pte_none(ptep, level))
			continue;
		fprintf(stream, "%*s%s: %lx: %lx at %p\n",
				indent, "", type[level], pte, *ptep, ptep);
		/* Descend into the child table at the next lower level */
		pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level - 1);
	}
}

void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	int level;

	if (!vm->pgd_created)
		return;

	level = vm->pgtable_levels - 1;
	pte_dump(stream, vm, indent, vm->pgd, level);
}

void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
{
}

void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	if (get_ucall(vcpu, &uc) != UCALL_UNHANDLED)
		return;

	TEST_FAIL("Unexpected exception (pc:0x%lx, estat:0x%lx, badv:0x%lx)",
			uc.args[0], uc.args[1], uc.args[2]);
}

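/*
 * Default in-guest exception handler: report the faulting state to the
 * host via ucall, then spin so the vCPU never returns to the fault.
 */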
void route_exception(struct ex_regs *regs)
{
	unsigned long pc, estat, badv;

	pc = regs->pc;
	badv  = regs->badv;
	estat = regs->estat;
	ucall(UCALL_UNHANDLED, 3, pc, estat, badv);
	while (1)
		;
}

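/*
 * Guest function arguments go in the LoongArch argument registers
 * $a0-$a7, i.e. GPRs r4-r11, hence the "i + 4" below.
 */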
void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
{
	int i;
	va_list ap;
	struct kvm_regs regs;

	TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args, num: %u", num);

	vcpu_regs_get(vcpu, &regs);

	va_start(ap, num);
	for (i = 0; i < num; i++)
		regs.gpr[i + 4] = va_arg(ap, uint64_t);
	va_end(ap);

	vcpu_regs_set(vcpu, &regs);
}

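/*
 * CSRs are accessed through the ONE_REG interface; the low bits of the
 * register id encode the CSR's byte offset (8 bytes per 64-bit CSR).
 */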
static void loongarch_get_csr(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
{
	uint64_t csrid;

	csrid = KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | 8 * id;
	__vcpu_get_reg(vcpu, csrid, addr);
}

static void loongarch_set_csr(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
	uint64_t csrid;

	csrid = KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | 8 * id;
	__vcpu_set_reg(vcpu, csrid, val);
}

static void loongarch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int width;
	unsigned long val;
	struct kvm_vm *vm = vcpu->vm;

	switch (vm->mode) {
	case VM_MODE_P36V47_16K:
	case VM_MODE_P47V47_16K:
		break;

	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
	}

	/* Run the guest in user mode with paging enabled */
	val = PLV_USER | CSR_CRMD_PG;
	loongarch_set_csr(vcpu, LOONGARCH_CSR_CRMD, val);
	loongarch_set_csr(vcpu, LOONGARCH_CSR_PRMD, val);
	loongarch_set_csr(vcpu, LOONGARCH_CSR_EUEN, 1);
	loongarch_set_csr(vcpu, LOONGARCH_CSR_ECFG, 0);
	loongarch_set_csr(vcpu, LOONGARCH_CSR_TCFG, 0);
	loongarch_set_csr(vcpu, LOONGARCH_CSR_ASID, 1);

	val = 0;
	width = vm->page_shift - 3;

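	/*
	 * PWCTL0 describes the page-table walker: base (bit position of
	 * the first index bit) and width (number of index bits) for the
	 * PTE, PMD and, with 4 levels, PUD directories.  E.g. 16K pages,
	 * 3 levels: PTbase = 14, PTwidth = 11, Dir1_base = 25, Dir1_width = 11.
	 */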
	switch (vm->pgtable_levels) {
	case 4:
		/* pud page shift and width */
		val = (vm->page_shift + width * 2) << 20 | (width << 25);
		/* fall through */
	case 3:
		/* pmd page shift and width */
		val |= (vm->page_shift + width) << 10 | (width << 15);
		/* pte page shift and width */
		val |= vm->page_shift | width << 5;
		break;
	default:
		TEST_FAIL("Got %u page table levels, expected 3 or 4", vm->pgtable_levels);
	}

	loongarch_set_csr(vcpu, LOONGARCH_CSR_PWCTL0, val);

	/* PGD page shift and width */
	val = (vm->page_shift + width * (vm->pgtable_levels - 1)) | width << 6;
	loongarch_set_csr(vcpu, LOONGARCH_CSR_PWCTL1, val);
	loongarch_set_csr(vcpu, LOONGARCH_CSR_PGDL, vm->pgd);

280 	 * Refill exception runs on real mode
281 	 * Entry address should be physical address
282 	 */
283 	val = addr_gva2gpa(vm, (unsigned long)handle_tlb_refill);
284 	loongarch_set_csr(vcpu, LOONGARCH_CSR_TLBRENTRY, val);
285 
286 	/*
287 	 * General exception runs on page-enabled mode
288 	 * Entry address should be virtual address
289 	 */
290 	val = (unsigned long)handle_exception;
291 	loongarch_set_csr(vcpu, LOONGARCH_CSR_EENTRY, val);
292 
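	/* Program the default page size into TLBIDX and the STLB */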
	loongarch_get_csr(vcpu, LOONGARCH_CSR_TLBIDX, &val);
	val &= ~CSR_TLBIDX_SIZEM;
	val |= PS_DEFAULT_SIZE << CSR_TLBIDX_SIZE;
	loongarch_set_csr(vcpu, LOONGARCH_CSR_TLBIDX, val);

	loongarch_set_csr(vcpu, LOONGARCH_CSR_STLBPGSIZE, PS_DEFAULT_SIZE);

	/* LOONGARCH_CSR_KS1 is used for the exception stack */
	val = __vm_vaddr_alloc(vm, vm->page_size,
			LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA);
	TEST_ASSERT(val != 0, "No memory for exception stack");
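	/* The stack grows down, so point KS1 at the top of the page */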
	val = val + vm->page_size;
	loongarch_set_csr(vcpu, LOONGARCH_CSR_KS1, val);

	loongarch_get_csr(vcpu, LOONGARCH_CSR_TLBREHI, &val);
	val &= ~CSR_TLBREHI_PS;
	val |= PS_DEFAULT_SIZE << CSR_TLBREHI_PS_SHIFT;
	loongarch_set_csr(vcpu, LOONGARCH_CSR_TLBREHI, val);

	loongarch_set_csr(vcpu, LOONGARCH_CSR_CPUID, vcpu->id);
	loongarch_set_csr(vcpu, LOONGARCH_CSR_TMID,  vcpu->id);
}

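/*
 * Create a vCPU, program its control state, and give it a one-page
 * stack; $sp is GPR r3 on LoongArch, set to the top of that page.
 */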
struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
{
	size_t stack_size;
	uint64_t stack_vaddr;
	struct kvm_regs regs;
	struct kvm_vcpu *vcpu;

	vcpu = __vm_vcpu_add(vm, vcpu_id);
	stack_size = vm->page_size;
	stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
			LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA);
	TEST_ASSERT(stack_vaddr != 0, "No memory for vm stack");

	loongarch_vcpu_setup(vcpu);
	/* Setup guest general purpose registers */
	vcpu_regs_get(vcpu, &regs);
	regs.gpr[3] = stack_vaddr + stack_size;
	vcpu_regs_set(vcpu, &regs);

	return vcpu;
}

void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
{
	struct kvm_regs regs;

	/* Setup guest PC register */
	vcpu_regs_get(vcpu, &regs);
	regs.pc = (uint64_t)guest_code;
	vcpu_regs_set(vcpu, &regs);
}