// SPDX-License-Identifier: GPL-2.0

#include <assert.h>
#include <linux/compiler.h>

#include <asm/kvm.h>
#include "kvm_util.h"
#include "processor.h"
#include "ucall_common.h"

#define LOONGARCH_PAGE_TABLE_PHYS_MIN	0x200000
#define LOONGARCH_GUEST_STACK_VADDR_MIN	0x200000

static vm_paddr_t invalid_pgtable[4];
static vm_vaddr_t exception_handlers;

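/* Index into the page table at @level for a given guest virtual address */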
static uint64_t virt_pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level)
{
	unsigned int shift;
	uint64_t mask;

	shift = level * (vm->page_shift - 3) + vm->page_shift;
	mask = (1UL << (vm->page_shift - 3)) - 1;
	return (gva >> shift) & mask;
}

static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry)
{
	return entry & ~((0x1UL << vm->page_shift) - 1);
}

static uint64_t ptrs_per_pte(struct kvm_vm *vm)
{
	return 1 << (vm->page_shift - 3);
}

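/* Fill every entry of the page table at @table with @child */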
static void virt_set_pgtable(struct kvm_vm *vm, vm_paddr_t table, vm_paddr_t child)
{
	uint64_t *ptep;
	int i, ptrs_per_pte;

	ptep = addr_gpa2hva(vm, table);
	ptrs_per_pte = 1 << (vm->page_shift - 3);
	for (i = 0; i < ptrs_per_pte; i++)
		WRITE_ONCE(*(ptep + i), child);
}

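/*
 * Allocate the root page table along with one dummy table per lower
 * level.  invalid_pgtable[] holds the per-level "empty" marker (zero at
 * the bottom, a dummy table address above); freshly allocated tables
 * are pre-filled with these markers so that virt_pte_none() can tell an
 * unpopulated entry from a real one.
 */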
void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
	int i;
	vm_paddr_t child, table;

	if (vm->pgd_created)
		return;

	child = table = 0;
	for (i = 0; i < vm->pgtable_levels; i++) {
		invalid_pgtable[i] = child;
		table = vm_phy_page_alloc(vm, LOONGARCH_PAGE_TABLE_PHYS_MIN,
				vm->memslots[MEM_REGION_PT]);
		TEST_ASSERT(table, "Failed to allocate page table at level %d\n", i);
		virt_set_pgtable(vm, table, child);
		child = table;
	}
	vm->pgd = table;
	vm->pgd_created = true;
}

static int virt_pte_none(uint64_t *ptep, int level)
{
	return *ptep == invalid_pgtable[level];
}

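/*
 * Walk the guest page tables for @gva and return a host pointer to the
 * leaf PTE.  With @alloc set, missing intermediate tables are allocated
 * on the way down; otherwise an unmapped address is a test failure.
 */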
static uint64_t *virt_populate_pte(struct kvm_vm *vm, vm_vaddr_t gva, int alloc)
{
	int level;
	uint64_t *ptep;
	vm_paddr_t child;

	if (!vm->pgd_created)
		goto unmapped_gva;

	child = vm->pgd;
	level = vm->pgtable_levels - 1;
	while (level > 0) {
		ptep = addr_gpa2hva(vm, child) + virt_pte_index(vm, gva, level) * 8;
		if (virt_pte_none(ptep, level)) {
			if (alloc) {
				child = vm_alloc_page_table(vm);
				virt_set_pgtable(vm, child, invalid_pgtable[level - 1]);
				WRITE_ONCE(*ptep, child);
			} else
				goto unmapped_gva;
		} else
			child = pte_addr(vm, *ptep);
		level--;
	}

	ptep = addr_gpa2hva(vm, child) + virt_pte_index(vm, gva, level) * 8;
	return ptep;

unmapped_gva:
	TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva);
	exit(EXIT_FAILURE);
}

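/* Translate a guest virtual address to a guest physical address by a software walk */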
vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint64_t *ptep;

	ptep = virt_populate_pte(vm, gva, 0);
	TEST_ASSERT(*ptep != 0, "Virtual address vaddr: 0x%lx not mapped\n", gva);

	return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));
}

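/*
 * Map the page at @paddr to @vaddr as a present, cached, user-accessible
 * read/write page.
 */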
void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
	uint32_t prot_bits;
	uint64_t *ptep;

	TEST_ASSERT((vaddr % vm->page_size) == 0,
			"Virtual address not on page boundary,\n"
			"vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
			(vaddr >> vm->page_shift)),
			"Invalid virtual address, vaddr: 0x%lx", vaddr);
	TEST_ASSERT((paddr % vm->page_size) == 0,
			"Physical address not on page boundary,\n"
			"paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
			"Physical address beyond maximum supported,\n"
			"paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
			paddr, vm->max_gfn, vm->page_size);

	ptep = virt_populate_pte(vm, vaddr, 1);
	prot_bits = _PAGE_PRESENT | __READABLE | __WRITEABLE | _CACHE_CC | _PAGE_USER;
	WRITE_ONCE(*ptep, paddr | prot_bits);
}

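/* Recursively dump one page table level; entries still marked invalid are skipped */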
static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level)
{
	uint64_t pte, *ptep;
	static const char * const type[] = { "pte", "pmd", "pud", "pgd" };

	if (level < 0)
		return;

	for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) {
		ptep = addr_gpa2hva(vm, pte);
		if (virt_pte_none(ptep, level))
			continue;
		fprintf(stream, "%*s%s: %lx: %lx at %p\n",
			indent, "", type[level], pte, *ptep, ptep);
		pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level - 1);
	}
}

void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	int level;

	if (!vm->pgd_created)
		return;

	level = vm->pgtable_levels - 1;
	pte_dump(stream, vm, indent, vm->pgd, level);
}

void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
{
}

void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	if (get_ucall(vcpu, &uc) != UCALL_UNHANDLED)
		return;

	TEST_FAIL("Unexpected exception (pc:0x%lx, estat:0x%lx, badv:0x%lx)",
			uc.args[0], uc.args[1], uc.args[2]);
}

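/*
 * Common C entry point for guest exceptions.  Dispatch to the handler
 * registered for the exception code in ESTAT, or report the unhandled
 * exception back to the host via ucall and spin.
 */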
void route_exception(struct ex_regs *regs)
{
	int vector;
	unsigned long pc, estat, badv;
	struct handlers *handlers;

	handlers = (struct handlers *)exception_handlers;
	vector = (regs->estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
	if (handlers && handlers->exception_handlers[vector])
		return handlers->exception_handlers[vector](regs);

	pc = regs->pc;
	badv = regs->badv;
	estat = regs->estat;
	ucall(UCALL_UNHANDLED, 3, pc, estat, badv);
	while (1)
		;
}

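/*
 * Allocate the guest-side handler table and publish its address to the
 * guest through the exception_handlers global.
 */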
void vm_init_descriptor_tables(struct kvm_vm *vm)
{
	void *addr;

	vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers),
			LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA);

	addr = addr_gva2hva(vm, vm->handlers);
	memset(addr, 0, vm->page_size);
	exception_handlers = vm->handlers;
	sync_global_to_guest(vm, exception_handlers);
}

void vm_install_exception_handler(struct kvm_vm *vm, int vector, handler_fn handler)
{
	struct handlers *handlers = addr_gva2hva(vm, vm->handlers);

	assert(vector < VECTOR_NUM);
	handlers->exception_handlers[vector] = handler;
}

uint32_t guest_get_vcpuid(void)
{
	return csr_read(LOONGARCH_CSR_CPUID);
}

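/*
 * Pass up to eight arguments to the guest in the LoongArch argument
 * registers $a0-$a7, which are GPRs $r4-$r11.
 */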
void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
{
	int i;
	va_list ap;
	struct kvm_regs regs;

	TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args,\n"
			"num: %u\n", num);

	vcpu_regs_get(vcpu, &regs);

	va_start(ap, num);
	for (i = 0; i < num; i++)
		regs.gpr[i + 4] = va_arg(ap, uint64_t);
	va_end(ap);

	vcpu_regs_set(vcpu, &regs);
}

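/*
 * Helpers for the KVM_{GET,SET}_ONE_REG interface.  For CSRs, the low
 * bits of the register id carry the CSR number scaled by the register
 * size (8 bytes).
 */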
static void loongarch_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
	__vcpu_set_reg(vcpu, id, val);
}

static void loongarch_get_csr(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
{
	uint64_t csrid;

	csrid = KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | 8 * id;
	__vcpu_get_reg(vcpu, csrid, addr);
}

static void loongarch_set_csr(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
	uint64_t csrid;

	csrid = KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | 8 * id;
	__vcpu_set_reg(vcpu, csrid, val);
}

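/*
 * Bring a vCPU into a runnable state: kernel mode with paging enabled,
 * page-walk CSRs matching the VM's page table geometry, and the TLB
 * refill/general exception entry points installed.
 */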
static void loongarch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int width;
	unsigned long val;
	struct kvm_vm *vm = vcpu->vm;

	switch (vm->mode) {
	case VM_MODE_P36V47_16K:
	case VM_MODE_P47V47_16K:
		break;

	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
	}

	/* Kernel mode with paging enabled */
	val = PLV_KERN | CSR_CRMD_PG;
	loongarch_set_csr(vcpu, LOONGARCH_CSR_CRMD, val);
	loongarch_set_csr(vcpu, LOONGARCH_CSR_PRMD, val);
	loongarch_set_csr(vcpu, LOONGARCH_CSR_EUEN, 1);
	loongarch_set_csr(vcpu, LOONGARCH_CSR_ECFG, 0);
	loongarch_set_csr(vcpu, LOONGARCH_CSR_TCFG, 0);
	loongarch_set_csr(vcpu, LOONGARCH_CSR_ASID, 1);

	/* Timer counter starts from 0 */
	val = 0;
	loongarch_set_reg(vcpu, KVM_REG_LOONGARCH_COUNTER, val);

	width = vm->page_shift - 3;

	switch (vm->pgtable_levels) {
	case 4:
		/* pud page shift and width */
		val = (vm->page_shift + width * 2) << 20 | (width << 25);
		/* fall through */
	case 3:
		/* pmd page shift and width */
		val |= (vm->page_shift + width) << 10 | (width << 15);
		/* pte page shift and width */
		val |= vm->page_shift | width << 5;
		break;
	default:
		TEST_FAIL("Got %u page table levels, expected 3 or 4", vm->pgtable_levels);
	}

	loongarch_set_csr(vcpu, LOONGARCH_CSR_PWCTL0, val);

	/* PGD page shift and width */
	val = (vm->page_shift + width * (vm->pgtable_levels - 1)) | width << 6;
	loongarch_set_csr(vcpu, LOONGARCH_CSR_PWCTL1, val);
	loongarch_set_csr(vcpu, LOONGARCH_CSR_PGDL, vm->pgd);

	/*
	 * The refill exception runs in direct-address (real) mode, so its
	 * entry address must be a physical address.
	 */
	val = addr_gva2gpa(vm, (unsigned long)handle_tlb_refill);
	loongarch_set_csr(vcpu, LOONGARCH_CSR_TLBRENTRY, val);

	/*
	 * General exceptions run with paging enabled, so their entry address
	 * is a virtual address.
	 */
	val = (unsigned long)handle_exception;
	loongarch_set_csr(vcpu, LOONGARCH_CSR_EENTRY, val);

	loongarch_get_csr(vcpu, LOONGARCH_CSR_TLBIDX, &val);
	val &= ~CSR_TLBIDX_SIZEM;
	val |= PS_DEFAULT_SIZE << CSR_TLBIDX_SIZE;
	loongarch_set_csr(vcpu, LOONGARCH_CSR_TLBIDX, val);

	loongarch_set_csr(vcpu, LOONGARCH_CSR_STLBPGSIZE, PS_DEFAULT_SIZE);

	/* LOONGARCH_CSR_KS1 is used as the exception stack */
	val = __vm_vaddr_alloc(vm, vm->page_size,
			LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA);
	TEST_ASSERT(val != 0, "No memory for exception stack");
	val = val + vm->page_size;
	loongarch_set_csr(vcpu, LOONGARCH_CSR_KS1, val);

	loongarch_get_csr(vcpu, LOONGARCH_CSR_TLBREHI, &val);
	val &= ~CSR_TLBREHI_PS;
	val |= PS_DEFAULT_SIZE << CSR_TLBREHI_PS_SHIFT;
	loongarch_set_csr(vcpu, LOONGARCH_CSR_TLBREHI, val);

	loongarch_set_csr(vcpu, LOONGARCH_CSR_CPUID, vcpu->id);
	loongarch_set_csr(vcpu, LOONGARCH_CSR_TMID, vcpu->id);
}

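/*
 * Create a vCPU, set up its CSR state and give it a one-page stack.  The
 * stack pointer lives in $sp, i.e. GPR $r3.
 */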
struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
{
	size_t stack_size;
	uint64_t stack_vaddr;
	struct kvm_regs regs;
	struct kvm_vcpu *vcpu;

	vcpu = __vm_vcpu_add(vm, vcpu_id);
	stack_size = vm->page_size;
	stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
			LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA);
	TEST_ASSERT(stack_vaddr != 0, "No memory for vm stack");

	loongarch_vcpu_setup(vcpu);
	/* Setup guest general purpose registers */
	vcpu_regs_get(vcpu, &regs);
	regs.gpr[3] = stack_vaddr + stack_size;
	vcpu_regs_set(vcpu, &regs);

	return vcpu;
}

void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
{
	struct kvm_regs regs;

	/* Setup guest PC register */
	vcpu_regs_get(vcpu, &regs);
	regs.pc = (uint64_t)guest_code;
	vcpu_regs_set(vcpu, &regs);
}