// SPDX-License-Identifier: GPL-2.0

#include <assert.h>
#include <linux/compiler.h>

#include <asm/kvm.h>
#include "kvm_util.h"
#include "pmu.h"
#include "processor.h"
#include "ucall_common.h"

#define LOONGARCH_PAGE_TABLE_PHYS_MIN	0x200000
#define LOONGARCH_GUEST_STACK_VADDR_MIN	0x200000

static gpa_t invalid_pgtable[4];
static gva_t exception_handlers;

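/*
 * Return the index of the entry for @gva in the page table at @level.
 * Each table holds 2^(page_shift - 3) eight-byte entries, so every
 * level translates (page_shift - 3) bits of the virtual address above
 * the page offset. With 16KB pages (page_shift == 14) that is 11 bits
 * per level: level 0 indexes gva bits [24:14], level 1 bits [35:25],
 * and so on.
 */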
static u64 virt_pte_index(struct kvm_vm *vm, gva_t gva, int level)
{
	unsigned int shift;
	u64 mask;

	shift = level * (vm->page_shift - 3) + vm->page_shift;
	mask = (1UL << (vm->page_shift - 3)) - 1;
	return (gva >> shift) & mask;
}

static u64 pte_addr(struct kvm_vm *vm, u64 entry)
{
	return entry & ~((0x1UL << vm->page_shift) - 1);
}

static u64 ptrs_per_pte(struct kvm_vm *vm)
{
	return 1 << (vm->page_shift - 3);
}

static void virt_set_pgtable(struct kvm_vm *vm, gpa_t table, gpa_t child)
{
	u64 *ptep;
	int i;

	ptep = addr_gpa2hva(vm, table);
	for (i = 0; i < ptrs_per_pte(vm); i++)
		WRITE_ONCE(*(ptep + i), child);
}

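/*
 * Allocate the root page table plus one shared "empty" table per lower
 * level. Instead of zero-filling, every non-leaf entry is seeded to
 * point at the next level's empty table, and invalid_pgtable[] records
 * those per-level values so virt_pte_none() can tell an untouched entry
 * from a real mapping.
 */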
void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
	int i;
	gpa_t child, table;

	if (vm->mmu.pgd_created)
		return;

	child = table = 0;
	for (i = 0; i < vm->mmu.pgtable_levels; i++) {
		invalid_pgtable[i] = child;
		table = vm_phy_page_alloc(vm, LOONGARCH_PAGE_TABLE_PHYS_MIN,
				vm->memslots[MEM_REGION_PT]);
		TEST_ASSERT(table, "Failed to allocate page table at level %d", i);
		virt_set_pgtable(vm, table, child);
		child = table;
	}
	vm->mmu.pgd = table;
	vm->mmu.pgd_created = true;
}

static int virt_pte_none(u64 *ptep, int level)
{
	return *ptep == invalid_pgtable[level];
}

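/*
 * Walk the guest page tables and return a host pointer to the leaf PTE
 * for @gva. With @alloc set, missing intermediate tables are allocated
 * on the way down and seeded with the next level's empty value; without
 * it, hitting a hole in the walk fails the test.
 */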
static u64 *virt_populate_pte(struct kvm_vm *vm, gva_t gva, int alloc)
{
	int level;
	u64 *ptep;
	gpa_t child;

	if (!vm->mmu.pgd_created)
		goto unmapped_gva;

	child = vm->mmu.pgd;
	level = vm->mmu.pgtable_levels - 1;
	while (level > 0) {
		ptep = addr_gpa2hva(vm, child) + virt_pte_index(vm, gva, level) * 8;
		if (virt_pte_none(ptep, level)) {
			if (!alloc)
				goto unmapped_gva;
			child = vm_alloc_page_table(vm);
			virt_set_pgtable(vm, child, invalid_pgtable[level - 1]);
			WRITE_ONCE(*ptep, child);
		} else {
			child = pte_addr(vm, *ptep);
		}
		level--;
	}

	ptep = addr_gpa2hva(vm, child) + virt_pte_index(vm, gva, level) * 8;
	return ptep;

unmapped_gva:
	TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva);
	exit(EXIT_FAILURE);
}

gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva)
{
	u64 *ptep;

	ptep = virt_populate_pte(vm, gva, 0);
	TEST_ASSERT(*ptep != 0, "Virtual address gva: 0x%lx not mapped", gva);

	return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));
}

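/*
 * Install a single leaf mapping from @gva to @gpa, marked present,
 * readable, writeable, coherent-cacheable and user-accessible.
 */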
void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa)
{
	u32 prot_bits;
	u64 *ptep;

	TEST_ASSERT((gva % vm->page_size) == 0,
		"Virtual address not on page boundary,\n"
		"gva: 0x%lx vm->page_size: 0x%x", gva, vm->page_size);
	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),
		"Invalid virtual address, gva: 0x%lx", gva);
	TEST_ASSERT((gpa % vm->page_size) == 0,
		"Physical address not on page boundary,\n"
		"gpa: 0x%lx vm->page_size: 0x%x", gpa, vm->page_size);
	TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
		"Physical address beyond maximum supported,\n"
		"gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		gpa, vm->max_gfn, vm->page_size);

	ptep = virt_populate_pte(vm, gva, 1);
	prot_bits = _PAGE_PRESENT | __READABLE | __WRITEABLE | _CACHE_CC | _PAGE_USER;
	WRITE_ONCE(*ptep, gpa | prot_bits);
}

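/* Recursively print every populated entry of the table at GPA @page */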
static void pte_dump(FILE *stream, struct kvm_vm *vm, u8 indent, u64 page, int level)
{
	u64 pte, *ptep;
	static const char * const type[] = { "pte", "pmd", "pud", "pgd" };

	if (level < 0)
		return;

	for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) {
		ptep = addr_gpa2hva(vm, pte);
		if (virt_pte_none(ptep, level))
			continue;
		fprintf(stream, "%*s%s: %lx: %lx at %p\n",
			indent, "", type[level], pte, *ptep, ptep);
		/* Recurse one level down; "level--" here would corrupt the loop */
		pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level - 1);
	}
}

void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent)
{
	int level;

	if (!vm->mmu.pgd_created)
		return;

	level = vm->mmu.pgtable_levels - 1;
	pte_dump(stream, vm, indent, vm->mmu.pgd, level);
}

void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, u8 indent)
{
	/* Nothing arch-specific to dump yet */
}

void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	if (get_ucall(vcpu, &uc) != UCALL_UNHANDLED)
		return;

	TEST_FAIL("Unexpected exception (pc:0x%lx, estat:0x%lx, badv:0x%lx)",
		uc.args[0], uc.args[1], uc.args[2]);
}

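/*
 * Common C entry point for guest exceptions: decode the exception code
 * from ESTAT and dispatch to the registered handler, if any. Unhandled
 * exceptions are reported via UCALL_UNHANDLED with the faulting PC,
 * ESTAT and BADV, then the vCPU spins so the host side can pick the
 * ucall up in assert_on_unhandled_exception().
 */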
void route_exception(struct ex_regs *regs)
{
	int vector;
	unsigned long pc, estat, badv;
	struct handlers *handlers;

	handlers = (struct handlers *)exception_handlers;
	vector = (regs->estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
	if (handlers && handlers->exception_handlers[vector])
		return handlers->exception_handlers[vector](regs);

	pc = regs->pc;
	badv = regs->badv;
	estat = regs->estat;
	ucall(UCALL_UNHANDLED, 3, pc, estat, badv);
	while (1)
		;
}

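/*
 * Allocate the guest-side handler table and publish its address through
 * the exception_handlers global so route_exception() can find it.
 */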
void vm_init_descriptor_tables(struct kvm_vm *vm)
{
	void *addr;

	vm->handlers = __vm_alloc(vm, sizeof(struct handlers),
			LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA);

	addr = addr_gva2hva(vm, vm->handlers);
	memset(addr, 0, vm->page_size);
	exception_handlers = vm->handlers;
	sync_global_to_guest(vm, exception_handlers);
}

void vm_install_exception_handler(struct kvm_vm *vm, int vector, handler_fn handler)
{
	struct handlers *handlers = addr_gva2hva(vm, vm->handlers);

	assert(vector < VECTOR_NUM);
	handlers->exception_handlers[vector] = handler;
}

u32 guest_get_vcpuid(void)
{
	return csr_read(LOONGARCH_CSR_CPUID);
}

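/*
 * Guest arguments are passed in $a0-$a7, which are gpr[4]..gpr[11] in
 * the LoongArch psABI.
 */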
void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
{
	int i;
	va_list ap;
	struct kvm_regs regs;

	TEST_ASSERT(num >= 1 && num <= 8,
		"Unsupported number of args, num: %u", num);

	vcpu_regs_get(vcpu, &regs);

	va_start(ap, num);
	for (i = 0; i < num; i++)
		regs.gpr[i + 4] = va_arg(ap, u64);
	va_end(ap);

	vcpu_regs_set(vcpu, &regs);
}

static void loongarch_set_reg(struct kvm_vcpu *vcpu, u64 id, u64 val)
{
	__vcpu_set_reg(vcpu, id, val);
}

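/*
 * CPUCFG and CSR registers go through KVM_{GET,SET}_ONE_REG, with the
 * register index encoded shifted left by 3 (hence the "8 * id") inside
 * the KVM_REG_LOONGARCH_CPUCFG/CSR spaces.
 */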
static void loongarch_set_cpucfg(struct kvm_vcpu *vcpu, u64 id, u64 val)
{
	u64 cfgid;

	cfgid = KVM_REG_LOONGARCH_CPUCFG | KVM_REG_SIZE_U64 | 8 * id;
	__vcpu_set_reg(vcpu, cfgid, val);
}

static void loongarch_get_csr(struct kvm_vcpu *vcpu, u64 id, void *addr)
{
	u64 csrid;

	csrid = KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | 8 * id;
	__vcpu_get_reg(vcpu, csrid, addr);
}

static void loongarch_set_csr(struct kvm_vcpu *vcpu, u64 id, u64 val)
{
	u64 csrid;

	csrid = KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | 8 * id;
	__vcpu_set_reg(vcpu, csrid, val);
}

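/*
 * Bring a vCPU out of reset into a state that can run guest C code:
 * kernel privilege with paging enabled, page walker CSRs matching the
 * tables built by virt_arch_pgd_alloc(), and the TLB refill and general
 * exception entry points installed.
 */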
void loongarch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int width;
	unsigned int cfg;
	unsigned long val;
	struct kvm_vm *vm = vcpu->vm;

	switch (vm->mode) {
	case VM_MODE_P36V47_16K:
	case VM_MODE_P47V47_16K:
		break;

	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
	}

	cfg = read_cpucfg(LOONGARCH_CPUCFG6);
	loongarch_set_cpucfg(vcpu, LOONGARCH_CPUCFG6, cfg);

	/* Kernel privilege level with paging enabled */
	val = PLV_KERN | CSR_CRMD_PG;
	loongarch_set_csr(vcpu, LOONGARCH_CSR_CRMD, val);
	loongarch_set_csr(vcpu, LOONGARCH_CSR_PRMD, val);
	/* Enable the FPU */
	loongarch_set_csr(vcpu, LOONGARCH_CSR_EUEN, 1);
	loongarch_set_csr(vcpu, LOONGARCH_CSR_ECFG, 0);
	loongarch_set_csr(vcpu, LOONGARCH_CSR_TCFG, 0);
	loongarch_set_csr(vcpu, LOONGARCH_CSR_ASID, 1);

	/* The time counter starts from 0 */
	val = 0;
	loongarch_set_reg(vcpu, KVM_REG_LOONGARCH_COUNTER, val);

	/* Each page table holds 2^width eight-byte entries */
	width = vm->page_shift - 3;

	/* Build PWCTL0; each case ORs in the shift/width pair for its level */
	val = 0;
	switch (vm->mmu.pgtable_levels) {
	case 4:
		/* pud page shift and width */
		val |= (vm->page_shift + width * 2) << 20 | (width << 25);
		/* fall through */
	case 3:
		/* pmd page shift and width */
		val |= (vm->page_shift + width) << 10 | (width << 15);
		/* pte page shift and width */
		val |= vm->page_shift | width << 5;
		break;
	default:
		TEST_FAIL("Got %u page table levels, expected 3 or 4",
			vm->mmu.pgtable_levels);
	}

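	/*
	 * Worked example for 16KB pages and 3 levels: width == 11, so
	 * PWCTL0 == 14 | 11 << 5 | 25 << 10 | 11 << 15 (PTE and PMD
	 * base/width), and PWCTL1 below gets 36 | 11 << 6 for the PGD.
	 */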
	loongarch_set_csr(vcpu, LOONGARCH_CSR_PWCTL0, val);

	/* PGD page shift and width */
	val = (vm->page_shift + width * (vm->mmu.pgtable_levels - 1)) | width << 6;
	loongarch_set_csr(vcpu, LOONGARCH_CSR_PWCTL1, val);
	loongarch_set_csr(vcpu, LOONGARCH_CSR_PGDL, vm->mmu.pgd);

	/*
	 * The TLB refill exception runs with paging disabled, so its
	 * entry point must be a physical address.
	 */
	val = addr_gva2gpa(vm, (unsigned long)handle_tlb_refill);
	loongarch_set_csr(vcpu, LOONGARCH_CSR_TLBRENTRY, val);

	/*
	 * The general exception handler runs with paging enabled, so its
	 * entry point must be a virtual address.
	 */
	val = (unsigned long)handle_exception;
	loongarch_set_csr(vcpu, LOONGARCH_CSR_EENTRY, val);

	loongarch_get_csr(vcpu, LOONGARCH_CSR_TLBIDX, &val);
	val &= ~CSR_TLBIDX_SIZEM;
	val |= PS_DEFAULT_SIZE << CSR_TLBIDX_SIZE;
	loongarch_set_csr(vcpu, LOONGARCH_CSR_TLBIDX, val);

	loongarch_set_csr(vcpu, LOONGARCH_CSR_STLBPGSIZE, PS_DEFAULT_SIZE);

	/* LOONGARCH_CSR_KS1 holds the exception stack pointer */
	val = __vm_alloc(vm, vm->page_size, LOONGARCH_GUEST_STACK_VADDR_MIN,
			MEM_REGION_DATA);
	TEST_ASSERT(val != 0, "No memory for exception stack");
	/* The stack grows down, so point KS1 at the top of the page */
	val = val + vm->page_size;
	loongarch_set_csr(vcpu, LOONGARCH_CSR_KS1, val);

	loongarch_get_csr(vcpu, LOONGARCH_CSR_TLBREHI, &val);
	val &= ~CSR_TLBREHI_PS;
	val |= PS_DEFAULT_SIZE << CSR_TLBREHI_PS_SHIFT;
	loongarch_set_csr(vcpu, LOONGARCH_CSR_TLBREHI, val);

	loongarch_set_csr(vcpu, LOONGARCH_CSR_CPUID, vcpu->id);
	loongarch_set_csr(vcpu, LOONGARCH_CSR_TMID, vcpu->id);
}

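/*
 * Create a vCPU, run the arch setup above, and hand it a one-page stack
 * with $sp at the top, since the stack grows down.
 */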
struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id)
{
	size_t stack_size;
	u64 stack_gva;
	struct kvm_regs regs;
	struct kvm_vcpu *vcpu;

	vcpu = __vm_vcpu_add(vm, vcpu_id);
	stack_size = vm->page_size;
	stack_gva = __vm_alloc(vm, stack_size,
			LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA);
	TEST_ASSERT(stack_gva != 0, "No memory for vm stack");

	loongarch_vcpu_setup(vcpu);
	/* Set up the guest stack pointer: gpr[3] is $sp on LoongArch */
	vcpu_regs_get(vcpu, &regs);
	regs.gpr[3] = stack_gva + stack_size;
	vcpu_regs_set(vcpu, &regs);

	return vcpu;
}

void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
{
	struct kvm_regs regs;

	/* Set up the guest PC */
	vcpu_regs_get(vcpu, &regs);
	regs.pc = (u64)guest_code;
	vcpu_regs_set(vcpu, &regs);
}