// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Anup Patel <anup.patel@wdc.com>
 */

#include <linux/errno.h>
#include <linux/hugetlb.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/kvm_host.h>
#include <linux/sched/signal.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nacl.h>

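/*
 * Write-protect every G-stage mapping of the given memory slot and flush
 * the remote TLBs, so that subsequent guest writes fault again and can be
 * recorded in the slot's dirty bitmap.
 */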
static void mmu_wp_memory_region(struct kvm *kvm, int slot)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
	phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
	phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
	struct kvm_gstage gstage;

	kvm_riscv_gstage_init(&gstage, kvm);

	spin_lock(&kvm->mmu_lock);
	kvm_riscv_gstage_wp_range(&gstage, start, end);
	spin_unlock(&kvm->mmu_lock);
	kvm_flush_remote_tlbs_memslot(kvm, memslot);
}

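/*
 * Map a physically contiguous host region starting at @hpa into the guest
 * physical address space at @gpa using small, non-cacheable G-stage
 * mappings. When @in_atomic is set, page-table pages are allocated with
 * GFP_ATOMIC, presumably so the helper can be used from atomic context.
 */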
int kvm_riscv_mmu_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
			  unsigned long size, bool writable, bool in_atomic)
{
	int ret = 0;
	pgprot_t prot;
	unsigned long pfn;
	phys_addr_t addr, end;
	struct kvm_mmu_memory_cache pcache = {
		.gfp_custom = (in_atomic) ? GFP_ATOMIC | __GFP_ACCOUNT : 0,
		.gfp_zero = __GFP_ZERO,
	};
	struct kvm_gstage_mapping map;
	struct kvm_gstage gstage;

	kvm_riscv_gstage_init(&gstage, kvm);

	end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
	pfn = __phys_to_pfn(hpa);
	prot = pgprot_noncached(PAGE_WRITE);

	for (addr = gpa; addr < end; addr += PAGE_SIZE) {
		map.addr = addr;
		map.pte = pfn_pte(pfn, prot);
		map.pte = pte_mkdirty(map.pte);
		map.level = 0;

		if (!writable)
			map.pte = pte_wrprotect(map.pte);

		ret = kvm_mmu_topup_memory_cache(&pcache, kvm->arch.pgd_levels);
		if (ret)
			goto out;

		spin_lock(&kvm->mmu_lock);
		ret = kvm_riscv_gstage_set_pte(&gstage, &pcache, &map);
		spin_unlock(&kvm->mmu_lock);
		if (ret)
			goto out;

		pfn++;
	}

out:
	kvm_mmu_free_memory_cache(&pcache);
	return ret;
}

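/*
 * Tear down the G-stage mappings of a guest physical range, typically one
 * created by kvm_riscv_mmu_ioremap().
 */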
void kvm_riscv_mmu_iounmap(struct kvm *kvm, gpa_t gpa, unsigned long size)
{
	struct kvm_gstage gstage;

	kvm_riscv_gstage_init(&gstage, kvm);

	spin_lock(&kvm->mmu_lock);
	kvm_riscv_gstage_unmap_range(&gstage, gpa, size, false);
	spin_unlock(&kvm->mmu_lock);
}

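/*
 * Write-protect the pages selected by @mask within the memslot, starting at
 * @gfn_offset, so that further writes to them are caught by dirty logging.
 * No locking is done here; the generic dirty-log code presumably already
 * holds kvm->mmu_lock when it invokes this hook.
 */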
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
					     struct kvm_memory_slot *slot,
					     gfn_t gfn_offset,
					     unsigned long mask)
{
	phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
	phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
	phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
	struct kvm_gstage gstage;

	kvm_riscv_gstage_init(&gstage, kvm);

	kvm_riscv_gstage_wp_range(&gstage, start, end);
}

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free)
{
}

void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
{
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
	kvm_riscv_mmu_free_pgd(kvm);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
	phys_addr_t size = slot->npages << PAGE_SHIFT;
	struct kvm_gstage gstage;

	kvm_riscv_gstage_init(&gstage, kvm);

	spin_lock(&kvm->mmu_lock);
	kvm_riscv_gstage_unmap_range(&gstage, gpa, size, false);
	spin_unlock(&kvm->mmu_lock);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
	/*
	 * At this point the memslot has been committed and there is an
	 * allocated dirty_bitmap[]; dirty pages will be tracked while
	 * the memory slot is write protected.
	 */
	if (change != KVM_MR_DELETE && new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
		if (kvm_dirty_log_manual_protect_and_init_set(kvm))
			return;
		mmu_wp_memory_region(kvm, new->id);
	}
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				const struct kvm_memory_slot *old,
				struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
	hva_t hva, reg_end, size;
	bool writable;
	int ret = 0;

	if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
			change != KVM_MR_FLAGS_ONLY)
		return 0;

	/*
	 * Prevent userspace from creating a memory region outside of the
	 * GPA space addressable by the KVM guest.
	 */
	if ((new->base_gfn + new->npages) >=
	     kvm_riscv_gstage_gpa_size(kvm->arch.pgd_levels) >> PAGE_SHIFT)
		return -EFAULT;

	hva = new->userspace_addr;
	size = new->npages << PAGE_SHIFT;
	reg_end = hva + size;
	writable = !(new->flags & KVM_MEM_READONLY);

	mmap_read_lock(current->mm);

	/*
	 * A memory region could potentially cover multiple VMAs, and
	 * any holes between them, so iterate over all of them.
	 *
	 *     +--------------------------------------------+
	 * +---------------+----------------+   +----------------+
	 * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
	 * +---------------+----------------+   +----------------+
	 *     |               memory region                |
	 *     +--------------------------------------------+
	 */
	do {
		struct vm_area_struct *vma;
		hva_t vm_end;

		vma = find_vma_intersection(current->mm, hva, reg_end);
		if (!vma)
			break;

		/*
		 * Mapping a read-only VMA is only allowed if the
		 * memory region is configured as read-only.
		 */
		if (writable && !(vma->vm_flags & VM_WRITE)) {
			ret = -EPERM;
			break;
		}

		/* Take the intersection of this VMA with the memory region */
		vm_end = min(reg_end, vma->vm_end);

		if (vma->vm_flags & VM_PFNMAP) {
			/* IO region dirty page logging not allowed */
			if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
				ret = -EINVAL;
				goto out;
			}
		}
		hva = vm_end;
	} while (hva < reg_end);

out:
	mmap_read_unlock(current->mm);
	return ret;
}

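/*
 * MMU notifier callback: drop the G-stage mappings covering the given GFN
 * range. mmu_lock is taken with spin_trylock() so the unmap also proceeds
 * when the lock is already held by the caller; no TLB flush request is
 * returned to the generic code.
 */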
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
	struct kvm_gstage gstage;
	bool mmu_locked;

	if (!kvm->arch.pgd)
		return false;

	kvm_riscv_gstage_init(&gstage, kvm);
	mmu_locked = spin_trylock(&kvm->mmu_lock);
	kvm_riscv_gstage_unmap_range(&gstage, range->start << PAGE_SHIFT,
				     (range->end - range->start) << PAGE_SHIFT,
				     range->may_block);
	if (mmu_locked)
		spin_unlock(&kvm->mmu_lock);
	return false;
}

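/*
 * MMU notifier aging callback: locate the leaf PTE backing the start of the
 * range and test-and-clear its accessed (young) bit.
 */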
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	pte_t *ptep;
	u32 ptep_level = 0;
	u64 size = (range->end - range->start) << PAGE_SHIFT;
	struct kvm_gstage gstage;

	if (!kvm->arch.pgd)
		return false;

	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);

	kvm_riscv_gstage_init(&gstage, kvm);
	if (!kvm_riscv_gstage_get_leaf(&gstage, range->start << PAGE_SHIFT,
				       &ptep, &ptep_level))
		return false;

	return ptep_test_and_clear_young(NULL, 0, ptep);
}

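/*
 * MMU notifier aging callback: report whether the leaf PTE backing the start
 * of the range has its accessed (young) bit set, without clearing it.
 */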
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	pte_t *ptep;
	u32 ptep_level = 0;
	u64 size = (range->end - range->start) << PAGE_SHIFT;
	struct kvm_gstage gstage;

	if (!kvm->arch.pgd)
		return false;

	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);

	kvm_riscv_gstage_init(&gstage, kvm);
	if (!kvm_riscv_gstage_get_leaf(&gstage, range->start << PAGE_SHIFT,
				       &ptep, &ptep_level))
		return false;

	return pte_young(ptep_get(ptep));
}

static bool fault_supports_gstage_huge_mapping(struct kvm_memory_slot *memslot,
					       unsigned long hva)
{
	hva_t uaddr_start, uaddr_end;
	gpa_t gpa_start;
	size_t size;

	size = memslot->npages * PAGE_SIZE;
	uaddr_start = memslot->userspace_addr;
	uaddr_end = uaddr_start + size;

	gpa_start = memslot->base_gfn << PAGE_SHIFT;

	/*
	 * Pages belonging to memslots that don't have the same alignment
	 * within a PMD for userspace and GPA cannot be mapped with g-stage
	 * PMD entries, because we'll end up mapping the wrong pages.
	 *
	 * Consider a layout like the following:
	 *
	 *    memslot->userspace_addr:
	 *    +-----+--------------------+--------------------+---+
	 *    |abcde|fgh  vs-stage block  |    vs-stage block tv|xyz|
	 *    +-----+--------------------+--------------------+---+
	 *
	 *    memslot->base_gfn << PAGE_SHIFT:
	 *      +---+--------------------+--------------------+-----+
	 *      |abc|def  g-stage block  |    g-stage block   |tvxyz|
	 *      +---+--------------------+--------------------+-----+
	 *
	 * If we create those g-stage blocks, we'll end up with this incorrect
	 * mapping:
	 *   d -> f
	 *   e -> g
	 *   f -> h
	 */
	if ((gpa_start & (PMD_SIZE - 1)) != (uaddr_start & (PMD_SIZE - 1)))
		return false;

	/*
	 * Next, let's make sure we're not trying to map anything not covered
	 * by the memslot. This means we have to prohibit block size mappings
	 * for the beginning and end of a non-block aligned and non-block sized
	 * memory slot (illustrated by the head and tail parts of the
	 * userspace view above containing pages 'abcde' and 'xyz',
	 * respectively).
	 *
	 * Note that it doesn't matter if we do the check using the
	 * userspace_addr or the base_gfn, as both are equally aligned (per
	 * the check above) and equally sized.
	 */
	return (hva >= ALIGN(uaddr_start, PMD_SIZE)) && (hva < ALIGN_DOWN(uaddr_end, PMD_SIZE));
}

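/*
 * Walk the host page table (with IRQs disabled, see below) to find the size
 * of the mapping backing @hva: PAGE_SIZE, PMD_SIZE or PUD_SIZE.
 */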
static int get_hva_mapping_size(struct kvm *kvm,
				unsigned long hva)
{
	int size = PAGE_SIZE;
	unsigned long flags;
	pgd_t pgd;
	p4d_t p4d;
	pud_t pud;
	pmd_t pmd;

	/*
	 * Disable IRQs to prevent concurrent tear down of host page tables,
	 * e.g. if the primary MMU promotes a P*D to a huge page and then frees
	 * the original page table.
	 */
	local_irq_save(flags);

	/*
	 * Read each entry once.  As above, a non-leaf entry can be promoted to
	 * a huge page _during_ this walk.  Re-reading the entry could send the
	 * walk into the weeds, e.g. p*d_leaf() returns false (sees the old
	 * value) and then p*d_offset() walks into the target huge page instead
	 * of the old page table (sees the new value).
	 */
	pgd = pgdp_get(pgd_offset(kvm->mm, hva));
	if (pgd_none(pgd))
		goto out;

	p4d = p4dp_get(p4d_offset(&pgd, hva));
	if (p4d_none(p4d) || !p4d_present(p4d))
		goto out;

	pud = pudp_get(pud_offset(&p4d, hva));
	if (pud_none(pud) || !pud_present(pud))
		goto out;

	if (pud_leaf(pud)) {
		size = PUD_SIZE;
		goto out;
	}

	pmd = pmdp_get(pmd_offset(&pud, hva));
	if (pmd_none(pmd) || !pmd_present(pmd))
		goto out;

	if (pmd_leaf(pmd))
		size = PMD_SIZE;

out:
	local_irq_restore(flags);
	return size;
}

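/*
 * If the fault can be served by a PMD-sized block (the memslot layout allows
 * it and the host maps @hva with at least a PMD-sized page), align *gpa and
 * *hfnp down to the PMD boundary and return PMD_SIZE; otherwise return the
 * page size to map with.
 */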
static unsigned long transparent_hugepage_adjust(struct kvm *kvm,
						 struct kvm_memory_slot *memslot,
						 unsigned long hva,
						 kvm_pfn_t *hfnp, gpa_t *gpa)
{
	kvm_pfn_t hfn = *hfnp;

	/*
	 * Make sure the adjustment is done only for THP pages. Also make
	 * sure that the HVA and GPA are sufficiently aligned and that the
	 * block map is contained within the memslot.
	 */
	if (fault_supports_gstage_huge_mapping(memslot, hva)) {
		int sz;

		sz = get_hva_mapping_size(kvm, hva);
		if (sz < PMD_SIZE)
			return sz;

		*gpa &= PMD_MASK;
		hfn &= ~(PTRS_PER_PMD - 1);
		*hfnp = hfn;

		return PMD_SIZE;
	}

	return PAGE_SIZE;
}

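/*
 * Handle a G-stage translation fault: fault in the backing host page, pick
 * a mapping size based on the VMA (possibly promoting to a PMD block for
 * THP-backed memory when dirty logging is off), and install the G-stage
 * mapping, reporting it through @out_map.
 */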
int kvm_riscv_mmu_map(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
		      gpa_t gpa, unsigned long hva, bool is_write,
		      struct kvm_gstage_mapping *out_map)
{
	int ret;
	kvm_pfn_t hfn;
	bool writable;
	short vma_pageshift;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct vm_area_struct *vma;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_memory_cache *pcache = &vcpu->arch.mmu_page_cache;
	bool logging = (memslot->dirty_bitmap &&
			!(memslot->flags & KVM_MEM_READONLY)) ? true : false;
	unsigned long vma_pagesize, mmu_seq;
	struct kvm_gstage gstage;
	struct page *page;

	kvm_riscv_gstage_init(&gstage, kvm);

	/* Setup initial state of output mapping */
	memset(out_map, 0, sizeof(*out_map));

	/* We need minimum second+third level pages */
	ret = kvm_mmu_topup_memory_cache(pcache, kvm->arch.pgd_levels);
	if (ret) {
		kvm_err("Failed to topup G-stage cache\n");
		return ret;
	}

	mmap_read_lock(current->mm);

	vma = vma_lookup(current->mm, hva);
	if (unlikely(!vma)) {
		kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
		mmap_read_unlock(current->mm);
		return -EFAULT;
	}

	if (is_vm_hugetlb_page(vma))
		vma_pageshift = huge_page_shift(hstate_vma(vma));
	else
		vma_pageshift = PAGE_SHIFT;
	vma_pagesize = 1ULL << vma_pageshift;
	if (logging || (vma->vm_flags & VM_PFNMAP))
		vma_pagesize = PAGE_SIZE;

	if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE)
		gfn = (gpa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;

	/*
	 * Read mmu_invalidate_seq so that KVM can detect if the results of
	 * vma_lookup() or __kvm_faultin_pfn() become stale prior to acquiring
	 * kvm->mmu_lock.
	 *
	 * Rely on mmap_read_unlock() for an implicit smp_rmb(), which pairs
	 * with the smp_wmb() in kvm_mmu_invalidate_end().
	 */
	mmu_seq = kvm->mmu_invalidate_seq;
	mmap_read_unlock(current->mm);

	if (vma_pagesize != PUD_SIZE &&
	    vma_pagesize != PMD_SIZE &&
	    vma_pagesize != PAGE_SIZE) {
		kvm_err("Invalid VMA page size 0x%lx\n", vma_pagesize);
		return -EFAULT;
	}

	hfn = __kvm_faultin_pfn(memslot, gfn, is_write ? FOLL_WRITE : 0,
				&writable, &page);
	if (hfn == KVM_PFN_ERR_HWPOISON) {
		send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva,
				vma_pageshift, current);
		return 0;
	}
	if (is_error_noslot_pfn(hfn))
		return -EFAULT;

	/*
	 * If logging is active then we allow writable pages only
	 * for write faults.
	 */
	if (logging && !is_write)
		writable = false;

	spin_lock(&kvm->mmu_lock);

	if (mmu_invalidate_retry(kvm, mmu_seq))
		goto out_unlock;

	/* Check if we are backed by a THP and thus use block mapping if possible */
	if (!logging && (vma_pagesize == PAGE_SIZE))
		vma_pagesize = transparent_hugepage_adjust(kvm, memslot, hva, &hfn, &gpa);

	if (writable) {
		mark_page_dirty_in_slot(kvm, memslot, gfn);
		ret = kvm_riscv_gstage_map_page(&gstage, pcache, gpa, hfn << PAGE_SHIFT,
						vma_pagesize, false, true, out_map);
	} else {
		ret = kvm_riscv_gstage_map_page(&gstage, pcache, gpa, hfn << PAGE_SHIFT,
						vma_pagesize, true, true, out_map);
	}

	if (ret)
		kvm_err("Failed to map in G-stage\n");

out_unlock:
	kvm_release_faultin_page(kvm, page, ret && ret != -EEXIST, writable);
	spin_unlock(&kvm->mmu_lock);
	return ret;
}

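/*
 * Allocate and zero the top-level G-stage page directory for a VM and record
 * its virtual address, physical address, and the number of page-table levels.
 */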
int kvm_riscv_mmu_alloc_pgd(struct kvm *kvm)
{
	struct page *pgd_page;

	if (kvm->arch.pgd != NULL) {
		kvm_err("kvm_arch already initialized?\n");
		return -EINVAL;
	}

	pgd_page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
				get_order(kvm_riscv_gstage_pgd_size));
	if (!pgd_page)
		return -ENOMEM;
	kvm->arch.pgd = page_to_virt(pgd_page);
	kvm->arch.pgd_phys = page_to_phys(pgd_page);
	kvm->arch.pgd_levels = kvm_riscv_gstage_max_pgd_levels;

	return 0;
}

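/*
 * Unmap the whole guest physical address space and release the G-stage PGD.
 * The PGD pointer is cleared under mmu_lock; the pages themselves are freed
 * only after the lock is dropped.
 */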
void kvm_riscv_mmu_free_pgd(struct kvm *kvm)
{
	struct kvm_gstage gstage;
	void *pgd = NULL;

	spin_lock(&kvm->mmu_lock);
	if (kvm->arch.pgd) {
		kvm_riscv_gstage_init(&gstage, kvm);
		kvm_riscv_gstage_unmap_range(&gstage, 0UL,
			kvm_riscv_gstage_gpa_size(kvm->arch.pgd_levels), false);
		pgd = READ_ONCE(kvm->arch.pgd);
		kvm->arch.pgd = NULL;
		kvm->arch.pgd_phys = 0;
		kvm->arch.pgd_levels = 0;
	}
	spin_unlock(&kvm->mmu_lock);

	if (pgd)
		free_pages((unsigned long)pgd, get_order(kvm_riscv_gstage_pgd_size));
}

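/*
 * Load this vCPU's G-stage configuration (mode, VMID and PGD PPN) into the
 * HGATP CSR. If no VMID bits are implemented, flush the local G-stage TLB,
 * presumably because stale entries cannot be told apart by VMID.
 */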
void kvm_riscv_mmu_update_hgatp(struct kvm_vcpu *vcpu)
{
	struct kvm_arch *ka = &vcpu->kvm->arch;
	unsigned long hgatp = kvm_riscv_gstage_mode(ka->pgd_levels)
			      << HGATP_MODE_SHIFT;

	hgatp |= (READ_ONCE(ka->vmid.vmid) << HGATP_VMID_SHIFT) & HGATP_VMID;
	hgatp |= (ka->pgd_phys >> PAGE_SHIFT) & HGATP_PPN;

	ncsr_write(CSR_HGATP, hgatp);

	if (!kvm_riscv_gstage_vmid_bits())
		kvm_riscv_local_hfence_gvma_all();
}