// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Anup Patel <anup.patel@wdc.com>
 */

#include <linux/errno.h>
#include <linux/hugetlb.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/kvm_host.h>
#include <linux/sched/signal.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nacl.h>

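/*
 * Write-protect every g-stage mapping backing the given memslot so that
 * subsequent guest writes fault and can be recorded in the dirty log.
 */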
static void mmu_wp_memory_region(struct kvm *kvm, int slot)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
	phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
	phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
	struct kvm_gstage gstage;

	gstage.kvm = kvm;
	gstage.flags = 0;
	gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
	gstage.pgd = kvm->arch.pgd;

	spin_lock(&kvm->mmu_lock);
	kvm_riscv_gstage_wp_range(&gstage, start, end);
	spin_unlock(&kvm->mmu_lock);
	kvm_flush_remote_tlbs_memslot(kvm, memslot);
}

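/*
 * Map a contiguous host physical range into the guest physical address
 * space as non-cacheable pages, one PAGE_SIZE mapping at a time. When
 * in_atomic is true, the page-table cache is topped up with GFP_ATOMIC
 * allocations so this can be called from atomic context.
 */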
int kvm_riscv_mmu_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
			  unsigned long size, bool writable, bool in_atomic)
{
	int ret = 0;
	pgprot_t prot;
	unsigned long pfn;
	phys_addr_t addr, end;
	struct kvm_mmu_memory_cache pcache = {
		.gfp_custom = (in_atomic) ? GFP_ATOMIC | __GFP_ACCOUNT : 0,
		.gfp_zero = __GFP_ZERO,
	};
	struct kvm_gstage_mapping map;
	struct kvm_gstage gstage;

	gstage.kvm = kvm;
	gstage.flags = 0;
	gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
	gstage.pgd = kvm->arch.pgd;

	end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
	pfn = __phys_to_pfn(hpa);
	prot = pgprot_noncached(PAGE_WRITE);

	for (addr = gpa; addr < end; addr += PAGE_SIZE) {
		map.addr = addr;
		map.pte = pfn_pte(pfn, prot);
		map.pte = pte_mkdirty(map.pte);
		map.level = 0;

		if (!writable)
			map.pte = pte_wrprotect(map.pte);

		ret = kvm_mmu_topup_memory_cache(&pcache, kvm_riscv_gstage_pgd_levels);
		if (ret)
			goto out;

		spin_lock(&kvm->mmu_lock);
		ret = kvm_riscv_gstage_set_pte(&gstage, &pcache, &map);
		spin_unlock(&kvm->mmu_lock);
		if (ret)
			goto out;

		pfn++;
	}

out:
	kvm_mmu_free_memory_cache(&pcache);
	return ret;
}

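/* Undo a mapping previously created with kvm_riscv_mmu_ioremap(). */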
void kvm_riscv_mmu_iounmap(struct kvm *kvm, gpa_t gpa, unsigned long size)
{
	struct kvm_gstage gstage;

	gstage.kvm = kvm;
	gstage.flags = 0;
	gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
	gstage.pgd = kvm->arch.pgd;

	spin_lock(&kvm->mmu_lock);
	kvm_riscv_gstage_unmap_range(&gstage, gpa, size, false);
	spin_unlock(&kvm->mmu_lock);
}

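/*
 * Write-protect the pages selected by @mask (relative to the memslot
 * base plus @gfn_offset) so that the next write to each page is caught
 * by dirty logging.
 */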
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
					     struct kvm_memory_slot *slot,
					     gfn_t gfn_offset,
					     unsigned long mask)
{
	phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
	phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
	phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
	struct kvm_gstage gstage;

	gstage.kvm = kvm;
	gstage.flags = 0;
	gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
	gstage.pgd = kvm->arch.pgd;

	kvm_riscv_gstage_wp_range(&gstage, start, end);
}

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free)
{
}

void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
{
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
	kvm_riscv_mmu_free_pgd(kvm);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
	phys_addr_t size = slot->npages << PAGE_SHIFT;
	struct kvm_gstage gstage;

	gstage.kvm = kvm;
	gstage.flags = 0;
	gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
	gstage.pgd = kvm->arch.pgd;

	spin_lock(&kvm->mmu_lock);
	kvm_riscv_gstage_unmap_range(&gstage, gpa, size, false);
	spin_unlock(&kvm->mmu_lock);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
	/*
	 * At this point the memslot has been committed and there is an
	 * allocated dirty_bitmap[]; dirty pages will be tracked while
	 * the memory slot is write protected.
	 */
	if (change != KVM_MR_DELETE && new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
		if (kvm_dirty_log_manual_protect_and_init_set(kvm))
			return;
		mmu_wp_memory_region(kvm, new->id);
	}
}

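/*
 * Validate a new or changed memslot: reject regions that extend beyond
 * the guest physical address space supported by the g-stage mode, and
 * walk the covered VMAs to reject combinations that cannot be supported
 * (writable slots over read-only VMAs, dirty logging on PFNMAP ranges).
 */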
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				const struct kvm_memory_slot *old,
				struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
	hva_t hva, reg_end, size;
	bool writable;
	int ret = 0;

	if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
			change != KVM_MR_FLAGS_ONLY)
		return 0;

	/*
	 * Prevent userspace from creating a memory region outside of the
	 * GPA space addressable by the guest.
	 */
	if ((new->base_gfn + new->npages) >=
	    (kvm_riscv_gstage_gpa_size >> PAGE_SHIFT))
		return -EFAULT;

	hva = new->userspace_addr;
	size = new->npages << PAGE_SHIFT;
	reg_end = hva + size;
	writable = !(new->flags & KVM_MEM_READONLY);

	mmap_read_lock(current->mm);

	/*
	 * A memory region could potentially cover multiple VMAs, and
	 * any holes between them, so iterate over all of them.
	 *
	 *     +--------------------------------------------+
	 * +---------------+----------------+   +----------------+
	 * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
	 * +---------------+----------------+   +----------------+
	 *     |               memory region                |
	 *     +--------------------------------------------+
	 */
	do {
		struct vm_area_struct *vma;
		hva_t vm_end;

		vma = find_vma_intersection(current->mm, hva, reg_end);
		if (!vma)
			break;

		/*
		 * Mapping a read-only VMA is only allowed if the
		 * memory region is configured as read-only.
		 */
		if (writable && !(vma->vm_flags & VM_WRITE)) {
			ret = -EPERM;
			break;
		}

		/* Take the intersection of this VMA with the memory region */
		vm_end = min(reg_end, vma->vm_end);

		if (vma->vm_flags & VM_PFNMAP) {
			/* IO region dirty page logging not allowed */
			if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
				ret = -EINVAL;
				goto out;
			}
		}
		hva = vm_end;
	} while (hva < reg_end);

out:
	mmap_read_unlock(current->mm);
	return ret;
}

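/*
 * MMU notifier unmap callback: tear down any g-stage mappings covering
 * the given GFN range.
 */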
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
	struct kvm_gstage gstage;

	if (!kvm->arch.pgd)
		return false;

	gstage.kvm = kvm;
	gstage.flags = 0;
	gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
	gstage.pgd = kvm->arch.pgd;
	kvm_riscv_gstage_unmap_range(&gstage, range->start << PAGE_SHIFT,
				     (range->end - range->start) << PAGE_SHIFT,
				     range->may_block);
	return false;
}

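/*
 * MMU notifier aging: clear the accessed bit on the leaf PTE mapping
 * range->start, if one exists.
 */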
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	pte_t *ptep;
	u32 ptep_level = 0;
	u64 size = (range->end - range->start) << PAGE_SHIFT;
	struct kvm_gstage gstage;

	if (!kvm->arch.pgd)
		return false;

	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);

	gstage.kvm = kvm;
	gstage.flags = 0;
	gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
	gstage.pgd = kvm->arch.pgd;
	if (!kvm_riscv_gstage_get_leaf(&gstage, range->start << PAGE_SHIFT,
				       &ptep, &ptep_level))
		return false;

	return ptep_test_and_clear_young(NULL, 0, ptep);
}

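/*
 * MMU notifier aging test: report whether the leaf PTE mapping
 * range->start has been accessed, without clearing the bit.
 */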
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	pte_t *ptep;
	u32 ptep_level = 0;
	u64 size = (range->end - range->start) << PAGE_SHIFT;
	struct kvm_gstage gstage;

	if (!kvm->arch.pgd)
		return false;

	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);

	gstage.kvm = kvm;
	gstage.flags = 0;
	gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
	gstage.pgd = kvm->arch.pgd;
	if (!kvm_riscv_gstage_get_leaf(&gstage, range->start << PAGE_SHIFT,
				       &ptep, &ptep_level))
		return false;

	return pte_young(ptep_get(ptep));
}

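/*
 * Check whether a fault at @hva may safely be mapped with a PMD-size
 * g-stage block: the userspace address and the GPA must share the same
 * offset within a PMD, and the block must lie entirely inside the memslot.
 */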
static bool fault_supports_gstage_huge_mapping(struct kvm_memory_slot *memslot,
					       unsigned long hva)
{
	hva_t uaddr_start, uaddr_end;
	gpa_t gpa_start;
	size_t size;

	size = memslot->npages * PAGE_SIZE;
	uaddr_start = memslot->userspace_addr;
	uaddr_end = uaddr_start + size;

	gpa_start = memslot->base_gfn << PAGE_SHIFT;

	/*
	 * Pages belonging to memslots that don't have the same alignment
	 * within a PMD for userspace and GPA cannot be mapped with g-stage
	 * PMD entries, because we'll end up mapping the wrong pages.
	 *
	 * Consider a layout like the following:
	 *
	 *    memslot->userspace_addr:
	 *    +-----+--------------------+--------------------+---+
	 *    |abcde|fgh  vs-stage block  |    vs-stage block tv|xyz|
	 *    +-----+--------------------+--------------------+---+
	 *
	 *    memslot->base_gfn << PAGE_SHIFT:
	 *      +---+--------------------+--------------------+-----+
	 *      |abc|def  g-stage block  |    g-stage block   |tvxyz|
	 *      +---+--------------------+--------------------+-----+
	 *
	 * If we create those g-stage blocks, we'll end up with this incorrect
	 * mapping:
	 *   d -> f
	 *   e -> g
	 *   f -> h
	 */
	if ((gpa_start & (PMD_SIZE - 1)) != (uaddr_start & (PMD_SIZE - 1)))
		return false;

	/*
	 * Next, let's make sure we're not trying to map anything not covered
	 * by the memslot. This means we have to prohibit block size mappings
	 * for the beginning and end of a non-block aligned and non-block sized
	 * memory slot (illustrated by the head and tail parts of the
	 * userspace view above containing pages 'abcde' and 'xyz',
	 * respectively).
	 *
	 * Note that it doesn't matter if we do the check using the
	 * userspace_addr or the base_gfn, as both are equally aligned (per
	 * the check above) and equally sized.
	 */
	return (hva >= ALIGN(uaddr_start, PMD_SIZE)) && (hva < ALIGN_DOWN(uaddr_end, PMD_SIZE));
}

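/*
 * Return the size of the host mapping backing @hva (PAGE_SIZE, PMD_SIZE,
 * or PUD_SIZE) by doing a lockless walk of the host page tables.
 */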
static int get_hva_mapping_size(struct kvm *kvm,
				unsigned long hva)
{
	int size = PAGE_SIZE;
	unsigned long flags;
	pgd_t pgd;
	p4d_t p4d;
	pud_t pud;
	pmd_t pmd;

	/*
	 * Disable IRQs to prevent concurrent tear down of host page tables,
	 * e.g. if the primary MMU promotes a P*D to a huge page and then frees
	 * the original page table.
	 */
	local_irq_save(flags);

	/*
	 * Read each entry once.  As above, a non-leaf entry can be promoted to
	 * a huge page _during_ this walk.  Re-reading the entry could send the
	 * walk into the weeds, e.g. p*d_leaf() returns false (sees the old
	 * value) and then p*d_offset() walks into the target huge page instead
	 * of the old page table (sees the new value).
	 */
	pgd = pgdp_get(pgd_offset(kvm->mm, hva));
	if (pgd_none(pgd))
		goto out;

	p4d = p4dp_get(p4d_offset(&pgd, hva));
	if (p4d_none(p4d) || !p4d_present(p4d))
		goto out;

	pud = pudp_get(pud_offset(&p4d, hva));
	if (pud_none(pud) || !pud_present(pud))
		goto out;

	if (pud_leaf(pud)) {
		size = PUD_SIZE;
		goto out;
	}

	pmd = pmdp_get(pmd_offset(&pud, hva));
	if (pmd_none(pmd) || !pmd_present(pmd))
		goto out;

	if (pmd_leaf(pmd))
		size = PMD_SIZE;

out:
	local_irq_restore(flags);
	return size;
}

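/*
 * If the host maps @hva with a transparent huge page and the fault is
 * eligible for a block mapping, align *gpa and *hfnp down to a PMD
 * boundary and return PMD_SIZE; otherwise return PAGE_SIZE.
 */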
static unsigned long transparent_hugepage_adjust(struct kvm *kvm,
						 struct kvm_memory_slot *memslot,
						 unsigned long hva,
						 kvm_pfn_t *hfnp, gpa_t *gpa)
{
	kvm_pfn_t hfn = *hfnp;

	/*
	 * Make sure the adjustment is done only for THP pages. Also make
	 * sure that the HVA and GPA are sufficiently aligned and that the
	 * block map is contained within the memslot.
	 */
	if (fault_supports_gstage_huge_mapping(memslot, hva)) {
		int sz;

		sz = get_hva_mapping_size(kvm, hva);
		if (sz < PMD_SIZE)
			return sz;

		*gpa &= PMD_MASK;
		hfn &= ~(PTRS_PER_PMD - 1);
		*hfnp = hfn;

		return PMD_SIZE;
	}

	return PAGE_SIZE;
}

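/*
 * Handle a g-stage page fault: resolve the faulting GPA to a host page,
 * choose the largest mapping size the host and memslot allow, and install
 * the mapping under kvm->mmu_lock, bailing out (so the access is retried)
 * if an MMU notifier invalidation races with the fault.
 */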
int kvm_riscv_mmu_map(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
		      gpa_t gpa, unsigned long hva, bool is_write,
		      struct kvm_gstage_mapping *out_map)
{
	int ret;
	kvm_pfn_t hfn;
	bool writable;
	short vma_pageshift;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct vm_area_struct *vma;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_memory_cache *pcache = &vcpu->arch.mmu_page_cache;
	bool logging = (memslot->dirty_bitmap &&
			!(memslot->flags & KVM_MEM_READONLY)) ? true : false;
	unsigned long vma_pagesize, mmu_seq;
	struct kvm_gstage gstage;
	struct page *page;

	gstage.kvm = kvm;
	gstage.flags = 0;
	gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
	gstage.pgd = kvm->arch.pgd;

	/* Setup initial state of output mapping */
	memset(out_map, 0, sizeof(*out_map));

	/* We need minimum second+third level pages */
	ret = kvm_mmu_topup_memory_cache(pcache, kvm_riscv_gstage_pgd_levels);
	if (ret) {
		kvm_err("Failed to topup G-stage cache\n");
		return ret;
	}

	mmap_read_lock(current->mm);

	vma = vma_lookup(current->mm, hva);
	if (unlikely(!vma)) {
		kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
		mmap_read_unlock(current->mm);
		return -EFAULT;
	}

	if (is_vm_hugetlb_page(vma))
		vma_pageshift = huge_page_shift(hstate_vma(vma));
	else
		vma_pageshift = PAGE_SHIFT;
	vma_pagesize = 1ULL << vma_pageshift;
	if (logging || (vma->vm_flags & VM_PFNMAP))
		vma_pagesize = PAGE_SIZE;

	if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE)
		gfn = (gpa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;

	/*
	 * Read mmu_invalidate_seq so that KVM can detect if the results of
	 * vma_lookup() or __kvm_faultin_pfn() become stale prior to acquiring
	 * kvm->mmu_lock.
	 *
	 * Rely on mmap_read_unlock() for an implicit smp_rmb(), which pairs
	 * with the smp_wmb() in kvm_mmu_invalidate_end().
	 */
	mmu_seq = kvm->mmu_invalidate_seq;
	mmap_read_unlock(current->mm);

	if (vma_pagesize != PUD_SIZE &&
	    vma_pagesize != PMD_SIZE &&
	    vma_pagesize != PAGE_SIZE) {
		kvm_err("Invalid VMA page size 0x%lx\n", vma_pagesize);
		return -EFAULT;
	}

	hfn = __kvm_faultin_pfn(memslot, gfn, is_write ? FOLL_WRITE : 0,
				&writable, &page);
	if (hfn == KVM_PFN_ERR_HWPOISON) {
		send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva,
				vma_pageshift, current);
		return 0;
	}
	if (is_error_noslot_pfn(hfn))
		return -EFAULT;

	/*
	 * If logging is active then we allow writable pages only
	 * for write faults.
	 */
	if (logging && !is_write)
		writable = false;

	spin_lock(&kvm->mmu_lock);

	if (mmu_invalidate_retry(kvm, mmu_seq))
		goto out_unlock;

	/* Check if we are backed by a THP and thus use block mapping if possible */
	if (vma_pagesize == PAGE_SIZE)
		vma_pagesize = transparent_hugepage_adjust(kvm, memslot, hva, &hfn, &gpa);

	if (writable) {
		mark_page_dirty_in_slot(kvm, memslot, gfn);
		ret = kvm_riscv_gstage_map_page(&gstage, pcache, gpa, hfn << PAGE_SHIFT,
						vma_pagesize, false, true, out_map);
	} else {
		ret = kvm_riscv_gstage_map_page(&gstage, pcache, gpa, hfn << PAGE_SHIFT,
						vma_pagesize, true, true, out_map);
	}

	if (ret)
		kvm_err("Failed to map in G-stage\n");

out_unlock:
	kvm_release_faultin_page(kvm, page, ret && ret != -EEXIST, writable);
	spin_unlock(&kvm->mmu_lock);
	return ret;
}

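/* Allocate the g-stage PGD for a new VM. */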
int kvm_riscv_mmu_alloc_pgd(struct kvm *kvm)
{
	struct page *pgd_page;

	if (kvm->arch.pgd != NULL) {
		kvm_err("kvm_arch already initialized?\n");
		return -EINVAL;
	}

	pgd_page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
				get_order(kvm_riscv_gstage_pgd_size));
	if (!pgd_page)
		return -ENOMEM;
	kvm->arch.pgd = page_to_virt(pgd_page);
	kvm->arch.pgd_phys = page_to_phys(pgd_page);

	return 0;
}

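/* Unmap the entire guest physical address space and free the g-stage PGD. */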
void kvm_riscv_mmu_free_pgd(struct kvm *kvm)
{
	struct kvm_gstage gstage;
	void *pgd = NULL;

	spin_lock(&kvm->mmu_lock);
	if (kvm->arch.pgd) {
		gstage.kvm = kvm;
		gstage.flags = 0;
		gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
		gstage.pgd = kvm->arch.pgd;
		kvm_riscv_gstage_unmap_range(&gstage, 0UL, kvm_riscv_gstage_gpa_size, false);
		pgd = READ_ONCE(kvm->arch.pgd);
		kvm->arch.pgd = NULL;
		kvm->arch.pgd_phys = 0;
	}
	spin_unlock(&kvm->mmu_lock);

	if (pgd)
		free_pages((unsigned long)pgd, get_order(kvm_riscv_gstage_pgd_size));
}

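/*
 * Program the hgatp CSR for this vCPU with the current g-stage mode,
 * VMID, and PGD; flush the local g-stage TLB when VMIDs are not
 * available to distinguish guests.
 */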
void kvm_riscv_mmu_update_hgatp(struct kvm_vcpu *vcpu)
{
	unsigned long hgatp = kvm_riscv_gstage_mode << HGATP_MODE_SHIFT;
	struct kvm_arch *k = &vcpu->kvm->arch;

	hgatp |= (READ_ONCE(k->vmid.vmid) << HGATP_VMID_SHIFT) & HGATP_VMID;
	hgatp |= (k->pgd_phys >> PAGE_SHIFT) & HGATP_PPN;

	ncsr_write(CSR_HGATP, hgatp);

	if (!kvm_riscv_gstage_vmid_bits())
		kvm_riscv_local_hfence_gvma_all();
}