// SPDX-License-Identifier: GPL-2.0
/* xref: /linux/arch/x86/xen/mmu.c (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800) */

#include <linux/pfn.h>
#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
#include <xen/interface/memory.h>

#include "xen-ops.h"

arbitrary_virt_to_mfn(void * vaddr)10 unsigned long arbitrary_virt_to_mfn(void *vaddr)
11 {
12 	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);
13 
14 	return PFN_DOWN(maddr.maddr);
15 }
16 
arbitrary_virt_to_machine(void * vaddr)17 xmaddr_t arbitrary_virt_to_machine(void *vaddr)
18 {
19 	unsigned long address = (unsigned long)vaddr;
20 	unsigned int level;
21 	pte_t *pte;
22 	unsigned offset;
23 
24 	/*
25 	 * if the PFN is in the linear mapped vaddr range, we can just use
26 	 * the (quick) virt_to_machine() p2m lookup
27 	 */
28 	if (virt_addr_valid(vaddr))
29 		return virt_to_machine(vaddr);
30 
31 	/* otherwise we have to do a (slower) full page-table walk */
32 
33 	pte = lookup_address(address, &level);
34 	BUG_ON(pte == NULL);
35 	offset = address & ~PAGE_MASK;
36 	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
37 }
38 EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
39 
40 /* Returns: 0 success */
xen_unmap_domain_gfn_range(struct vm_area_struct * vma,int nr,struct page ** pages)41 int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
42 			       int nr, struct page **pages)
43 {
44 	if (xen_feature(XENFEAT_auto_translated_physmap))
45 		return xen_xlate_unmap_gfn_range(vma, nr, pages);
46 
47 	if (!pages)
48 		return 0;
49 
50 	return -EINVAL;
51 }
52 EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);
53