xref: /linux/arch/arm/include/asm/xen/page.h (revision 005438a8eef063495ac059d128eea71b58de50e5)
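/*
 * Xen pfn/mfn and pseudo-physical <-> machine address helpers for ARM.
 * ARM guests are auto-translated (the hypervisor maintains the stage-2
 * page tables), so a pfn and its mfn are normally identical; only frames
 * backed by a foreign grant mapping differ.
 */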
#ifndef _ASM_ARM_XEN_PAGE_H
#define _ASM_ARM_XEN_PAGE_H

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <xen/xen.h>
#include <xen/interface/grant_table.h>

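/* Auto-translated guests: every pseudo-physical frame has a valid mapping. */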
#define phys_to_machine_mapping_valid(pfn) (1)

#define pte_mfn	    pte_pfn
#define mfn_pte	    pfn_pte

/* Xen machine address */
typedef struct xmaddr {
	phys_addr_t maddr;
} xmaddr_t;

/* Xen pseudo-physical address */
typedef struct xpaddr {
	phys_addr_t paddr;
} xpaddr_t;

#define XMADDR(x)	((xmaddr_t) { .maddr = (x) })
#define XPADDR(x)	((xpaddr_t) { .paddr = (x) })

#define INVALID_P2M_ENTRY      (~0UL)

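/*
 * Overrides for foreign (grant-mapped) frames are kept in an rb-tree keyed
 * by pfn; __pfn_to_mfn() returns INVALID_P2M_ENTRY when no override exists.
 */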
unsigned long __pfn_to_mfn(unsigned long pfn);
extern struct rb_root phys_to_mach;

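/* Return the override from phys_to_mach if one exists, otherwise the identity mapping. */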
static inline unsigned long pfn_to_mfn(unsigned long pfn)
{
	unsigned long mfn;

	if (phys_to_mach.rb_node != NULL) {
		mfn = __pfn_to_mfn(pfn);
		if (mfn != INVALID_P2M_ENTRY)
			return mfn;
	}

	return pfn;
}

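/* The reverse direction is always the identity; foreign mappings are not looked up. */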
static inline unsigned long mfn_to_pfn(unsigned long mfn)
{
	return mfn;
}

#define mfn_to_local_pfn(mfn) mfn_to_pfn(mfn)

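/* Translate a full address: convert the frame number, keep the offset within the page. */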
static inline xmaddr_t phys_to_machine(xpaddr_t phys)
{
	unsigned offset = phys.paddr & ~PAGE_MASK;
	return XMADDR(PFN_PHYS(pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset);
}

static inline xpaddr_t machine_to_phys(xmaddr_t machine)
{
	unsigned offset = machine.maddr & ~PAGE_MASK;
	return XPADDR(PFN_PHYS(mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
}

/* VIRT <-> MACHINE conversion */
#define virt_to_machine(v)	(phys_to_machine(XPADDR(__pa(v))))
#define virt_to_mfn(v)		(pfn_to_mfn(virt_to_pfn(v)))
#define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))

static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	/* TODO: assuming it is mapped in the kernel 1:1 */
	return virt_to_machine(vaddr);
}

/* TODO: this shouldn't be here, but the frontend drivers use it (it is
 * pulled in via shared headers) even though the code path is never hit
 * on ARM. For now just punt with a stub.
 */
static inline pte_t *lookup_address(unsigned long address, unsigned int *level)
{
	BUG();
	return NULL;
}

extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
				   struct gnttab_map_grant_ref *kmap_ops,
				   struct page **pages, unsigned int count);

extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
				     struct gnttab_unmap_grant_ref *kunmap_ops,
				     struct page **pages, unsigned int count);

bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn,
		unsigned long nr_pages);

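/* Record a pfn -> mfn override; passing INVALID_P2M_ENTRY drops it again. */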
static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	return __set_phys_to_machine(pfn, mfn);
}

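/* Regions shared with the hypervisor (e.g. the grant table) are normal memory on ARM, so map them cacheable. */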
#define xen_remap(cookie, size) ioremap_cache((cookie), (size))
#define xen_unmap(cookie) iounmap((cookie))

bool xen_arch_need_swiotlb(struct device *dev,
			   unsigned long pfn,
			   unsigned long mfn);
unsigned long xen_get_swiotlb_free_pages(unsigned int order);

#endif /* _ASM_ARM_XEN_PAGE_H */