#ifndef _ASM_ARM_XEN_PAGE_H
#define _ASM_ARM_XEN_PAGE_H

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <xen/xen.h>
#include <xen/interface/grant_table.h>

#define phys_to_machine_mapping_valid(pfn) (1)

#define pte_mfn		pte_pfn
#define mfn_pte		pfn_pte

/* Xen machine address */
typedef struct xmaddr {
	phys_addr_t maddr;
} xmaddr_t;

/* Xen pseudo-physical address */
typedef struct xpaddr {
	phys_addr_t paddr;
} xpaddr_t;

#define XMADDR(x)	((xmaddr_t) { .maddr = (x) })
#define XPADDR(x)	((xpaddr_t) { .paddr = (x) })

#define INVALID_P2M_ENTRY	(~0UL)

unsigned long __pfn_to_mfn(unsigned long pfn);
extern struct rb_root phys_to_mach;

/*
 * ARM guests are auto-translated, so the p2m is an identity map except
 * for foreign pages tracked in the phys_to_mach rbtree.
 */
static inline unsigned long pfn_to_mfn(unsigned long pfn)
{
	unsigned long mfn;

	if (phys_to_mach.rb_node != NULL) {
		mfn = __pfn_to_mfn(pfn);
		if (mfn != INVALID_P2M_ENTRY)
			return mfn;
	}

	return pfn;
}

/* There is no usable m2p on ARM; local frames are identity-mapped. */
static inline unsigned long mfn_to_pfn(unsigned long mfn)
{
	return mfn;
}

#define mfn_to_local_pfn(mfn) mfn_to_pfn(mfn)

static inline xmaddr_t phys_to_machine(xpaddr_t phys)
{
	unsigned offset = phys.paddr & ~PAGE_MASK;
	return XMADDR(PFN_PHYS(pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset);
}

static inline xpaddr_t machine_to_phys(xmaddr_t machine)
{
	unsigned offset = machine.maddr & ~PAGE_MASK;
	return XPADDR(PFN_PHYS(mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
}

/* VIRT <-> MACHINE conversion */
#define virt_to_machine(v)	(phys_to_machine(XPADDR(__pa(v))))
#define virt_to_mfn(v)		(pfn_to_mfn(virt_to_pfn(v)))
#define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))

static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	/* TODO: assumes vaddr is mapped 1:1 in the kernel. */
	return virt_to_machine(vaddr);
}

/* TODO: this shouldn't be here, but the frontend drivers use it (it's
 * pulled in via headers) even though we never hit this code path.
 * For now, just punt with this stub.
 */
static inline pte_t *lookup_address(unsigned long address, unsigned int *level)
{
	BUG();
	return NULL;
}

extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
				   struct gnttab_map_grant_ref *kmap_ops,
				   struct page **pages, unsigned int count);

extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
				     struct gnttab_map_grant_ref *kmap_ops,
				     struct page **pages, unsigned int count);

bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn,
				 unsigned long nr_pages);

static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	return __set_phys_to_machine(pfn, mfn);
}

#define xen_remap(cookie, size) ioremap_cache((cookie), (size))
#define xen_unmap(cookie) iounmap((cookie))

bool xen_arch_need_swiotlb(struct device *dev,
			   unsigned long pfn,
			   unsigned long mfn);

#endif /* _ASM_ARM_XEN_PAGE_H */
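
/*
 * A minimal usage sketch, not part of the header above: it shows how the
 * pseudo-physical/machine conversion helpers compose for a freshly
 * allocated page. The function name example_page_maddr() is hypothetical
 * and exists only for illustration.
 */
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <asm/xen/page.h>

static phys_addr_t example_page_maddr(void)
{
	struct page *page = alloc_page(GFP_KERNEL);
	xpaddr_t paddr;
	xmaddr_t maddr;

	if (!page)
		return 0;

	paddr = XPADDR(PFN_PHYS(page_to_pfn(page)));
	/* Identity result unless the pfn is in the phys_to_mach rbtree;
	 * the sub-page offset (zero here) is preserved either way. */
	maddr = phys_to_machine(paddr);

	__free_page(page);
	return maddr.maddr;
}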