#ifndef _ASM_X86_XEN_PAGE_H
#define _ASM_X86_XEN_PAGE_H

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/pfn.h>
#include <linux/mm.h>

#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgtable.h>

#include <xen/interface/xen.h>
#include <xen/grant_table.h>
#include <xen/features.h>

/* Xen machine address */
typedef struct xmaddr {
	phys_addr_t maddr;
} xmaddr_t;

/* Xen pseudo-physical address */
typedef struct xpaddr {
	phys_addr_t paddr;
} xpaddr_t;

#define XMADDR(x)	((xmaddr_t) { .maddr = (x) })
#define XPADDR(x)	((xpaddr_t) { .paddr = (x) })

/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
#define INVALID_P2M_ENTRY	(~0UL)
#define FOREIGN_FRAME_BIT	(1UL<<(BITS_PER_LONG-1))
#define IDENTITY_FRAME_BIT	(1UL<<(BITS_PER_LONG-2))
#define FOREIGN_FRAME(m)	((m) | FOREIGN_FRAME_BIT)
#define IDENTITY_FRAME(m)	((m) | IDENTITY_FRAME_BIT)

/* Maximum amount of memory we can handle in a domain in pages */
#define MAX_DOMAIN_PAGES						\
    ((unsigned long)((u64)CONFIG_XEN_MAX_DOMAIN_MEMORY * 1024 * 1024 * 1024 / PAGE_SIZE))

extern unsigned long *machine_to_phys_mapping;
extern unsigned long machine_to_phys_nr;
extern unsigned long *xen_p2m_addr;
extern unsigned long xen_p2m_size;
extern unsigned long xen_max_p2m_pfn;

extern unsigned long get_phys_to_machine(unsigned long pfn);
extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern unsigned long set_phys_range_identity(unsigned long pfn_s,
					     unsigned long pfn_e);

extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
				   struct gnttab_map_grant_ref *kmap_ops,
				   struct page **pages, unsigned int count);
extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
				     struct gnttab_map_grant_ref *kmap_ops,
				     struct page **pages, unsigned int count);
extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);

/*
 * Helper functions to write or read unsigned long values to/from
 * memory, when the access may fault.
 */
static inline int xen_safe_write_ulong(unsigned long *addr, unsigned long val)
{
	return __put_user(val, (unsigned long __user *)addr);
}

static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
{
	return __get_user(*val, (unsigned long __user *)addr);
}
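
/*
 * A minimal illustrative sketch, assuming a hypothetical caller-supplied
 * machine frame number "mfn": the accessor above lets a lookup in
 * machine_to_phys_mapping[] survive a fault, e.g. when the entry lies in
 * device space beyond the end of RAM.  This is the same pattern
 * mfn_to_pfn_no_overrides() below relies on:
 *
 *	unsigned long pfn;
 *
 *	if (xen_safe_read_ulong(&machine_to_phys_mapping[mfn], &pfn) < 0)
 *		pfn = ~0;	// the access faulted; treat the entry as unknown
 */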

/*
 * When to use pfn_to_mfn(), __pfn_to_mfn() or get_phys_to_machine():
 * - pfn_to_mfn() returns either INVALID_P2M_ENTRY or the mfn. No indicator
 *   bits (identity or foreign) are set.
 * - __pfn_to_mfn() returns the found entry of the p2m table. A possibly set
 *   identity or foreign indicator will still be set. __pfn_to_mfn()
 *   encapsulates get_phys_to_machine(), which is called in special cases only.
 * - get_phys_to_machine() is to be called by __pfn_to_mfn() only in special
 *   cases needing extended handling.
 */
static inline unsigned long __pfn_to_mfn(unsigned long pfn)
{
	unsigned long mfn;

	if (pfn < xen_p2m_size)
		mfn = xen_p2m_addr[pfn];
	else if (unlikely(pfn < xen_max_p2m_pfn))
		return get_phys_to_machine(pfn);
	else
		return IDENTITY_FRAME(pfn);

	if (unlikely(mfn == INVALID_P2M_ENTRY))
		return get_phys_to_machine(pfn);

	return mfn;
}

static inline unsigned long pfn_to_mfn(unsigned long pfn)
{
	unsigned long mfn;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return pfn;

	mfn = __pfn_to_mfn(pfn);

	if (mfn != INVALID_P2M_ENTRY)
		mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);

	return mfn;
}

static inline int phys_to_machine_mapping_valid(unsigned long pfn)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 1;

	return __pfn_to_mfn(pfn) != INVALID_P2M_ENTRY;
}

static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
{
	unsigned long pfn;
	int ret;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return mfn;

	if (unlikely(mfn >= machine_to_phys_nr))
		return ~0;

	/*
	 * The array access can fail (e.g., device space beyond end of RAM).
	 * In such cases it doesn't matter what we return (we return garbage),
	 * but we must handle the fault without crashing!
	 */
	ret = xen_safe_read_ulong(&machine_to_phys_mapping[mfn], &pfn);
	if (ret < 0)
		return ~0;

	return pfn;
}

static inline unsigned long mfn_to_pfn(unsigned long mfn)
{
	unsigned long pfn;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return mfn;

	pfn = mfn_to_pfn_no_overrides(mfn);
	if (__pfn_to_mfn(pfn) != mfn) {
		/*
		 * If this appears to be a foreign mfn (because the pfn
		 * doesn't map back to the mfn), then check the local override
		 * table to see if there's a better pfn to use.
		 *
		 * m2p_find_override_pfn returns ~0 if it doesn't find anything.
		 */
		pfn = m2p_find_override_pfn(mfn, ~0);
	}

	/*
	 * pfn is ~0 if there are no entries in the m2p for mfn or if the
	 * entry doesn't map back to the mfn and m2p_override doesn't have a
	 * valid entry for it.
	 */
	if (pfn == ~0 && __pfn_to_mfn(mfn) == IDENTITY_FRAME(mfn))
		pfn = mfn;

	return pfn;
}

static inline xmaddr_t phys_to_machine(xpaddr_t phys)
{
	unsigned offset = phys.paddr & ~PAGE_MASK;
	return XMADDR(PFN_PHYS(pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset);
}

static inline xpaddr_t machine_to_phys(xmaddr_t machine)
{
	unsigned offset = machine.maddr & ~PAGE_MASK;
	return XPADDR(PFN_PHYS(mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
}
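
/*
 * A minimal illustrative sketch, assuming a hypothetical guest-physical
 * address "gpa" of a buffer owned by this domain: PFN_DOWN(gpa) selects the
 * page, pfn_to_mfn() yields the backing machine frame with the indicator
 * bits already masked, and phys_to_machine() performs the same translation
 * while preserving the offset within the page:
 *
 *	unsigned long mfn = pfn_to_mfn(PFN_DOWN(gpa));
 *	xmaddr_t maddr;
 *
 *	if (mfn == INVALID_P2M_ENTRY)
 *		return -ENXIO;			// no machine frame backs this pfn
 *	maddr = phys_to_machine(XPADDR(gpa));	// same lookup, offset kept
 */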

/*
 * We detect special mappings in one of two ways:
 *  1. If the MFN is an I/O page then Xen will set the m2p entry
 *     to be outside our maximum possible pseudophys range.
 *  2. If the MFN belongs to a different domain then we will certainly
 *     not have MFN in our p2m table. Conversely, if the page is ours,
 *     then we'll have p2m(m2p(MFN))==MFN.
 * If we detect a special mapping then it doesn't have a 'struct page'.
 * We force !pfn_valid() by returning an out-of-range pointer.
 *
 * NB. These checks require that, for any MFN that is not in our reservation,
 * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
 * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
 * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
 *
 * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
 *      use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
 *      require. In all the cases we care about, the FOREIGN_FRAME bit is
 *      masked (e.g., pfn_to_mfn()) so behaviour there is correct.
 */
static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
{
	unsigned long pfn;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return mfn;

	pfn = mfn_to_pfn(mfn);
	if (__pfn_to_mfn(pfn) != mfn)
		return -1; /* force !pfn_valid() */
	return pfn;
}

/* VIRT <-> MACHINE conversion */
#define virt_to_machine(v)	(phys_to_machine(XPADDR(__pa(v))))
#define virt_to_pfn(v)		(PFN_DOWN(__pa(v)))
#define virt_to_mfn(v)		(pfn_to_mfn(virt_to_pfn(v)))
#define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))

static inline unsigned long pte_mfn(pte_t pte)
{
	return (pte.pte & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline pte_t mfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	pte_t pte;

	pte.pte = ((phys_addr_t)page_nr << PAGE_SHIFT) |
			massage_pgprot(pgprot);

	return pte;
}

static inline pteval_t pte_val_ma(pte_t pte)
{
	return pte.pte;
}

static inline pte_t __pte_ma(pteval_t x)
{
	return (pte_t) { .pte = x };
}

#define pmd_val_ma(v)	((v).pmd)
#ifdef __PAGETABLE_PUD_FOLDED
#define pud_val_ma(v)	((v).pgd.pgd)
#else
#define pud_val_ma(v)	((v).pud)
#endif
#define __pmd_ma(x)	((pmd_t) { (x) })

#define pgd_val_ma(x)	((x).pgd)

void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid);

xmaddr_t arbitrary_virt_to_machine(void *address);
unsigned long arbitrary_virt_to_mfn(void *vaddr);
void make_lowmem_page_readonly(void *vaddr);
void make_lowmem_page_readwrite(void *vaddr);

#define xen_remap(cookie, size) ioremap((cookie), (size))
#define xen_unmap(cookie) iounmap((cookie))

static inline bool xen_arch_need_swiotlb(struct device *dev,
					 unsigned long pfn,
					 unsigned long mfn)
{
	return false;
}

#endif /* _ASM_X86_XEN_PAGE_H */
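
/*
 * A minimal illustrative sketch, assuming a hypothetical kernel virtual
 * address "vaddr" in the direct mapping: virt_to_mfn() gives the machine
 * frame a PV guest would hand to the hypervisor, while mfn_to_local_pfn()
 * maps a frame back to a pfn only when the frame really belongs to this
 * domain; for a foreign or unmapped frame it returns an out-of-range value
 * so that pfn_valid() fails:
 *
 *	unsigned long mfn = virt_to_mfn(vaddr);
 *	unsigned long pfn = mfn_to_local_pfn(mfn);
 *
 *	if (!pfn_valid(pfn))
 *		return -EINVAL;		// frame is foreign or not mapped here
 */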