/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Derived from include/asm-i386/pgtable.h
 * Licensed under the GPL
 */

#ifndef __UM_PGTABLE_H
#define __UM_PGTABLE_H

#include <asm/fixmap.h>

#define _PAGE_PRESENT	0x001
#define _PAGE_NEWPAGE	0x002
#define _PAGE_NEWPROT	0x004
#define _PAGE_RW	0x020
#define _PAGE_USER	0x040
#define _PAGE_ACCESSED	0x080
#define _PAGE_DIRTY	0x100
/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_FILE	0x008	/* nonlinear file mapping, saved PTE; unset:swap */
#define _PAGE_PROTNONE	0x010	/* if the user mapped it with PROT_NONE;
				   pte_present gives true */
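/*
 * Summary of the low PTE bits defined above, added for orientation (the
 * physical page frame number occupies the bits above PAGE_SHIFT):
 *
 *	bit 0	_PAGE_PRESENT		bit 5	_PAGE_RW
 *	bit 1	_PAGE_NEWPAGE		bit 6	_PAGE_USER
 *	bit 2	_PAGE_NEWPROT		bit 7	_PAGE_ACCESSED
 *	bit 3	_PAGE_FILE		bit 8	_PAGE_DIRTY
 *	bit 4	_PAGE_PROTNONE
 *
 * _PAGE_NEWPAGE and _PAGE_NEWPROT are UML-private software bits; see
 * set_pte() below for how they drive updates of the host mappings.
 */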
#ifdef CONFIG_3_LEVEL_PGTABLES
#include <asm/pgtable-3level.h>
#else
#include <asm/pgtable-2level.h>
#endif

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* zero page used for uninitialized stuff */
extern unsigned long *empty_zero_page;

#define pgtable_cache_init() do ; while (0)

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */

extern unsigned long end_iomem;

#define VMALLOC_OFFSET	(__va_space)
#define VMALLOC_START	((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define PKMAP_BASE	((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
#endif
#define MODULES_VADDR	VMALLOC_START
#define MODULES_END	VMALLOC_END
/* the module area runs from MODULES_VADDR up to MODULES_END */
#define MODULES_LEN	(MODULES_END - MODULES_VADDR)

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define __PAGE_KERNEL_EXEC \
	 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_KERNEL_EXEC	__pgprot(__PAGE_KERNEL_EXEC)

#define io_remap_pfn_range	remap_pfn_range

/*
 * The i386 can't do page protection for execute, and considers that
 * the same as read.  Also, write permissions imply read permissions.
 * This is the closest we can get..
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)

#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))

#define pmd_none(x)	(!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
#define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)	do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)

#define pmd_newpage(x)	(pmd_val(x) & _PAGE_NEWPAGE)
#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)

#define pud_newpage(x)	(pud_val(x) & _PAGE_NEWPAGE)
#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)

#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)

#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pte_present(x)	pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))

/*
 * =================================
 * Flags checking section.
 * =================================
 */

static inline int pte_none(pte_t pte)
{
	return pte_is_zero(pte);
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)
{
	return((pte_get_bits(pte, _PAGE_USER)) &&
	       !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

static inline int pte_exec(pte_t pte)
{
	return((pte_get_bits(pte, _PAGE_USER)) &&
	       !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

static inline int pte_write(pte_t pte)
{
	return((pte_get_bits(pte, _PAGE_RW)) &&
	       !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

/*
 * The following only works if pte_present() is not true.
 */
static inline int pte_file(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_FILE);
}

static inline int pte_dirty(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_DIRTY);
}

static inline int pte_young(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_ACCESSED);
}

static inline int pte_newpage(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_NEWPAGE);
}

static inline int pte_newprot(pte_t pte)
{
	return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT)));
}

static inline int pte_special(pte_t pte)
{
	return 0;
}
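/*
 * Illustrative sketch, not part of the original header: _PAGE_NEWPAGE and
 * _PAGE_NEWPROT record work that UML still owes the host process (a fresh
 * mmap() versus just an mprotect() of an existing mapping).  A TLB-flush
 * walker built on the predicates above would branch roughly as follows;
 * the function itself is hypothetical.
 */
static inline int __um_pte_needs_host_update(pte_t pte)
{
	if (pte_newpage(pte))
		return 1;	/* host mapping must be (re)created */
	if (pte_newprot(pte))
		return 1;	/* mapping exists, but protection changed */
	return 0;		/* host is already up to date */
}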
/*
 * =================================
 * Flags setting section.
 * =================================
 */

static inline pte_t pte_mknewprot(pte_t pte)
{
	pte_set_bits(pte, _PAGE_NEWPROT);
	return(pte);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_DIRTY);
	return(pte);
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_ACCESSED);
	return(pte);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_RW);
	return(pte_mknewprot(pte));
}

static inline pte_t pte_mkread(pte_t pte)
{
	pte_set_bits(pte, _PAGE_USER);
	return(pte_mknewprot(pte));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_set_bits(pte, _PAGE_DIRTY);
	return(pte);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_set_bits(pte, _PAGE_ACCESSED);
	return(pte);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_set_bits(pte, _PAGE_RW);
	return(pte_mknewprot(pte));
}

static inline pte_t pte_mkuptodate(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_NEWPAGE);
	if (pte_present(pte))
		pte_clear_bits(pte, _PAGE_NEWPROT);
	return(pte);
}

static inline pte_t pte_mknewpage(pte_t pte)
{
	pte_set_bits(pte, _PAGE_NEWPAGE);
	return(pte);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return(pte);
}

static inline void set_pte(pte_t *pteptr, pte_t pteval)
{
	pte_copy(*pteptr, pteval);

	/* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
	 * fix_range knows to unmap it.  _PAGE_NEWPROT is specific to
	 * mapped pages.
	 */

	*pteptr = pte_mknewpage(*pteptr);
	if (pte_present(*pteptr))
		*pteptr = pte_mknewprot(*pteptr);
}
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEWPAGE);
}
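/*
 * Illustrative sketch, not part of the original header: the pte_mk*() and
 * pte_wrprotect() helpers above take a pte by value and return the modified
 * value, so a change only takes effect once the result is written back
 * through set_pte(), which also reasserts the NEWPAGE/NEWPROT bookkeeping.
 * The helper below is hypothetical.
 */
static inline void __um_pte_wrprotect_at(pte_t *ptep)
{
	pte_t pte = *ptep;

	pte = pte_wrprotect(pte);	/* clears _PAGE_RW, sets _PAGE_NEWPROT */
	set_pte(ptep, pte);		/* write back so the change is seen */
}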
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
#define __virt_to_page(virt) phys_to_page(__pa(virt))
#define page_to_phys(page) pfn_to_phys((pfn_t) page_to_pfn(page))
#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)

#define mk_pte(page, pgprot) \
	({ pte_t pte;					\
							\
	pte_set_val(pte, page_to_phys(page), (pgprot));	\
	if (pte_present(pte))				\
		pte = pte_mknewprot(pte_mknewpage(pte));\
	pte;})

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
	return pte;
}

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's
 */
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

#define pmd_page_vaddr(pmd) \
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this macro returns the index of the entry in the pte page which would
 * control the given virtual address
 */
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
#define pte_offset_map(dir, address) \
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_unmap(pte) do { } while (0)

struct mm_struct;
extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);

#define update_mmu_cache(vma,address,ptep) do ; while (0)

/* Encode and de-code a swap entry */
#define __swp_type(x)		(((x).val >> 5) & 0x1f)
#define __swp_offset(x)		((x).val >> 11)

#define __swp_entry(type, offset) \
	((swp_entry_t) { ((type) << 5) | ((offset) << 11) })
#define __pte_to_swp_entry(pte) \
	((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#define kern_addr_valid(addr) (1)

#include <asm-generic/pgtable.h>

/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr)		\
do {						\
	pte_clear(&init_mm, (vaddr), (ptep));	\
	__flush_tlb_one((vaddr));		\
} while (0)

#endif
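/*
 * Usage sketch for the swap-entry encoding above (an illustrative addition,
 * not part of the original header).  The swap type lives in bits 5..9 and
 * the offset in the bits from 11 up, so a round trip looks like:
 *
 *	swp_entry_t entry = __swp_entry(2, 0x1234);
 *
 *	__swp_type(entry);			-- yields 2
 *	__swp_offset(entry);			-- yields 0x1234
 *	set_pte(ptep, __swp_entry_to_pte(entry));
 *
 * Bits 0..4 stay clear, so pte_present() is false for such an entry, and
 * set_pte() still marks it _PAGE_NEWPAGE so fix_range unmaps it (see the
 * comment in set_pte() above).
 */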