#ifndef _ASM_X86_PGTABLE_64_H
#define _ASM_X86_PGTABLE_64_H

#include <linux/const.h>
#ifndef __ASSEMBLY__

/*
 * This file contains the functions and defines necessary to modify and use
 * the x86-64 page table tree.
 */
#include <asm/processor.h>
#include <linux/bitops.h>
#include <linux/threads.h>
#include <asm/pda.h>

extern pud_t level3_kernel_pgt[512];
extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pmd_t level2_fixmap_pgt[512];
extern pmd_t level2_ident_pgt[512];
extern pgd_t init_level4_pgt[];

#define swapper_pg_dir init_level4_pgt

extern void paging_init(void);

#endif /* !__ASSEMBLY__ */

#define SHARED_KERNEL_PMD	0

/*
 * PGDIR_SHIFT determines what a top-level page table entry can map
 */
#define PGDIR_SHIFT	39
#define PTRS_PER_PGD	512

/*
 * 3rd level page
 */
#define PUD_SHIFT	30
#define PTRS_PER_PUD	512

/*
 * PMD_SHIFT determines the size of the area a middle-level
 * page table can map
 */
#define PMD_SHIFT	21
#define PTRS_PER_PMD	512

/*
 * entries per page directory level
 */
#define PTRS_PER_PTE	512

#ifndef __ASSEMBLY__

#define pte_ERROR(e)					\
	printk("%s:%d: bad pte %p(%016lx).\n",		\
	       __FILE__, __LINE__, &(e), pte_val(e))
#define pmd_ERROR(e)					\
	printk("%s:%d: bad pmd %p(%016lx).\n",		\
	       __FILE__, __LINE__, &(e), pmd_val(e))
#define pud_ERROR(e)					\
	printk("%s:%d: bad pud %p(%016lx).\n",		\
	       __FILE__, __LINE__, &(e), pud_val(e))
#define pgd_ERROR(e)					\
	printk("%s:%d: bad pgd %p(%016lx).\n",		\
	       __FILE__, __LINE__, &(e), pgd_val(e))

#define pgd_none(x)	(!pgd_val(x))
#define pud_none(x)	(!pud_val(x))

struct mm_struct;

void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);

static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	*ptep = native_make_pte(0);
}

static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline pte_t native_ptep_get_and_clear(pte_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pte(xchg(&xp->pte, 0));
#else
	/* native_local_ptep_get_and_clear,
	   but duplicated because of cyclic dependency */
	pte_t ret = *xp;
	native_pte_clear(NULL, 0, xp);
	return ret;
#endif
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}

static inline void native_pmd_clear(pmd_t *pmd)
{
	native_set_pmd(pmd, native_make_pmd(0));
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
	*pudp = pud;
}

static inline void native_pud_clear(pud_t *pud)
{
	native_set_pud(pud, native_make_pud(0));
}

static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	*pgdp = pgd;
}

static inline void native_pgd_clear(pgd_t *pgd)
{
	native_set_pgd(pgd, native_make_pgd(0));
}

#define pte_same(a, b)	((a).pte == (b).pte)

#endif /* !__ASSEMBLY__ */

#define PMD_SIZE	(_AC(1, UL) << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))
#define PUD_SIZE	(_AC(1, UL) << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE - 1))
#define PGDIR_SIZE	(_AC(1, UL) << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE - 1))
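/*
 * For reference, a worked example derived from the definitions above: a pmd
 * entry maps PMD_SIZE (2 MB), a pud entry maps PUD_SIZE (1 GB) and a pgd
 * entry maps PGDIR_SIZE (512 GB).  A 48-bit virtual address decomposes as
 *
 *	pgd index = (addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)	  bits 47..39
 *	pud index = (addr >> PUD_SHIFT)   & (PTRS_PER_PUD - 1)	  bits 38..30
 *	pmd index = (addr >> PMD_SHIFT)   & (PTRS_PER_PMD - 1)	  bits 29..21
 *	pte index = (addr >> PAGE_SHIFT)  & (PTRS_PER_PTE - 1)	  bits 20..12
 *
 * with the remaining low PAGE_SHIFT bits giving the offset into the 4 KB page.
 */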
#define MAXMEM		_AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
#define VMALLOC_START	_AC(0xffffc20000000000, UL)
#define VMALLOC_END	_AC(0xffffe1ffffffffff, UL)
#define VMEMMAP_START	_AC(0xffffe20000000000, UL)
#define MODULES_VADDR	_AC(0xffffffffa0000000, UL)
#define MODULES_END	_AC(0xffffffffff000000, UL)
#define MODULES_LEN	(MODULES_END - MODULES_VADDR)

#ifndef __ASSEMBLY__

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_val(pgd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
}

static inline int pud_bad(pud_t pud)
{
	return (pud_val(pud) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
}

#define pte_none(x)	(!pte_val((x)))
#define pte_present(x)	(pte_val((x)) & (_PAGE_PRESENT | _PAGE_PROTNONE))

#define pages_to_mb(x)	((x) >> (20 - PAGE_SHIFT))	/* FIXME: is this right? */

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

/*
 * Level 4 access.
 */
#define pgd_page_vaddr(pgd)						\
	((unsigned long)__va((unsigned long)pgd_val((pgd)) & PTE_PFN_MASK))
#define pgd_page(pgd)		(pfn_to_page(pgd_val((pgd)) >> PAGE_SHIFT))
#define pgd_present(pgd)	(pgd_val(pgd) & _PAGE_PRESENT)
static inline int pgd_large(pgd_t pgd) { return 0; }
#define mk_kernel_pgd(address)	__pgd((address) | _KERNPG_TABLE)

/* PUD - Level 3 access */
/* to find an entry in a page-table-directory. */
#define pud_page_vaddr(pud)						\
	((unsigned long)__va(pud_val((pud)) & PHYSICAL_PAGE_MASK))
#define pud_page(pud)		(pfn_to_page(pud_val((pud)) >> PAGE_SHIFT))
#define pud_index(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
#define pud_offset(pgd, address)					\
	((pud_t *)pgd_page_vaddr(*(pgd)) + pud_index((address)))
#define pud_present(pud)	(pud_val((pud)) & _PAGE_PRESENT)

static inline int pud_large(pud_t pte)
{
	return (pud_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

/* PMD - Level 2 access */
#define pmd_page_vaddr(pmd)	((unsigned long)__va(pmd_val((pmd)) & PTE_PFN_MASK))
#define pmd_page(pmd)		(pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT))

#define pmd_index(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
#define pmd_offset(dir, address) ((pmd_t *)pud_page_vaddr(*(dir)) + \
				  pmd_index(address))
#define pmd_none(x)	(!pmd_val((x)))
#define pmd_present(x)	(pmd_val((x)) & _PAGE_PRESENT)
#define pfn_pmd(nr, prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val((prot))))
#define pmd_pfn(x)	((pmd_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT)

#define pte_to_pgoff(pte) ((pte_val((pte)) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
#define pgoff_to_pte(off) ((pte_t) { .pte = ((off) << PAGE_SHIFT) |	\
					    _PAGE_FILE })
#define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT

/* PTE - Level 1 access. */

/* page, protection -> pte */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn((page)), (pgprot))

#define pte_index(address)	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \
					 pte_index((address)))
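/*
 * Illustrative sketch only: assuming a pmd entry that is present and does
 * not map a 2 MB page, the pte covering @address is reached with the
 * accessors defined above.  The helper name is hypothetical and is not
 * used anywhere else.
 */
static inline pte_t *example_pte_lookup(pmd_t *pmd, unsigned long address)
{
	/* an empty or non-present pmd has no pte page to descend into */
	if (pmd_none(*pmd) || !pmd_present(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, address);
}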
/* x86-64 always has all page tables mapped. */
#define pte_offset_map(dir, address)	pte_offset_kernel((dir), (address))
#define pte_offset_map_nested(dir, address) pte_offset_kernel((dir), (address))
#define pte_unmap(pte) /* NOP */
#define pte_unmap_nested(pte) /* NOP */

#define update_mmu_cache(vma, address, pte) do { } while (0)

extern int direct_gbpages;

/* Encode and de-code a swap entry */
#if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE
#define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1)
#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1)
#else
#define SWP_TYPE_BITS (_PAGE_BIT_PROTNONE - _PAGE_BIT_PRESENT - 1)
#define SWP_OFFSET_SHIFT (_PAGE_BIT_FILE + 1)
#endif

#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)

#define __swp_type(x)			(((x).val >> (_PAGE_BIT_PRESENT + 1)) \
					 & ((1U << SWP_TYPE_BITS) - 1))
#define __swp_offset(x)			((x).val >> SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset)	((swp_entry_t) { \
					 ((type) << (_PAGE_BIT_PRESENT + 1)) \
					 | ((offset) << SWP_OFFSET_SHIFT) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val((pte)) })
#define __swp_entry_to_pte(x)		((pte_t) { .pte = (x).val })

extern int kern_addr_valid(unsigned long addr);
extern void cleanup_highmap(void);

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
	remap_pfn_range(vma, vaddr, pfn, size, prot)

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define pgtable_cache_init()	do { } while (0)
#define check_pgt_cache()	do { } while (0)

#define PAGE_AGP	PAGE_KERNEL_NOCACHE
#define HAVE_PAGE_AGP 1

/* fs/proc/kcore.c */
#define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
#define kc_offset_to_vaddr(o)				\
	(((o) & (1UL << (__VIRTUAL_MASK_SHIFT - 1)))	\
	 ? ((o) | ~__VIRTUAL_MASK)			\
	 : (o))

#define __HAVE_ARCH_PTE_SAME
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_64_H */