#ifndef _ASM_X86_PGTABLE_64_H
#define _ASM_X86_PGTABLE_64_H

#include <linux/const.h>
#include <asm/pgtable_64_types.h>

#ifndef __ASSEMBLY__

/*
 * This file contains the functions and defines necessary to modify and use
 * the x86-64 page table tree.
 */
#include <asm/processor.h>
#include <linux/bitops.h>
#include <linux/threads.h>

extern pud_t level3_kernel_pgt[512];
extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pmd_t level2_fixmap_pgt[512];
extern pmd_t level2_ident_pgt[512];
extern pte_t level1_fixmap_pgt[512];
extern pgd_t init_level4_pgt[];

#define swapper_pg_dir init_level4_pgt

extern void paging_init(void);

#define pte_ERROR(e)					\
	pr_err("%s:%d: bad pte %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pte_val(e))
#define pmd_ERROR(e)					\
	pr_err("%s:%d: bad pmd %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pmd_val(e))
#define pud_ERROR(e)					\
	pr_err("%s:%d: bad pud %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pud_val(e))

#if CONFIG_PGTABLE_LEVELS >= 5
#define p4d_ERROR(e)					\
	pr_err("%s:%d: bad p4d %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), p4d_val(e))
#endif

#define pgd_ERROR(e)					\
	pr_err("%s:%d: bad pgd %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pgd_val(e))

struct mm_struct;

void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte);
void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);

static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	*ptep = native_make_pte(0);
}

static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}

static inline void native_pmd_clear(pmd_t *pmd)
{
	native_set_pmd(pmd, native_make_pmd(0));
}

static inline pte_t native_ptep_get_and_clear(pte_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pte(xchg(&xp->pte, 0));
#else
	/*
	 * native_local_ptep_get_and_clear,
	 * but duplicated because of cyclic dependency
	 */
	pte_t ret = *xp;

	native_pte_clear(NULL, 0, xp);
	return ret;
#endif
}

static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pmd(xchg(&xp->pmd, 0));
#else
	/*
	 * native_local_pmdp_get_and_clear,
	 * but duplicated because of cyclic dependency
	 */
	pmd_t ret = *xp;

	native_pmd_clear(xp);
	return ret;
#endif
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
	*pudp = pud;
}

static inline void native_pud_clear(pud_t *pud)
{
	native_set_pud(pud, native_make_pud(0));
}

static inline pud_t native_pudp_get_and_clear(pud_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pud(xchg(&xp->pud, 0));
#else
	/*
	 * native_local_pudp_get_and_clear,
	 * but duplicated because of cyclic dependency
	 */
	pud_t ret = *xp;

	native_pud_clear(xp);
	return ret;
#endif
}

static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	*p4dp = p4d;
}

static inline void native_p4d_clear(p4d_t *p4d)
{
#ifdef CONFIG_X86_5LEVEL
	native_set_p4d(p4d, native_make_p4d(0));
#else
	native_set_p4d(p4d, (p4d_t) { .pgd = native_make_pgd(0)});
#endif
}
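
/*
 * Without CONFIG_X86_5LEVEL the p4d level is folded into the pgd
 * (see <asm-generic/pgtable-nop4d.h>): p4d_t is then a structure
 * wrapping a pgd_t and native_make_p4d() is not defined, which is
 * why native_p4d_clear() above has to build the empty entry through
 * the .pgd member instead.
 */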

static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	*pgdp = pgd;
}

static inline void native_pgd_clear(pgd_t *pgd)
{
	native_set_pgd(pgd, native_make_pgd(0));
}

extern void sync_global_pgds(unsigned long start, unsigned long end);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

/*
 * Level 4 access.
 */
static inline int pgd_large(pgd_t pgd) { return 0; }
#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)

/* PUD - Level 3 access */

/* PMD - Level 2 access */

/* PTE - Level 1 access. */

/* x86-64 always has all page tables mapped. */
#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
#define pte_unmap(pte) ((void)(pte))/* NOP */

/*
 * Encode and de-code a swap entry
 *
 * |     ...            | 11| 10|  9|8|7|6|5| 4| 3|2|1|0| <- bit number
 * |     ...            |SW3|SW2|SW1|G|L|D|A|CD|WT|U|W|P| <- bit names
 * | OFFSET (14->63) | TYPE (9-13)  |0|X|X|X| X| X|X|X|0| <- swp entry
 *
 * G (8) is aliased and used as a PROT_NONE indicator for
 * !present ptes.  We need to start storing swap entries above
 * there.  We also need to avoid using A and D because of an
 * erratum where they can be incorrectly set by hardware on
 * non-present PTEs.
 */
#define SWP_TYPE_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
#define SWP_TYPE_BITS 5
/* Place the offset above the type: */
#define SWP_OFFSET_FIRST_BIT (SWP_TYPE_FIRST_BIT + SWP_TYPE_BITS)

#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)

#define __swp_type(x)			(((x).val >> (SWP_TYPE_FIRST_BIT)) \
					 & ((1U << SWP_TYPE_BITS) - 1))
#define __swp_offset(x)			((x).val >> SWP_OFFSET_FIRST_BIT)
#define __swp_entry(type, offset)	((swp_entry_t) { \
					 ((type) << (SWP_TYPE_FIRST_BIT)) \
					 | ((offset) << SWP_OFFSET_FIRST_BIT) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val((pte)) })
#define __swp_entry_to_pte(x)		((pte_t) { .pte = (x).val })

extern int kern_addr_valid(unsigned long addr);
extern void cleanup_highmap(void);

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define pgtable_cache_init()   do { } while (0)
#define check_pgt_cache()      do { } while (0)

#define PAGE_AGP    PAGE_KERNEL_NOCACHE
#define HAVE_PAGE_AGP 1

/* fs/proc/kcore.c */
#define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
#define kc_offset_to_vaddr(o) ((o) | ~__VIRTUAL_MASK)

#define __HAVE_ARCH_PTE_SAME

#define vmemmap ((struct page *)VMEMMAP_START)

extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_64_H */