/*
 * arch/arm/include/asm/pgtable-2level.h
 *
 * Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASM_PGTABLE_2LEVEL_H
#define _ASM_PGTABLE_2LEVEL_H

#define __PAGETABLE_PMD_FOLDED

/*
 * Hardware-wise, we have a two level page table structure, where the first
 * level has 4096 entries, and the second level has 256 entries.  Each entry
 * is one 32-bit word.  Most of the bits in the second level entry are used
 * by hardware, and there aren't any "accessed" and "dirty" bits.
 *
 * Linux on the other hand has a three level page table structure, which can
 * be wrapped to fit a two level page table structure easily - using the PGD
 * and PTE only.  However, Linux also expects one "PTE" table per page, and
 * at least a "dirty" bit.
 *
 * Therefore, we tweak the implementation slightly - we tell Linux that we
 * have 2048 entries in the first level, each of which is 8 bytes (iow, two
 * hardware pointers to the second level.)  The second level contains two
 * hardware PTE tables arranged contiguously, preceded by Linux versions
 * which contain the state information Linux needs.  We, therefore, end up
 * with 512 entries in the "PTE" level.
 *
 * This leads to the page tables having the following layout:
 *
 *    pgd             pte
 * |        |
 * +--------+
 * |        |       +------------+ +0
 * +- - - - +       | Linux pt 0 |
 * |        |       +------------+ +1024
 * +--------+ +0    | Linux pt 1 |
 * |        |-----> +------------+ +2048
 * +- - - - + +4    |  h/w pt 0  |
 * |        |-----> +------------+ +3072
 * +--------+ +8    |  h/w pt 1  |
 * |        |       +------------+ +4096
 *
 * See L_PTE_xxx below for definitions of bits in the "Linux pt", and
 * PTE_xxx for definitions of bits appearing in the "h/w pt".
 *
 * PMD_xxx definitions refer to bits in the first level page table.
 *
 * The "dirty" bit is emulated by only granting hardware write permission
 * iff the page is marked "writable" and "dirty" in the Linux PTE.  This
 * means that a write to a clean page will cause a permission fault, and
 * the Linux MM layer will mark the page dirty via handle_pte_fault().
 * For the hardware to notice the permission change, the TLB entry must
 * be flushed, and ptep_set_access_flags() does that for us.
 *
 * The "accessed" or "young" bit is emulated by a similar method; we only
 * allow accesses to the page if the "young" bit is set.  Accesses to the
 * page will cause a fault, and handle_pte_fault() will set the young bit
 * for us as long as the page is marked present in the corresponding Linux
 * PTE entry.  Again, ptep_set_access_flags() will ensure that the TLB is
 * up to date.
 *
 * However, when the "young" bit is cleared, we deny access to the page
 * by clearing the hardware PTE.  Currently Linux does not flush the TLB
 * for us in this case, which means the TLB will retain the translation
 * until either the TLB entry is evicted under pressure, or a context
 * switch which changes the user space mapping occurs.
 */
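/*
 * To put rough numbers on the layout above: each 1024-byte hardware table
 * holds 256 4-byte entries mapping 256 x 4KB = 1MB, so one 8-byte "pgd"
 * entry (two hardware pointers) covers 2MB, and 2048 such entries span the
 * full 4GB address space.  A single 4KB "pte" page therefore holds
 * 2 x 1024 bytes of Linux tables followed by 2 x 1024 bytes of hardware
 * tables, giving the 512-entry Linux "PTE" level described above.
 */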
#define PTRS_PER_PTE		512
#define PTRS_PER_PMD		1
#define PTRS_PER_PGD		2048

#define PTE_HWTABLE_PTRS	(PTRS_PER_PTE)
#define PTE_HWTABLE_OFF		(PTE_HWTABLE_PTRS * sizeof(pte_t))
#define PTE_HWTABLE_SIZE	(PTRS_PER_PTE * sizeof(u32))

/*
 * PMD_SHIFT determines the size of the area a second-level page table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#define PMD_SHIFT		21
#define PGDIR_SHIFT		21

#define PMD_SIZE		(1UL << PMD_SHIFT)
#define PMD_MASK		(~(PMD_SIZE-1))
#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))

/*
 * section address mask and size definitions.
 */
#define SECTION_SHIFT		20
#define SECTION_SIZE		(1UL << SECTION_SHIFT)
#define SECTION_MASK		(~(SECTION_SIZE-1))

/*
 * ARMv6 supersection address mask and size definitions.
 */
#define SUPERSECTION_SHIFT	24
#define SUPERSECTION_SIZE	(1UL << SUPERSECTION_SHIFT)
#define SUPERSECTION_MASK	(~(SUPERSECTION_SIZE-1))

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

/*
 * "Linux" PTE definitions.
 *
 * We keep two sets of PTEs - the hardware and the linux version.
 * This allows greater flexibility in the way we map the Linux bits
 * onto the hardware tables, and allows us to have YOUNG and DIRTY
 * bits.
 *
 * The PTE table pointer refers to the hardware entries; the "Linux"
 * entries are stored 2048 bytes below.
 */
#define L_PTE_VALID		(_AT(pteval_t, 1) << 0)		/* Valid */
#define L_PTE_PRESENT		(_AT(pteval_t, 1) << 0)
#define L_PTE_YOUNG		(_AT(pteval_t, 1) << 1)
#define L_PTE_DIRTY		(_AT(pteval_t, 1) << 6)
#define L_PTE_RDONLY		(_AT(pteval_t, 1) << 7)
#define L_PTE_USER		(_AT(pteval_t, 1) << 8)
#define L_PTE_XN		(_AT(pteval_t, 1) << 9)
#define L_PTE_SHARED		(_AT(pteval_t, 1) << 10)	/* shared(v6), coherent(xsc3) */
#define L_PTE_NONE		(_AT(pteval_t, 1) << 11)

/*
 * These are the memory types, defined to be compatible with
 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
 */
#define L_PTE_MT_UNCACHED	(_AT(pteval_t, 0x00) << 2)	/* 0000 */
#define L_PTE_MT_BUFFERABLE	(_AT(pteval_t, 0x01) << 2)	/* 0001 */
#define L_PTE_MT_WRITETHROUGH	(_AT(pteval_t, 0x02) << 2)	/* 0010 */
#define L_PTE_MT_WRITEBACK	(_AT(pteval_t, 0x03) << 2)	/* 0011 */
#define L_PTE_MT_MINICACHE	(_AT(pteval_t, 0x06) << 2)	/* 0110 (sa1100, xscale) */
#define L_PTE_MT_WRITEALLOC	(_AT(pteval_t, 0x07) << 2)	/* 0111 */
#define L_PTE_MT_DEV_SHARED	(_AT(pteval_t, 0x04) << 2)	/* 0100 */
#define L_PTE_MT_DEV_NONSHARED	(_AT(pteval_t, 0x0c) << 2)	/* 1100 */
#define L_PTE_MT_DEV_WC		(_AT(pteval_t, 0x09) << 2)	/* 1001 */
#define L_PTE_MT_DEV_CACHED	(_AT(pteval_t, 0x0b) << 2)	/* 1011 */
#define L_PTE_MT_VECTORS	(_AT(pteval_t, 0x0f) << 2)	/* 1111 */
#define L_PTE_MT_MASK		(_AT(pteval_t, 0x0f) << 2)

#ifndef __ASSEMBLY__
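
/*
 * Purely illustrative sketch of the scheme above; the two helpers below
 * (example_hw_pte() and example_pte_hw_writable(), hypothetical names) are
 * not part of the kernel API.  Given a pointer into the "Linux" table, the
 * matching hardware slot lives PTE_HWTABLE_PTRS entries (PTE_HWTABLE_OFF
 * bytes) further on, and hardware write permission is only granted when
 * the Linux PTE is both writable and dirty.
 */
static inline pte_t *example_hw_pte(pte_t *lpte)
{
	/* h/w pt 0/1 follow Linux pt 0/1 in the same page (see layout above) */
	return lpte + PTE_HWTABLE_PTRS;
}

static inline int example_pte_hw_writable(pte_t pte)
{
	/* "dirty" emulation: writable (no L_PTE_RDONLY) and already dirty */
	return (pte_val(pte) & (L_PTE_RDONLY | L_PTE_DIRTY)) == L_PTE_DIRTY;
}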

/*
 * The "pud_xxx()" functions here are trivial when the pmd is folded into
 * the pud: the pud entry is never bad, always exists, and can't be set or
 * cleared.
 */
#define pud_none(pud)		(0)
#define pud_bad(pud)		(0)
#define pud_present(pud)	(1)
#define pud_clear(pudp)		do { } while (0)
#define set_pud(pud,pudp)	do { } while (0)

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
{
	return (pmd_t *)pud;
}

#define pmd_large(pmd)		(pmd_val(pmd) & 2)
#define pmd_bad(pmd)		(pmd_val(pmd) & 2)

#define copy_pmd(pmdpd,pmdps)		\
	do {				\
		pmdpd[0] = pmdps[0];	\
		pmdpd[1] = pmdps[1];	\
		flush_pmd_entry(pmdpd);	\
	} while (0)

#define pmd_clear(pmdp)			\
	do {				\
		pmdp[0] = __pmd(0);	\
		pmdp[1] = __pmd(0);	\
		clean_pmd_entry(pmdp);	\
	} while (0)

/* we don't need complex calculations here as the pmd is folded into the pgd */
#define pmd_addr_end(addr,end) (end)

#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
#define pte_special(pte)	(0)
static inline pte_t pte_mkspecial(pte_t pte) { return pte; }

/*
 * We don't have huge page support for short descriptors, for the moment
 * define empty stubs for use by pin_page_for_write.
 */
#define pmd_hugewillfault(pmd)	(0)
#define pmd_thp_or_huge(pmd)	(0)

#endif /* __ASSEMBLY__ */

#endif /* _ASM_PGTABLE_2LEVEL_H */
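
/*
 * Illustrative walk (a rough sketch, not part of this header): because the
 * pmd is folded, pud_offset()/pmd_offset() simply reinterpret the pgd slot,
 * so a software page table walk collapses to something like
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);	// one of 2048 8-byte entries
 *	pmd_t *pmd = pmd_offset(pud_offset(pgd, addr), addr);	// same slot
 *	pte_t *pte = pte_offset_kernel(pmd, addr);	// one of 512 Linux PTEs
 *
 * with pgd_offset(), pud_offset() and pte_offset_kernel() provided by the
 * generic pgtable code rather than by this file.
 */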