/*
 *  arch/arm/include/asm/pgtable.h
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H

#include <asm-generic/4level-fixup.h>
#include <asm/proc-fns.h>

#ifndef CONFIG_MMU

#include "pgtable-nommu.h"

#else

#include <asm/memory.h>
#include <mach/vmalloc.h>
#include <asm/pgtable-hwdef.h>

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * Note that platforms may override VMALLOC_START, but they must provide
 * VMALLOC_END.  VMALLOC_END defines the (exclusive) limit of this space,
 * which may not overlap IO space.
 */
#ifndef VMALLOC_START
#define VMALLOC_OFFSET		(8*1024*1024)
#define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#endif

/*
 * Hardware-wise, we have a two-level page table structure, where the first
 * level has 4096 entries, and the second level has 256 entries.  Each entry
 * is one 32-bit word.  Most of the bits in the second-level entry are used
 * by hardware, and there aren't any "accessed" and "dirty" bits.
 *
 * Linux, on the other hand, has a three-level page table structure, which
 * can be wrapped to fit a two-level page table structure easily - using the
 * PGD and PTE only.  However, Linux also expects one "PTE" table per page,
 * and at least a "dirty" bit.
 *
 * Therefore, we tweak the implementation slightly - we tell Linux that we
 * have 2048 entries in the first level, each of which is 8 bytes (iow, two
 * hardware pointers to the second level.)  The second level contains two
 * hardware PTE tables arranged contiguously, followed by Linux versions
 * which contain the state information Linux needs.  We, therefore, end up
 * with 512 entries in the "PTE" level.
 *
 * This leads to the page tables having the following layout:
 *
 *    pgd             pte
 * |        |
 * +--------+ +0
 * |        |-----> +------------+ +0
 * +- - - - + +4    |  h/w pt 0  |
 * |        |-----> +------------+ +1024
 * +--------+ +8    |  h/w pt 1  |
 * |        |       +------------+ +2048
 * +- - - - +       | Linux pt 0 |
 * |        |       +------------+ +3072
 * +--------+       | Linux pt 1 |
 * |        |       +------------+ +4096
 *
 * See L_PTE_xxx below for definitions of bits in the "Linux pt", and
 * PTE_xxx for definitions of bits appearing in the "h/w pt".
 *
 * PMD_xxx definitions refer to bits in the first-level page table.
 *
 * The "dirty" bit is emulated by only granting hardware write permission
 * iff the page is marked "writable" and "dirty" in the Linux PTE.  This
 * means that a write to a clean page will cause a permission fault, and
 * the Linux MM layer will mark the page dirty via handle_pte_fault().
 * For the hardware to notice the permission change, the TLB entry must
 * be flushed, and ptep_set_access_flags() does that for us.
 *
 * The "accessed" or "young" bit is emulated by a similar method; we only
 * allow accesses to the page if the "young" bit is set.  Accesses to the
 * page will cause a fault, and handle_pte_fault() will set the young bit
 * for us as long as the page is marked present in the corresponding Linux
 * PTE entry.  Again, ptep_set_access_flags() will ensure that the TLB is
 * up to date.
 *
 * However, when the "young" bit is cleared, we deny access to the page
 * by clearing the hardware PTE.  Currently Linux does not flush the TLB
 * for us in this case, which means the TLB will retain the translation
 * until either the TLB entry is evicted under pressure, or a context
 * switch which changes the user space mapping occurs.
 */
#define PTRS_PER_PTE		512
#define PTRS_PER_PMD		1
#define PTRS_PER_PGD		2048

/*
 * PMD_SHIFT determines the size of the area mapped by a second-level page
 * table; PGDIR_SHIFT determines the size of the area mapped by a first-level
 * (pgd) entry.  They are equal here because the pmd level is folded into
 * the pgd.
 */
#define PMD_SHIFT		21
#define PGDIR_SHIFT		21
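
/*
 * Illustrative example (not from the original header): with the
 * definitions above, a 32-bit virtual address splits as follows.  Each
 * of the 2048 pgd entries maps 1 << PGDIR_SHIFT == 2MB (4GB in total),
 * and the 512-entry "PTE" level covers that 2MB in 4kB pages.  For
 * addr == 0x401ff123, using pgd_index() and __pte_index() defined
 * further below:
 *
 *	pgd_index(addr)   == addr >> 21           == 0x200
 *	__pte_index(addr) == (addr >> 12) & 0x1ff == 0x1ff
 *	page offset       == addr & 0xfff         == 0x123
 */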

#define LIBRARY_TEXT_START	0x0c000000

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))
#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))
#endif /* !__ASSEMBLY__ */

#define PMD_SIZE		(1UL << PMD_SHIFT)
#define PMD_MASK		(~(PMD_SIZE-1))
#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))

/*
 * This is the lowest virtual address we can permit any user space
 * mapping to be mapped at.  This is particularly important for
 * non-high vector CPUs.
 */
#define FIRST_USER_ADDRESS	PAGE_SIZE

#define FIRST_USER_PGD_NR	1
#define USER_PTRS_PER_PGD	((TASK_SIZE/PGDIR_SIZE) - FIRST_USER_PGD_NR)

/*
 * Section address mask and size definitions.
 */
#define SECTION_SHIFT		20
#define SECTION_SIZE		(1UL << SECTION_SHIFT)
#define SECTION_MASK		(~(SECTION_SIZE-1))

/*
 * ARMv6 supersection address mask and size definitions.
 */
#define SUPERSECTION_SHIFT	24
#define SUPERSECTION_SIZE	(1UL << SUPERSECTION_SHIFT)
#define SUPERSECTION_MASK	(~(SUPERSECTION_SIZE-1))

/*
 * "Linux" PTE definitions.
 *
 * We keep two sets of PTEs - the hardware and the Linux version.
 * This allows greater flexibility in the way we map the Linux bits
 * onto the hardware tables, and allows us to have YOUNG and DIRTY
 * bits.
 *
 * The pointers in the pgd refer to the hardware tables; the "Linux"
 * entries are stored 2048 bytes further into the same page (see the
 * layout diagram above and pmd_page_vaddr() below).
 */
#define L_PTE_PRESENT		(1 << 0)
#define L_PTE_YOUNG		(1 << 1)
#define L_PTE_FILE		(1 << 2)	/* only when !PRESENT */
#define L_PTE_DIRTY		(1 << 6)
#define L_PTE_WRITE		(1 << 7)
#define L_PTE_USER		(1 << 8)
#define L_PTE_EXEC		(1 << 9)
#define L_PTE_SHARED		(1 << 10)	/* shared(v6), coherent(xsc3) */

/*
 * These are the memory types, defined to be compatible with
 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
 */
#define L_PTE_MT_UNCACHED	(0x00 << 2)	/* 0000 */
#define L_PTE_MT_BUFFERABLE	(0x01 << 2)	/* 0001 */
#define L_PTE_MT_WRITETHROUGH	(0x02 << 2)	/* 0010 */
#define L_PTE_MT_WRITEBACK	(0x03 << 2)	/* 0011 */
#define L_PTE_MT_MINICACHE	(0x06 << 2)	/* 0110 (sa1100, xscale) */
#define L_PTE_MT_WRITEALLOC	(0x07 << 2)	/* 0111 */
#define L_PTE_MT_DEV_SHARED	(0x04 << 2)	/* 0100 */
#define L_PTE_MT_DEV_NONSHARED	(0x0c << 2)	/* 1100 */
#define L_PTE_MT_DEV_WC		(0x09 << 2)	/* 1001 */
#define L_PTE_MT_DEV_CACHED	(0x0b << 2)	/* 1011 */
#define L_PTE_MT_MASK		(0x0f << 2)
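
/*
 * Illustrative example (not from the original header): a present,
 * young, dirty, user-writable, write-back cached page with page frame
 * number "pfn" would carry the Linux PTE value
 *
 *	(pfn << PAGE_SHIFT) | L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 *	L_PTE_WRITE | L_PTE_USER | L_PTE_MT_WRITEBACK
 *
 * which is what pfn_pte(pfn, prot) below produces for a pgprot value
 * containing those protection and memory-type bits.
 */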

#ifndef __ASSEMBLY__

/*
 * The pgprot_* and protection_map entries will be fixed up at runtime
 * to include the cacheable and bufferable bits based on memory policy,
 * as well as any architecture dependent bits like global/ASID and SMP
 * shared mapping bits.
 */
#define _L_PTE_DEFAULT	L_PTE_PRESENT | L_PTE_YOUNG

extern pgprot_t		pgprot_user;
extern pgprot_t		pgprot_kernel;

#define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b))

#define PAGE_NONE		pgprot_user
#define PAGE_SHARED		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE)
#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC)
#define PAGE_COPY		_MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC)
#define PAGE_READONLY		_MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC)
#define PAGE_KERNEL		pgprot_kernel
#define PAGE_KERNEL_EXEC	_MOD_PROT(pgprot_kernel, L_PTE_EXEC)

#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT)
#define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE)
#define __PAGE_SHARED_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC)
#define __PAGE_COPY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_COPY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC)
#define __PAGE_READONLY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_READONLY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC)

#endif /* __ASSEMBLY__ */

/*
 * The table below defines the page protection levels that we insert into
 * our Linux page table version.  These get translated into the best that
 * the architecture can perform.  Note that on most ARM hardware:
 *  1) we cannot do execute protection
 *  2) if we could do execute protection, then read is implied
 *  3) write implies read permissions
 */
#define __P000  __PAGE_NONE
#define __P001  __PAGE_READONLY
#define __P010  __PAGE_COPY
#define __P011  __PAGE_COPY
#define __P100  __PAGE_READONLY_EXEC
#define __P101  __PAGE_READONLY_EXEC
#define __P110  __PAGE_COPY_EXEC
#define __P111  __PAGE_COPY_EXEC

#define __S000  __PAGE_NONE
#define __S001  __PAGE_READONLY
#define __S010  __PAGE_SHARED
#define __S011  __PAGE_SHARED
#define __S100  __PAGE_READONLY_EXEC
#define __S101  __PAGE_READONLY_EXEC
#define __S110  __PAGE_SHARED_EXEC
#define __S111  __PAGE_SHARED_EXEC
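
/*
 * Illustrative note (generic Linux convention, not ARM-specific):
 * protection_map is indexed by the vma's VM_EXEC/VM_WRITE/VM_READ bits,
 * so __P<xwr> and __S<xwr> read as <exec><write><read>, with __P used
 * for private mappings and __S for shared ones.  A PROT_READ|PROT_WRITE
 * private mapping therefore selects __P011 == __PAGE_COPY
 * (copy-on-write), while the MAP_SHARED variant selects
 * __S011 == __PAGE_SHARED, which is genuinely writable.
 */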

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)

#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	(__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
#define pte_offset_kernel(dir,addr)	(pmd_page_vaddr(*(dir)) + __pte_index(addr))

#define pte_offset_map(dir,addr)	(__pte_map(dir) + __pte_index(addr))
#define pte_unmap(pte)			__pte_unmap(pte)

#ifndef CONFIG_HIGHPTE
#define __pte_map(dir)		pmd_page_vaddr(*(dir))
#define __pte_unmap(pte)	do { } while (0)
#else
#define __pte_map(dir)		((pte_t *)kmap_atomic(pmd_page(*(dir))) + PTRS_PER_PTE)
#define __pte_unmap(pte)	kunmap_atomic((pte - PTRS_PER_PTE))
#endif

#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)

#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
{
}
#else
extern void __sync_icache_dcache(pte_t pteval);
#endif

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	if (addr >= TASK_SIZE)
		set_pte_ext(ptep, pteval, 0);
	else {
		__sync_icache_dcache(pteval);
		set_pte_ext(ptep, pteval, PTE_EXT_NG);
	}
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
#define pte_write(pte)		(pte_val(pte) & L_PTE_WRITE)
#define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)
#define pte_exec(pte)		(pte_val(pte) & L_PTE_EXEC)
#define pte_special(pte)	(0)

#define pte_present_user(pte) \
	((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
	 (L_PTE_PRESENT | L_PTE_USER))

#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(wrprotect, &= ~L_PTE_WRITE);
PTE_BIT_FUNC(mkwrite,   |= L_PTE_WRITE);
PTE_BIT_FUNC(mkclean,   &= ~L_PTE_DIRTY);
PTE_BIT_FUNC(mkdirty,   |= L_PTE_DIRTY);
PTE_BIT_FUNC(mkold,     &= ~L_PTE_YOUNG);
PTE_BIT_FUNC(mkyoung,   |= L_PTE_YOUNG);

static inline pte_t pte_mkspecial(pte_t pte) { return pte; }

#define __pgprot_modify(prot,mask,bits)	\
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)
#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#else
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED)
#endif
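
/*
 * Illustrative sketch (hypothetical driver code, not part of this
 * header): the pgprot_* modifiers above are typically applied in a
 * driver's mmap() handler before remapping device memory.  Here
 * "my_dev_phys" is a made-up device address:
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *		return io_remap_pfn_range(vma, vma->vm_start,
 *					  my_dev_phys >> PAGE_SHIFT,
 *					  vma->vm_end - vma->vm_start,
 *					  vma->vm_page_prot);
 *	}
 */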

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & 2)

#define copy_pmd(pmdpd,pmdps)		\
	do {				\
		pmdpd[0] = pmdps[0];	\
		pmdpd[1] = pmdps[1];	\
		flush_pmd_entry(pmdpd);	\
	} while (0)

#define pmd_clear(pmdp)			\
	do {				\
		pmdp[0] = __pmd(0);	\
		pmdp[1] = __pmd(0);	\
		clean_pmd_entry(pmdp);	\
	} while (0)

/*
 * Find the Linux PTE table: mask the pmd value down to the start of the
 * two hardware tables, then skip over them (2 * 1024 bytes) to reach
 * the Linux tables (see the layout diagram at the top of this file).
 */
static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	unsigned long ptr;

	ptr = pmd_val(pmd) & ~(PTRS_PER_PTE * sizeof(void *) - 1);
	ptr += PTRS_PER_PTE * sizeof(void *);

	return __va(ptr);
}

#define pmd_page(pmd)	pfn_to_page(__phys_to_pfn(pmd_val(pmd)))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page), prot)

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
#define pgd_none(pgd)		(0)
#define pgd_bad(pgd)		(0)
#define pgd_present(pgd)	(1)
#define pgd_clear(pgdp)		do { } while (0)
#define set_pgd(pgd,pgdp)	do { } while (0)

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)

#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir, addr)	((pmd_t *)(dir))

/* Find an entry in the third-level page table.. */
#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const unsigned long mask = L_PTE_EXEC | L_PTE_WRITE | L_PTE_USER;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * Encode and decode a swap entry.  Swap entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset --------------------> <- type --> 0 0 0
 *
 * This gives us up to 63 swap files and 32GB per swap file.  Note that
 * the offset field is always non-zero.
 */
#define __SWP_TYPE_SHIFT	3
#define __SWP_TYPE_BITS		6
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
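
/*
 * Worked example (illustrative only): __swp_entry(5, 0x1234) gives
 *
 *	val == (5 << 3) | (0x1234 << 9) == 0x246828
 *
 * so __swp_type() recovers (val >> 3) & 0x3f == 5 and __swp_offset()
 * recovers val >> 9 == 0x1234.  The three zero low bits keep
 * L_PTE_PRESENT and L_PTE_FILE clear, so a swap PTE is never mistaken
 * for a present page or a file entry.
 */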

/*
 * It is an error for the kernel to have more swap files than we can
 * encode in the PTEs.  This ensures that we know when MAX_SWAPFILES
 * is increased beyond what we presently support.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

/*
 * Encode and decode a file entry.  File entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <----------------------- offset ------------------------> 1 0 0
 */
#define pte_file(pte)		(pte_val(pte) & L_PTE_FILE)
#define pte_to_pgoff(x)		(pte_val(x) >> 3)
#define pgoff_to_pte(x)		__pte(((x) << 3) | L_PTE_FILE)

#define PTE_FILE_MAX_BITS	29

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>

/*
 * We provide our own arch_get_unmapped_area to cope with VIPT caches.
 */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'
 */
#define io_remap_pfn_range(vma,from,pfn,size,prot) \
	remap_pfn_range(vma, from, pfn, size, prot)

#define pgtable_cache_init() do { } while (0)

#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_MMU */

#endif /* _ASMARM_PGTABLE_H */