1 /* 2 * OpenRISC Linux 3 * 4 * Linux architectural port borrowing liberally from similar works of 5 * others. All original copyrights apply as per the original source 6 * declaration. 7 * 8 * OpenRISC implementation: 9 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> 10 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> 11 * et al. 12 * 13 * This program is free software; you can redistribute it and/or modify 14 * it under the terms of the GNU General Public License as published by 15 * the Free Software Foundation; either version 2 of the License, or 16 * (at your option) any later version. 17 */ 18 19 /* or32 pgtable.h - macros and functions to manipulate page tables 20 * 21 * Based on: 22 * include/asm-cris/pgtable.h 23 */ 24 25 #ifndef __ASM_OPENRISC_PGTABLE_H 26 #define __ASM_OPENRISC_PGTABLE_H 27 28 #define __ARCH_USE_5LEVEL_HACK 29 #include <asm-generic/pgtable-nopmd.h> 30 31 #ifndef __ASSEMBLY__ 32 #include <asm/mmu.h> 33 #include <asm/fixmap.h> 34 35 /* 36 * The Linux memory management assumes a three-level page table setup. On 37 * or32, we use that, but "fold" the mid level into the top-level page 38 * table. Since the MMU TLB is software loaded through an interrupt, it 39 * supports any page table structure, so we could have used a three-level 40 * setup, but for the amounts of memory we normally use, a two-level is 41 * probably more efficient. 42 * 43 * This file contains the functions and defines necessary to modify and use 44 * the or32 page table tree. 45 */ 46 47 extern void paging_init(void); 48 49 /* Certain architectures need to do special things when pte's 50 * within a page table are directly modified. Thus, the following 51 * hook is made available. 52 */ 53 #define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval)) 54 #define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval) 55 /* 56 * (pmds are folded into pgds so this doesn't get actually called, 57 * but the define is needed for a generic inline function.) 58 */ 59 #define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval) 60 61 #define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-2)) 62 #define PGDIR_SIZE (1UL << PGDIR_SHIFT) 63 #define PGDIR_MASK (~(PGDIR_SIZE-1)) 64 65 /* 66 * entries per page directory level: we use a two-level, so 67 * we don't really have any PMD directory physically. 68 * pointers are 4 bytes so we can use the page size and 69 * divide it by 4 (shift by 2). 70 */ 71 #define PTRS_PER_PTE (1UL << (PAGE_SHIFT-2)) 72 73 #define PTRS_PER_PGD (1UL << (32-PGDIR_SHIFT)) 74 75 /* calculate how many PGD entries a user-level program can use 76 * the first mappable virtual address is 0 77 * (TASK_SIZE is the maximum virtual address space) 78 */ 79 80 #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) 81 #define FIRST_USER_ADDRESS 0UL 82 83 /* 84 * Kernels own virtual memory area. 85 */ 86 87 /* 88 * The size and location of the vmalloc area are chosen so that modules 89 * placed in this area aren't more than a 28-bit signed offset from any 90 * kernel functions that they may need. This greatly simplifies handling 91 * of the relocations for l.j and l.jal instructions as we don't need to 92 * introduce any trampolines for reaching "distant" code. 93 * 94 * 64 MB of vmalloc area is comparable to what's available on other arches. 95 */ 96 97 #define VMALLOC_START (PAGE_OFFSET-0x04000000) 98 #define VMALLOC_END (PAGE_OFFSET) 99 #define VMALLOC_VMADDR(x) ((unsigned long)(x)) 100 101 /* Define some higher level generic page attributes. 102 * 103 * If you change _PAGE_CI definition be sure to change it in 104 * io.h for ioremap_nocache() too. 105 */ 106 107 /* 108 * An OR32 PTE looks like this: 109 * 110 * | 31 ... 10 | 9 | 8 ... 6 | 5 | 4 | 3 | 2 | 1 | 0 | 111 * Phys pg.num L PP Index D A WOM WBC CI CC 112 * 113 * L : link 114 * PPI: Page protection index 115 * D : Dirty 116 * A : Accessed 117 * WOM: Weakly ordered memory 118 * WBC: Write-back cache 119 * CI : Cache inhibit 120 * CC : Cache coherent 121 * 122 * The protection bits below should correspond to the layout of the actual 123 * PTE as per above 124 */ 125 126 #define _PAGE_CC 0x001 /* software: pte contains a translation */ 127 #define _PAGE_CI 0x002 /* cache inhibit */ 128 #define _PAGE_WBC 0x004 /* write back cache */ 129 #define _PAGE_WOM 0x008 /* weakly ordered memory */ 130 131 #define _PAGE_A 0x010 /* accessed */ 132 #define _PAGE_D 0x020 /* dirty */ 133 #define _PAGE_URE 0x040 /* user read enable */ 134 #define _PAGE_UWE 0x080 /* user write enable */ 135 136 #define _PAGE_SRE 0x100 /* superuser read enable */ 137 #define _PAGE_SWE 0x200 /* superuser write enable */ 138 #define _PAGE_EXEC 0x400 /* software: page is executable */ 139 #define _PAGE_U_SHARED 0x800 /* software: page is shared in user space */ 140 141 /* 0x001 is cache coherency bit, which should always be set to 142 * 1 - for SMP (when we support it) 143 * 0 - otherwise 144 * 145 * we just reuse this bit in software for _PAGE_PRESENT and 146 * force it to 0 when loading it into TLB. 147 */ 148 #define _PAGE_PRESENT _PAGE_CC 149 #define _PAGE_USER _PAGE_URE 150 #define _PAGE_WRITE (_PAGE_UWE | _PAGE_SWE) 151 #define _PAGE_DIRTY _PAGE_D 152 #define _PAGE_ACCESSED _PAGE_A 153 #define _PAGE_NO_CACHE _PAGE_CI 154 #define _PAGE_SHARED _PAGE_U_SHARED 155 #define _PAGE_READ (_PAGE_URE | _PAGE_SRE) 156 157 #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) 158 #define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED) 159 #define _PAGE_ALL (_PAGE_PRESENT | _PAGE_ACCESSED) 160 #define _KERNPG_TABLE \ 161 (_PAGE_BASE | _PAGE_SRE | _PAGE_SWE | _PAGE_ACCESSED | _PAGE_DIRTY) 162 163 #define PAGE_NONE __pgprot(_PAGE_ALL) 164 #define PAGE_READONLY __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE) 165 #define PAGE_READONLY_X __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_EXEC) 166 #define PAGE_SHARED \ 167 __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_UWE | _PAGE_SWE \ 168 | _PAGE_SHARED) 169 #define PAGE_SHARED_X \ 170 __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_UWE | _PAGE_SWE \ 171 | _PAGE_SHARED | _PAGE_EXEC) 172 #define PAGE_COPY __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE) 173 #define PAGE_COPY_X __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_EXEC) 174 175 #define PAGE_KERNEL \ 176 __pgprot(_PAGE_ALL | _PAGE_SRE | _PAGE_SWE \ 177 | _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC) 178 #define PAGE_KERNEL_RO \ 179 __pgprot(_PAGE_ALL | _PAGE_SRE \ 180 | _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC) 181 #define PAGE_KERNEL_NOCACHE \ 182 __pgprot(_PAGE_ALL | _PAGE_SRE | _PAGE_SWE \ 183 | _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC | _PAGE_CI) 184 185 #define __P000 PAGE_NONE 186 #define __P001 PAGE_READONLY_X 187 #define __P010 PAGE_COPY 188 #define __P011 PAGE_COPY_X 189 #define __P100 PAGE_READONLY 190 #define __P101 PAGE_READONLY_X 191 #define __P110 PAGE_COPY 192 #define __P111 PAGE_COPY_X 193 194 #define __S000 PAGE_NONE 195 #define __S001 PAGE_READONLY_X 196 #define __S010 PAGE_SHARED 197 #define __S011 PAGE_SHARED_X 198 #define __S100 PAGE_READONLY 199 #define __S101 PAGE_READONLY_X 200 #define __S110 PAGE_SHARED 201 #define __S111 PAGE_SHARED_X 202 203 /* zero page used for uninitialized stuff */ 204 extern unsigned long empty_zero_page[2048]; 205 #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) 206 207 /* number of bits that fit into a memory pointer */ 208 #define BITS_PER_PTR (8*sizeof(unsigned long)) 209 210 /* to align the pointer to a pointer address */ 211 #define PTR_MASK (~(sizeof(void *)-1)) 212 213 /* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */ 214 /* 64-bit machines, beware! SRB. */ 215 #define SIZEOF_PTR_LOG2 2 216 217 /* to find an entry in a page-table */ 218 #define PAGE_PTR(address) \ 219 ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK) 220 221 /* to set the page-dir */ 222 #define SET_PAGE_DIR(tsk, pgdir) 223 224 #define pte_none(x) (!pte_val(x)) 225 #define pte_present(x) (pte_val(x) & _PAGE_PRESENT) 226 #define pte_clear(mm, addr, xp) do { pte_val(*(xp)) = 0; } while (0) 227 228 #define pmd_none(x) (!pmd_val(x)) 229 #define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK)) != _KERNPG_TABLE) 230 #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) 231 #define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0) 232 233 /* 234 * The following only work if pte_present() is true. 235 * Undefined behaviour if not.. 236 */ 237 238 static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_READ; } 239 static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; } 240 static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; } 241 static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } 242 static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } 243 static inline int pte_special(pte_t pte) { return 0; } 244 static inline pte_t pte_mkspecial(pte_t pte) { return pte; } 245 246 static inline pte_t pte_wrprotect(pte_t pte) 247 { 248 pte_val(pte) &= ~(_PAGE_WRITE); 249 return pte; 250 } 251 252 static inline pte_t pte_rdprotect(pte_t pte) 253 { 254 pte_val(pte) &= ~(_PAGE_READ); 255 return pte; 256 } 257 258 static inline pte_t pte_exprotect(pte_t pte) 259 { 260 pte_val(pte) &= ~(_PAGE_EXEC); 261 return pte; 262 } 263 264 static inline pte_t pte_mkclean(pte_t pte) 265 { 266 pte_val(pte) &= ~(_PAGE_DIRTY); 267 return pte; 268 } 269 270 static inline pte_t pte_mkold(pte_t pte) 271 { 272 pte_val(pte) &= ~(_PAGE_ACCESSED); 273 return pte; 274 } 275 276 static inline pte_t pte_mkwrite(pte_t pte) 277 { 278 pte_val(pte) |= _PAGE_WRITE; 279 return pte; 280 } 281 282 static inline pte_t pte_mkread(pte_t pte) 283 { 284 pte_val(pte) |= _PAGE_READ; 285 return pte; 286 } 287 288 static inline pte_t pte_mkexec(pte_t pte) 289 { 290 pte_val(pte) |= _PAGE_EXEC; 291 return pte; 292 } 293 294 static inline pte_t pte_mkdirty(pte_t pte) 295 { 296 pte_val(pte) |= _PAGE_DIRTY; 297 return pte; 298 } 299 300 static inline pte_t pte_mkyoung(pte_t pte) 301 { 302 pte_val(pte) |= _PAGE_ACCESSED; 303 return pte; 304 } 305 306 /* 307 * Conversion functions: convert a page and protection to a page entry, 308 * and a page entry and page directory to the page they refer to. 309 */ 310 311 /* What actually goes as arguments to the various functions is less than 312 * obvious, but a rule of thumb is that struct page's goes as struct page *, 313 * really physical DRAM addresses are unsigned long's, and DRAM "virtual" 314 * addresses (the 0xc0xxxxxx's) goes as void *'s. 315 */ 316 317 static inline pte_t __mk_pte(void *page, pgprot_t pgprot) 318 { 319 pte_t pte; 320 /* the PTE needs a physical address */ 321 pte_val(pte) = __pa(page) | pgprot_val(pgprot); 322 return pte; 323 } 324 325 #define mk_pte(page, pgprot) __mk_pte(page_address(page), (pgprot)) 326 327 #define mk_pte_phys(physpage, pgprot) \ 328 ({ \ 329 pte_t __pte; \ 330 \ 331 pte_val(__pte) = (physpage) + pgprot_val(pgprot); \ 332 __pte; \ 333 }) 334 335 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 336 { 337 pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); 338 return pte; 339 } 340 341 342 /* 343 * pte_val refers to a page in the 0x0xxxxxxx physical DRAM interval 344 * __pte_page(pte_val) refers to the "virtual" DRAM interval 345 * pte_pagenr refers to the page-number counted starting from the virtual 346 * DRAM start 347 */ 348 349 static inline unsigned long __pte_page(pte_t pte) 350 { 351 /* the PTE contains a physical address */ 352 return (unsigned long)__va(pte_val(pte) & PAGE_MASK); 353 } 354 355 #define pte_pagenr(pte) ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT) 356 357 /* permanent address of a page */ 358 359 #define __page_address(page) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT)) 360 #define pte_page(pte) (mem_map+pte_pagenr(pte)) 361 362 /* 363 * only the pte's themselves need to point to physical DRAM (see above) 364 * the pagetable links are purely handled within the kernel SW and thus 365 * don't need the __pa and __va transformations. 366 */ 367 static inline void pmd_set(pmd_t *pmdp, pte_t *ptep) 368 { 369 pmd_val(*pmdp) = _KERNPG_TABLE | (unsigned long) ptep; 370 } 371 372 #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) 373 #define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) 374 375 /* to find an entry in a page-table-directory. */ 376 #define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) 377 378 #define __pgd_offset(address) pgd_index(address) 379 380 #define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address)) 381 382 /* to find an entry in a kernel page-table-directory */ 383 #define pgd_offset_k(address) pgd_offset(&init_mm, address) 384 385 #define __pmd_offset(address) \ 386 (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) 387 388 /* 389 * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE] 390 * 391 * this macro returns the index of the entry in the pte page which would 392 * control the given virtual address 393 */ 394 #define __pte_offset(address) \ 395 (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) 396 #define pte_offset_kernel(dir, address) \ 397 ((pte_t *) pmd_page_kernel(*(dir)) + __pte_offset(address)) 398 #define pte_offset_map(dir, address) \ 399 ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address)) 400 #define pte_offset_map_nested(dir, address) \ 401 pte_offset_map(dir, address) 402 403 #define pte_unmap(pte) do { } while (0) 404 #define pte_unmap_nested(pte) do { } while (0) 405 #define pte_pfn(x) ((unsigned long)(((x).pte)) >> PAGE_SHIFT) 406 #define pfn_pte(pfn, prot) __pte((((pfn) << PAGE_SHIFT)) | pgprot_val(prot)) 407 408 #define pte_ERROR(e) \ 409 printk(KERN_ERR "%s:%d: bad pte %p(%08lx).\n", \ 410 __FILE__, __LINE__, &(e), pte_val(e)) 411 #define pgd_ERROR(e) \ 412 printk(KERN_ERR "%s:%d: bad pgd %p(%08lx).\n", \ 413 __FILE__, __LINE__, &(e), pgd_val(e)) 414 415 extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* defined in head.S */ 416 417 /* 418 * or32 doesn't have any external MMU info: the kernel page 419 * tables contain all the necessary information. 420 * 421 * Actually I am not sure on what this could be used for. 422 */ 423 static inline void update_mmu_cache(struct vm_area_struct *vma, 424 unsigned long address, pte_t *pte) 425 { 426 } 427 428 /* __PHX__ FIXME, SWAP, this probably doesn't work */ 429 430 /* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e)) */ 431 /* Since the PAGE_PRESENT bit is bit 4, we can use the bits above */ 432 433 #define __swp_type(x) (((x).val >> 5) & 0x7f) 434 #define __swp_offset(x) ((x).val >> 12) 435 #define __swp_entry(type, offset) \ 436 ((swp_entry_t) { ((type) << 5) | ((offset) << 12) }) 437 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) 438 #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) 439 440 #define kern_addr_valid(addr) (1) 441 442 #include <asm-generic/pgtable.h> 443 444 /* 445 * No page table caches to initialise 446 */ 447 #define pgtable_cache_init() do { } while (0) 448 449 typedef pte_t *pte_addr_t; 450 451 #endif /* __ASSEMBLY__ */ 452 #endif /* __ASM_OPENRISC_PGTABLE_H */ 453