#ifndef _ASM_POWERPC_PAGE_H
#define _ASM_POWERPC_PAGE_H

/*
 * Copyright (C) 2001,2005 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/kernel.h>
#else
#include <asm/types.h>
#endif
#include <asm/asm-compat.h>
#include <asm/kdump.h>

/*
 * On regular PPC32 the page size is 4K (but we support 4K/16K/64K/256K
 * pages on PPC44x). For PPC64 we support either 4K or 64K software
 * page size. When using 64K pages, whether or not the hardware really
 * supports 64K pages is irrelevant to these definitions.
 */
#if defined(CONFIG_PPC_256K_PAGES)
#define PAGE_SHIFT		18
#elif defined(CONFIG_PPC_64K_PAGES)
#define PAGE_SHIFT		16
#elif defined(CONFIG_PPC_16K_PAGES)
#define PAGE_SHIFT		14
#else
#define PAGE_SHIFT		12
#endif

#define PAGE_SIZE		(ASM_CONST(1) << PAGE_SHIFT)

#ifndef __ASSEMBLY__
#ifdef CONFIG_HUGETLB_PAGE
extern unsigned int HPAGE_SHIFT;
#else
#define HPAGE_SHIFT PAGE_SHIFT
#endif
#define HPAGE_SIZE		((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
#define HUGE_MAX_HSTATE		(MMU_PAGE_COUNT - 1)
#endif

/*
 * Subtle: (1 << PAGE_SHIFT) is an int, not an unsigned long. So if we
 * assign PAGE_MASK to a larger type it gets extended the way we want
 * (i.e. with 1s in the high bits).
 */
#define PAGE_MASK      (~((1 << PAGE_SHIFT) - 1))

/*
 * KERNELBASE is the virtual address of the start of the kernel. It is
 * often the same as PAGE_OFFSET, but _might not be_.
 *
 * The kdump dump kernel is one example where KERNELBASE != PAGE_OFFSET.
 *
 * PAGE_OFFSET is the virtual address of the start of lowmem.
 *
 * PHYSICAL_START is the physical address of the start of the kernel.
 *
 * MEMORY_START is the physical address of the start of lowmem.
 *
 * KERNELBASE, PAGE_OFFSET, and PHYSICAL_START are all configurable on
 * ppc32, and based on how they are set we determine MEMORY_START.
 *
 * For the linear mapping the following equation should be true:
 *
 *	KERNELBASE - PAGE_OFFSET = PHYSICAL_START - MEMORY_START
 *
 * Also, KERNELBASE >= PAGE_OFFSET and PHYSICAL_START >= MEMORY_START.
 *
 * There are two ways to determine the virtual address corresponding to
 * a physical one:
 *
 *	va = pa + PAGE_OFFSET - MEMORY_START
 *	va = pa + KERNELBASE - PHYSICAL_START
 *
 * If you want to know something's offset from the start of the kernel
 * you should subtract KERNELBASE.
 *
 * If you want to test if something's a kernel address, use
 * is_kernel_addr().
 */
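/*
 * A worked instance of the identities above, with hypothetical
 * kdump-style values (these particular numbers are illustrative and not
 * taken from any defconfig): KERNELBASE = 0xc2000000, PAGE_OFFSET =
 * 0xc0000000, PHYSICAL_START = 0x02000000, MEMORY_START = 0.
 *
 * The linear-map equation holds:
 *
 *	KERNELBASE - PAGE_OFFSET      = 0xc2000000 - 0xc0000000 = 0x02000000
 *	PHYSICAL_START - MEMORY_START = 0x02000000 - 0x00000000 = 0x02000000
 *
 * and both translations of pa = 0x02100000 agree:
 *
 *	va = pa + PAGE_OFFSET - MEMORY_START    = 0xc2100000
 *	va = pa + KERNELBASE - PHYSICAL_START   = 0xc2100000
 */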
#define KERNELBASE      ASM_CONST(CONFIG_KERNEL_START)
#define PAGE_OFFSET	ASM_CONST(CONFIG_PAGE_OFFSET)
#define LOAD_OFFSET	ASM_CONST((CONFIG_KERNEL_START-CONFIG_PHYSICAL_START))

#if defined(CONFIG_NONSTATIC_KERNEL)
#ifndef __ASSEMBLY__

extern phys_addr_t memstart_addr;
extern phys_addr_t kernstart_addr;

#ifdef CONFIG_RELOCATABLE_PPC32
extern long long virt_phys_offset;
#endif

#endif /* __ASSEMBLY__ */
#define PHYSICAL_START	kernstart_addr

#else	/* !CONFIG_NONSTATIC_KERNEL */
#define PHYSICAL_START	ASM_CONST(CONFIG_PHYSICAL_START)
#endif

/* See the description below for VIRT_PHYS_OFFSET */
#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
#ifdef CONFIG_RELOCATABLE
#define VIRT_PHYS_OFFSET virt_phys_offset
#else
#define VIRT_PHYS_OFFSET (KERNELBASE - PHYSICAL_START)
#endif
#endif

#ifdef CONFIG_PPC64
#define MEMORY_START	0UL
#elif defined(CONFIG_NONSTATIC_KERNEL)
#define MEMORY_START	memstart_addr
#else
#define MEMORY_START	(PHYSICAL_START + PAGE_OFFSET - KERNELBASE)
#endif

#ifdef CONFIG_FLATMEM
#define ARCH_PFN_OFFSET		((unsigned long)(MEMORY_START >> PAGE_SHIFT))
#define pfn_valid(pfn)		((pfn) >= ARCH_PFN_OFFSET && (pfn) < max_mapnr)
#endif

#define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)
#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
#define virt_addr_valid(kaddr)	pfn_valid(virt_to_pfn(kaddr))
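/*
 * A short worked example of the pfn helpers above, with hypothetical
 * values: assuming 4K pages and MEMORY_START = 0x20000000 (lowmem
 * starting at 512M),
 *
 *	ARCH_PFN_OFFSET = 0x20000000 >> 12 = 0x20000
 *
 * so the first valid pfn is 0x20000, and for a lowmem address kaddr:
 *
 *	virt_to_pfn(kaddr) = __pa(kaddr) >> 12
 *	pfn_to_kaddr(virt_to_pfn(kaddr))
 *
 * rounds kaddr down to its page boundary.
 */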
/*
 * On Book-E parts we need __va to parse the device tree and we can't
 * determine MEMORY_START until then. However we can determine
 * PHYSICAL_START from information at hand (program counter, TLB lookup).
 *
 * On BookE with RELOCATABLE (RELOCATABLE_PPC32)
 *
 * With RELOCATABLE_PPC32, we support loading the kernel at any physical
 * address without any restriction on the page alignment.
 *
 * We find the runtime address of _stext and relocate ourselves based on
 * the following calculation:
 *
 *	virtual_base = ALIGN_DOWN(KERNELBASE, 256M) +
 *			MODULO(_stext.run, 256M)
 *
 * and create the following mapping:
 *
 *	ALIGN_DOWN(_stext.run, 256M) => ALIGN_DOWN(KERNELBASE, 256M)
 *
 * When we process relocations, we cannot depend on the existing equation
 * for the __va()/__pa() translations:
 *
 *	__va(x) = (x) - PHYSICAL_START + KERNELBASE
 *
 * where:
 *	PHYSICAL_START = kernstart_addr = physical address of _stext
 *	KERNELBASE = compiled virtual address of _stext
 *
 * This formula holds true only if the kernel load address is TLB-page
 * aligned.
 *
 * In our case, we need to also account for the shift in the kernel
 * virtual address.
 *
 * E.g., let the kernel be loaded at 64MB and KERNELBASE be 0xc0000000
 * (the same as PAGE_OFFSET). In this case, we would be mapping 0 to
 * 0xc0000000, and kernstart_addr = 64M.
 *
 * Now __va(1MB) = (0x100000) - (0x4000000) + 0xc0000000
 *		 = 0xbc100000, which is wrong.
 *
 * Rather, it should be 0xc0000000 + 0x100000 = 0xc0100000
 * according to our mapping.
 *
 * Hence we use the following formula to get the translations right:
 *
 *	__va(x) = (x) - [ PHYSICAL_START - Effective KERNELBASE ]
 *
 * where:
 *	PHYSICAL_START = dynamic load address (the kernstart_addr variable)
 *	Effective KERNELBASE = virtual_base
 *			     = ALIGN_DOWN(KERNELBASE, 256M) +
 *				MODULO(PHYSICAL_START, 256M)
 *
 * To keep __va()/__pa() cheap, we introduce a new variable,
 * virt_phys_offset, which holds:
 *
 *	virt_phys_offset = Effective KERNELBASE - PHYSICAL_START
 *			 = ALIGN_DOWN(KERNELBASE, 256M) -
 *			    ALIGN_DOWN(PHYSICAL_START, 256M)
 *
 * Hence:
 *
 *	__va(x) = x - PHYSICAL_START + Effective KERNELBASE
 *		= x + virt_phys_offset
 *
 * and
 *
 *	__pa(x) = x + PHYSICAL_START - Effective KERNELBASE
 *		= x - virt_phys_offset
 *
 * On non-Book-E PPC64, PAGE_OFFSET and MEMORY_START are constants, so
 * we use the other definitions for __va & __pa.
 */
#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
#define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET)
#else
#ifdef CONFIG_PPC64
/*
 * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET
 * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
 */
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET))
#define __pa(x) ((unsigned long)(x) & 0x0fffffffffffffffUL)

#else /* 32-bit, non Book E */
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START))
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
#endif
#endif

/*
 * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
 * and needs to be executable. This means the whole heap ends
 * up being executable.
 */
#define VM_DATA_DEFAULT_FLAGS32	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_DATA_DEFAULT_FLAGS64	(VM_READ | VM_WRITE | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#ifdef __powerpc64__
#include <asm/page_64.h>
#else
#include <asm/page_32.h>
#endif

/* align addr on a size boundary - adjust address up/down if needed */
#define _ALIGN_UP(addr, size)	__ALIGN_KERNEL(addr, size)
#define _ALIGN_DOWN(addr, size)	((addr) & (~((typeof(addr))(size) - 1)))

/* align addr on a size boundary - adjust address up if needed */
#define _ALIGN(addr, size)	_ALIGN_UP(addr, size)

/*
 * Don't compare things with KERNELBASE or PAGE_OFFSET to test for
 * "kernelness"; use is_kernel_addr() - it should do what you want.
 */
#ifdef CONFIG_PPC_BOOK3E_64
#define is_kernel_addr(x)	((x) >= 0x8000000000000000ul)
#else
#define is_kernel_addr(x)	((x) >= PAGE_OFFSET)
#endif

#ifndef CONFIG_PPC_BOOK3S_64
/*
 * Use the top bit of the higher-level page table entries to indicate
 * whether the entries we point to contain hugepages. This works because
 * we know that the page tables live in kernel space. If we ever decide
 * to support having page tables at arbitrary addresses, this breaks and
 * will have to change.
 */
#ifdef CONFIG_PPC64
#define PD_HUGE 0x8000000000000000
#else
#define PD_HUGE 0x80000000
#endif
#endif /* CONFIG_PPC_BOOK3S_64 */
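/*
 * To illustrate the PD_HUGE trick (hypothetical values, a sketch rather
 * than anything stored by real code): on PPC64 a pointer to a
 * next-level page table is a kernel virtual address such as
 * 0xc000000012345000, which always has bit 63 (PD_HUGE) set. An entry
 * describing hugepages can therefore be stored with that bit clear and
 * told apart with a simple signed comparison, which is what the
 * non-Book3S hugepd_ok() below does:
 *
 *	0xc000000012345000	top bit set   -> page-table pointer
 *	0x4000000012345011	top bit clear -> hugepage directory entry
 */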
/*
 * Some number of bits at the level of the page table that points to a
 * hugepte are used to encode the size. This masks those bits.
 */
#define HUGEPD_SHIFT_MASK     0x3f

#ifndef __ASSEMBLY__

#ifdef CONFIG_STRICT_MM_TYPECHECKS
/* These are used to make use of C type-checking. */

/* PTE level */
typedef struct { pte_basic_t pte; } pte_t;
#define __pte(x)	((pte_t) { (x) })
static inline pte_basic_t pte_val(pte_t x)
{
	return x.pte;
}

/*
 * 64k pages additionally define a bigger "real PTE" type that gathers
 * the "second half" part of the PTE for pseudo 64k pages.
 */
#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_STD_MMU_64)
typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
#else
typedef struct { pte_t pte; } real_pte_t;
#endif

/* PMD level */
#ifdef CONFIG_PPC64
typedef struct { unsigned long pmd; } pmd_t;
#define __pmd(x)	((pmd_t) { (x) })
static inline unsigned long pmd_val(pmd_t x)
{
	return x.pmd;
}

/* The PUD level exists only with 4k pages */
#ifndef CONFIG_PPC_64K_PAGES
typedef struct { unsigned long pud; } pud_t;
#define __pud(x)	((pud_t) { (x) })
static inline unsigned long pud_val(pud_t x)
{
	return x.pud;
}
#endif /* !CONFIG_PPC_64K_PAGES */
#endif /* CONFIG_PPC64 */

/* PGD level */
typedef struct { unsigned long pgd; } pgd_t;
#define __pgd(x)	((pgd_t) { (x) })
static inline unsigned long pgd_val(pgd_t x)
{
	return x.pgd;
}

/* Page protection bits */
typedef struct { unsigned long pgprot; } pgprot_t;
#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) })

#else

/*
 * .. while these make it easier on the compiler
 */

typedef pte_basic_t pte_t;
#define __pte(x)	(x)
static inline pte_basic_t pte_val(pte_t pte)
{
	return pte;
}

#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_STD_MMU_64)
typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
#else
typedef pte_t real_pte_t;
#endif

#ifdef CONFIG_PPC64
typedef unsigned long pmd_t;
#define __pmd(x)	(x)
static inline unsigned long pmd_val(pmd_t pmd)
{
	return pmd;
}

#ifndef CONFIG_PPC_64K_PAGES
typedef unsigned long pud_t;
#define __pud(x)	(x)
static inline unsigned long pud_val(pud_t pud)
{
	return pud;
}
#endif /* !CONFIG_PPC_64K_PAGES */
#endif /* CONFIG_PPC64 */

typedef unsigned long pgd_t;
#define __pgd(x)	(x)
static inline unsigned long pgd_val(pgd_t pgd)
{
	return pgd;
}

typedef unsigned long pgprot_t;
#define pgprot_val(x)	(x)
#define __pgprot(x)	(x)

#endif
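/*
 * A minimal sketch of what STRICT_MM_TYPECHECKS buys (hypothetical
 * usage, not code from this file): because each level is a distinct
 * struct type, mixing levels no longer compiles.
 *
 *	pte_t pte = __pte(0x5);
 *	pte_basic_t raw = pte_val(pte);	// explicit unwrap: fine
 *	pgd_t pgd = pte;		// compile error with
 *					// STRICT_MM_TYPECHECKS; silently
 *					// accepted when both are plain
 *					// integer typedefs
 */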
typedef struct { signed long pd; } hugepd_t;

#ifdef CONFIG_HUGETLB_PAGE
#ifdef CONFIG_PPC_BOOK3S_64
#ifdef CONFIG_PPC_64K_PAGES
/*
 * With a 64k page size we have hugepage ptes in the pgd and pmd
 * entries, so we don't need to set up a hugepage directory for them.
 * Our pte and page-directory formats allow this. But to avoid errors
 * when implementing new features, hugepd is disabled for 64K; a debug
 * version is enabled here so that wrong usage is caught.
 */
#ifdef CONFIG_DEBUG_VM
extern int hugepd_ok(hugepd_t hpd);
#else
#define hugepd_ok(x)	(0)
#endif
#else
static inline int hugepd_ok(hugepd_t hpd)
{
	/*
	 * hugepd pointer, bottom two bits == 00 and next 4 bits
	 * indicate size of table
	 */
	return (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0));
}
#endif
#else
static inline int hugepd_ok(hugepd_t hpd)
{
	return (hpd.pd > 0);
}
#endif

#define is_hugepd(hpd)		(hugepd_ok(hpd))
#define pgd_huge pgd_huge
int pgd_huge(pgd_t pgd);
#else /* CONFIG_HUGETLB_PAGE */
#define is_hugepd(pdep)		0
#define pgd_huge(pgd)		0
#endif /* CONFIG_HUGETLB_PAGE */
#define __hugepd(x) ((hugepd_t) { (x) })

struct page;
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr,
		struct page *p);
extern int page_is_ram(unsigned long pfn);
extern int devmem_is_allowed(unsigned long pfn);

#ifdef CONFIG_PPC_SMLPAR
void arch_free_page(struct page *page, int order);
#define HAVE_ARCH_FREE_PAGE
#endif

struct vm_area_struct;

#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC64)
typedef pte_t *pgtable_t;
#else
typedef struct page *pgtable_t;
#endif

#include <asm-generic/memory_model.h>
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PAGE_H */