// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/mmu.c
 *
 *  Copyright (C) 1995-2005 Russell King
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/cachetype.h>
#include <asm/fixmap.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/tlb.h>
#include <asm/highmem.h>
#include <asm/system_info.h>
#include <asm/traps.h>
#include <asm/procinfo.h>
#include <asm/memory.h>
#include <asm/pgalloc.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/pci.h>
#include <asm/fixmap.h>

#include "fault.h"
#include "mm.h"
#include "tcm.h"

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

/*
 * The pmd table for the upper-most set of pages.
 */
pmd_t *top_pmd;

pmdval_t user_pmd_table = _PAGE_USER_TABLE;

#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_user;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_user);
EXPORT_SYMBOL(pgprot_kernel);

struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	pmdval_t	pmd;
	pteval_t	pte;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.cr_mask	= CR_W|CR_C,
		.pmd		= PMD_SECT_UNCACHED,
		.pte		= L_PTE_MT_UNCACHED,
	}, {
		.policy		= "buffered",
		.cr_mask	= CR_C,
		.pmd		= PMD_SECT_BUFFERED,
		.pte		= L_PTE_MT_BUFFERABLE,
	}, {
		.policy		= "writethrough",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WT,
		.pte		= L_PTE_MT_WRITETHROUGH,
	}, {
		.policy		= "writeback",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WB,
		.pte		= L_PTE_MT_WRITEBACK,
	}, {
		.policy		= "writealloc",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WBWA,
		.pte		= L_PTE_MT_WRITEALLOC,
	}
};

#ifdef CONFIG_CPU_CP15
static unsigned long initial_pmd_value __initdata = 0;

/*
 * Initialise the cache_policy variable with the initial state specified
 * via the "pmd" value.  This is used to ensure that on ARMv6 and later,
 * the C code sets the page tables up with the same policy as the head
 * assembly code, which avoids an illegal state where the TLBs can get
 * confused.  See comments in early_cachepolicy() for more information.
 */
void __init init_default_cache_policy(unsigned long pmd)
{
	int i;

	initial_pmd_value = pmd;

	pmd &= PMD_SECT_CACHE_MASK;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++)
		if (cache_policies[i].pmd == pmd) {
			cachepolicy = i;
			break;
		}

	if (i == ARRAY_SIZE(cache_policies))
		pr_err("ERROR: could not find cache policy\n");
}

/*
 * These are useful for identifying cache coherency problems by allowing
 * the cache or the cache and writebuffer to be turned off.  (Note: the
 * write buffer should not be on and the cache off).
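 *
 * For example, booting with "cachepolicy=uncached" on the kernel command
 * line selects the "uncached" entry from cache_policies[] above.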
 */
static int __init early_cachepolicy(char *p)
{
	int i, selected = -1;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(p, cache_policies[i].policy, len) == 0) {
			selected = i;
			break;
		}
	}

	if (selected == -1)
		pr_err("ERROR: unknown or unsupported cache policy\n");

	/*
	 * This restriction is partly to do with the way we boot; it is
	 * unpredictable to have memory mapped using two different sets of
	 * memory attributes (shared, type, and cache attribs).  We can not
	 * change these attributes once the initial assembly has setup the
	 * page tables.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6 && selected != cachepolicy) {
		pr_warn("Only cachepolicy=%s supported on ARMv6 and later\n",
			cache_policies[cachepolicy].policy);
		return 0;
	}

	if (selected != cachepolicy) {
		unsigned long cr = __clear_cr(cache_policies[selected].cr_mask);
		cachepolicy = selected;
		flush_cache_all();
		set_cr(cr);
	}
	return 0;
}
early_param("cachepolicy", early_cachepolicy);

static int __init early_nocache(char *__unused)
{
	char *p = "buffered";
	pr_warn("nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nocache", early_nocache);

static int __init early_nowrite(char *__unused)
{
	char *p = "uncached";
	pr_warn("nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nowb", early_nowrite);

#ifndef CONFIG_ARM_LPAE
static int __init early_ecc(char *p)
{
	if (memcmp(p, "on", 2) == 0)
		ecc_mask = PMD_PROTECTION;
	else if (memcmp(p, "off", 3) == 0)
		ecc_mask = 0;
	return 0;
}
early_param("ecc", early_ecc);
#endif

#else /* ifdef CONFIG_CPU_CP15 */

static int __init early_cachepolicy(char *p)
{
	pr_warn("cachepolicy kernel parameter not supported without cp15\n");
	return 0;
}
early_param("cachepolicy", early_cachepolicy);

static int __init noalign_setup(char *__unused)
{
	pr_warn("noalign kernel parameter not supported without cp15\n");
	return 1;
}
__setup("noalign", noalign_setup);

#endif	/* ifdef CONFIG_CPU_CP15 / else */

#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
#define PROT_PTE_S2_DEVICE	PROT_PTE_DEVICE
#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE

static struct mem_type mem_types[] __ro_after_init = {
	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
				  L_PTE_SHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_CACHED] = {	  /* ioremap_cache */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_WC] = {	  /* ioremap_wc */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_UNCACHED] = {
		.prot_pte	= PROT_PTE_DEVICE,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.domain		= DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.domain		= DOMAIN_KERNEL,
	},
#ifndef CONFIG_ARM_LPAE
	[MT_MINICLEAN] = {
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
		.domain		= DOMAIN_KERNEL,
	},
#endif
	[MT_LOW_VECTORS] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				  L_PTE_RDONLY,
		.prot_l1	= PMD_TYPE_TABLE,
		.domain		= DOMAIN_VECTORS,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				  L_PTE_USER | L_PTE_RDONLY,
		.prot_l1	= PMD_TYPE_TABLE,
		.domain		= DOMAIN_VECTORS,
	},
	[MT_MEMORY_RWX] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_MEMORY_RW] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				  L_PTE_XN,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect	= PMD_TYPE_SECT,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_MEMORY_RWX_NONCACHED] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				  L_PTE_MT_BUFFERABLE,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_MEMORY_RW_DTCM] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				  L_PTE_XN,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_MEMORY_RWX_ITCM] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1	= PMD_TYPE_TABLE,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_MEMORY_RW_SO] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				  L_PTE_MT_UNCACHED | L_PTE_XN,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
				  PMD_SECT_UNCACHED | PMD_SECT_XN,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_MEMORY_DMA_READY] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				  L_PTE_XN,
		.prot_l1	= PMD_TYPE_TABLE,
		.domain		= DOMAIN_KERNEL,
	},
};

const struct mem_type *get_mem_type(unsigned int type)
{
	return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
}
EXPORT_SYMBOL(get_mem_type);

static pte_t *(*pte_offset_fixmap)(pmd_t *dir, unsigned long addr);

static pte_t bm_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS]
	__aligned(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE) __initdata;

static pte_t * __init pte_offset_early_fixmap(pmd_t *dir, unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

static pte_t *pte_offset_late_fixmap(pmd_t *dir, unsigned long addr)
{
	return pte_offset_kernel(dir, addr);
}

static inline pmd_t * __init fixmap_pmd(unsigned long addr)
{
	return pmd_off_k(addr);
}

void __init early_fixmap_init(void)
{
	pmd_t *pmd;

	/*
	 * The early fixmap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	BUILD_BUG_ON((__fix_to_virt(__end_of_early_ioremap_region) >> PMD_SHIFT)
		     != FIXADDR_TOP >> PMD_SHIFT);

	pmd = fixmap_pmd(FIXADDR_TOP);
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	pte_offset_fixmap = pte_offset_early_fixmap;
}

/*
 * To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range().
 * As a result, this can only be called with preemption disabled, as under
 * stop_machine().
 */
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{
	unsigned long vaddr = __fix_to_virt(idx);
	pte_t *pte = pte_offset_fixmap(pmd_off_k(vaddr), vaddr);

	/* Make sure fixmap region does not exceed available allocation. */
	BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
		     FIXADDR_END);
	BUG_ON(idx >= __end_of_fixed_addresses);

	/* we only support device mappings until pgprot_kernel has been set */
	if (WARN_ON(pgprot_val(prot) != pgprot_val(FIXMAP_PAGE_IO) &&
		    pgprot_val(pgprot_kernel) == 0))
		return;

	if (pgprot_val(prot))
		set_pte_at(NULL, vaddr, pte,
			   pfn_pte(phys >> PAGE_SHIFT, prot));
	else
		pte_clear(NULL, vaddr, pte);
	local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
}

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
	int cpu_arch = cpu_architecture();
	int i;

	if (cpu_arch < CPU_ARCH_ARMv6) {
#if defined(CONFIG_CPU_DCACHE_DISABLE)
		if (cachepolicy > CPOLICY_BUFFERED)
			cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
		if (cachepolicy > CPOLICY_WRITETHROUGH)
			cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	}
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}

	if (is_smp()) {
		if (cachepolicy != CPOLICY_WRITEALLOC) {
			pr_warn("Forcing write-allocate cache policy for SMP\n");
			cachepolicy = CPOLICY_WRITEALLOC;
		}
		if (!(initial_pmd_value & PMD_SECT_S)) {
			pr_warn("Forcing shared mappings for SMP\n");
			initial_pmd_value |= PMD_SECT_S;
		}
	}

	/*
	 * Strip out features not present on earlier architectures.
	 * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
	 * without extended page tables don't have the 'Shared' bit.
	 */
	if (cpu_arch < CPU_ARCH_ARMv5)
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
	if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_S;

	/*
	 * ARMv5 and lower, bit 4 must be set for page tables (was: cache
	 * "update-able on write" bit on ARM610).  However, Xscale and
	 * Xscale3 require this bit to be cleared.
	 */
	if (cpu_is_xscale_family()) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			mem_types[i].prot_sect &= ~PMD_BIT4;
			mem_types[i].prot_l1 &= ~PMD_BIT4;
		}
	} else if (cpu_arch < CPU_ARCH_ARMv6) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
			if (mem_types[i].prot_sect)
				mem_types[i].prot_sect |= PMD_BIT4;
		}
	}

	/*
	 * Mark the device areas according to the CPU/architecture.
	 */
	if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
		if (!cpu_is_xsc3()) {
			/*
			 * Mark device regions on ARMv6+ as execute-never
			 * to prevent speculative instruction fetches.
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;

			/* Also setup NX memory mapping */
			mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
		}
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/*
			 * For ARMv7 with TEX remapping,
			 * - shared device is SXCB=1100
			 * - nonshared device is SXCB=0100
			 * - write combine device mem is SXCB=0001
			 * (Uncached Normal memory)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
		} else if (cpu_is_xsc3()) {
			/*
			 * For Xscale3,
			 * - shared device is TEXCB=00101
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Inner/Outer Uncacheable in xsc3 parlance)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		} else {
			/*
			 * For ARMv6 and ARMv7 without TEX remapping,
			 * - shared device is TEXCB=00001
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Uncached Normal in ARMv6 parlance).
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		}
	} else {
		/*
		 * On others, write combining is "Uncached/Buffered"
		 */
		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
	}

	/*
	 * Now deal with the memory-type mappings
	 */
	cp = &cache_policies[cachepolicy];
	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;

#ifndef CONFIG_ARM_LPAE
	/*
	 * We don't use domains on ARMv6 (since this causes problems with
	 * v6/v7 kernels), so we must use a separate memory type for user
	 * r/o, kernel r/w to map the vectors page.
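	 * (That separate type is L_PTE_MT_VECTORS, applied just below for
	 * the ARMv6 case.)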
	 */
	if (cpu_arch == CPU_ARCH_ARMv6)
		vecs_pgprot |= L_PTE_MT_VECTORS;

	/*
	 * Check whether the CPU supports the PXN bit
	 * in the Short-descriptor translation table format descriptors.
	 */
	if (cpu_arch == CPU_ARCH_ARMv7 &&
		(read_cpuid_ext(CPUID_EXT_MMFR0) & 0xF) >= 4) {
		user_pmd_table |= PMD_PXNTABLE;
	}
#endif

	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
#ifndef CONFIG_ARM_LPAE
		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
#endif

		/*
		 * If the initial page tables were created with the S bit
		 * set, then we need to do the same here for the same
		 * reasons given in early_cachepolicy().
		 */
		if (initial_pmd_value & PMD_SECT_S) {
			user_pgprot |= L_PTE_SHARED;
			kern_pgprot |= L_PTE_SHARED;
			vecs_pgprot |= L_PTE_SHARED;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
		}
	}

	/*
	 * Non-cacheable Normal - intended for memory areas that must
	 * not cause dirty cache line writebacks when used
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6) {
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/* Non-cacheable Normal is XCB = 001 */
			mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
				PMD_SECT_BUFFERED;
		} else {
			/* For both ARMv6 and non-TEX-remapping ARMv7 */
			mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
				PMD_SECT_TEX(1);
		}
	} else {
		mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
	}

#ifdef CONFIG_ARM_LPAE
	/*
	 * Do not generate access flag faults for the kernel mappings.
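	 * (The access flag is pre-set on every kernel pte/pmd entry below,
	 * so the hardware never has to raise a fault to set it.)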
	 */
	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		mem_types[i].prot_pte |= PTE_EXT_AF;
		if (mem_types[i].prot_sect)
			mem_types[i].prot_sect |= PMD_SECT_AF;
	}
	kern_pgprot |= PTE_EXT_AF;
	vecs_pgprot |= PTE_EXT_AF;

	/*
	 * Set PXN for user mappings
	 */
	user_pgprot |= PTE_EXT_PXN;
#endif

	for (i = 0; i < 16; i++) {
		pteval_t v = pgprot_val(protection_map[i]);
		protection_map[i] = __pgprot(v | user_pgprot);
	}

	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;

	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | kern_pgprot);

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	pr_info("Memory policy: %sData cache %s\n",
		ecc_mask ? "ECC enabled, " : "", cp->policy);

	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		struct mem_type *t = &mem_types[i];
		if (t->prot_l1)
			t->prot_l1 |= PMD_DOMAIN(t->domain);
		if (t->prot_sect)
			t->prot_sect |= PMD_DOMAIN(t->domain);
	}
}

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
#endif

#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)

static void __init *early_alloc(unsigned long sz)
{
	void *ptr = memblock_alloc(sz, sz);

	if (!ptr)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, sz, sz);

	return ptr;
}

static void *__init late_alloc(unsigned long sz)
{
	void *ptr = (void *)__get_free_pages(GFP_PGTABLE_KERNEL, get_order(sz));

	if (!ptr || !pgtable_pte_page_ctor(virt_to_page(ptr)))
		BUG();
	return ptr;
}

static pte_t * __init arm_pte_alloc(pmd_t *pmd, unsigned long addr,
				    unsigned long prot,
				    void *(*alloc)(unsigned long sz))
{
	if (pmd_none(*pmd)) {
		pte_t *pte = alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
		__pmd_populate(pmd, __pa(pte), prot);
	}
	BUG_ON(pmd_bad(*pmd));
	return pte_offset_kernel(pmd, addr);
}

static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr,
				      unsigned long prot)
{
	return arm_pte_alloc(pmd, addr, prot, early_alloc);
}

static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  const struct mem_type *type,
				  void *(*alloc)(unsigned long sz),
				  bool ng)
{
	pte_t *pte = arm_pte_alloc(pmd, addr, type->prot_l1, alloc);
	do {
		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)),
			    ng ? PTE_EXT_NG : 0);
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
				      unsigned long end, phys_addr_t phys,
				      const struct mem_type *type, bool ng)
{
	pmd_t *p = pmd;

#ifndef CONFIG_ARM_LPAE
	/*
	 * In classic MMU format, puds and pmds are folded in to
	 * the pgds. pmd_offset gives the PGD entry. PGDs refer to a
	 * group of L1 entries making up one logical pointer to
	 * an L2 table (2MB), whereas PMDs refer to the individual
	 * L1 entries (1MB). Hence increment to get the correct
	 * offset for odd 1MB sections.
	 * (See arch/arm/include/asm/pgtable-2level.h)
	 */
	if (addr & SECTION_SIZE)
		pmd++;
#endif
	do {
		*pmd = __pmd(phys | type->prot_sect | (ng ? PMD_SECT_nG : 0));
		phys += SECTION_SIZE;
	} while (pmd++, addr += SECTION_SIZE, addr != end);

	flush_pmd_entry(p);
}

static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
				  unsigned long end, phys_addr_t phys,
				  const struct mem_type *type,
				  void *(*alloc)(unsigned long sz), bool ng)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	unsigned long next;

	do {
		/*
		 * With LPAE, we must loop over to map
		 * all the pmds for the given range.
		 */
		next = pmd_addr_end(addr, end);

		/*
		 * Try a section mapping - addr, next and phys must all be
		 * aligned to a section boundary.
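		 * Otherwise we fall back to mapping the range with
		 * individual second-level pte entries via alloc_init_pte().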
		 */
		if (type->prot_sect &&
				((addr | next | phys) & ~SECTION_MASK) == 0) {
			__map_init_section(pmd, addr, next, phys, type, ng);
		} else {
			alloc_init_pte(pmd, addr, next,
				       __phys_to_pfn(phys), type, alloc, ng);
		}

		phys += next - addr;

	} while (pmd++, addr = next, addr != end);
}

static void __init alloc_init_pud(p4d_t *p4d, unsigned long addr,
				  unsigned long end, phys_addr_t phys,
				  const struct mem_type *type,
				  void *(*alloc)(unsigned long sz), bool ng)
{
	pud_t *pud = pud_offset(p4d, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		alloc_init_pmd(pud, addr, next, phys, type, alloc, ng);
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}

static void __init alloc_init_p4d(pgd_t *pgd, unsigned long addr,
				  unsigned long end, phys_addr_t phys,
				  const struct mem_type *type,
				  void *(*alloc)(unsigned long sz), bool ng)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	unsigned long next;

	do {
		next = p4d_addr_end(addr, end);
		alloc_init_pud(p4d, addr, next, phys, type, alloc, ng);
		phys += next - addr;
	} while (p4d++, addr = next, addr != end);
}

#ifndef CONFIG_ARM_LPAE
static void __init create_36bit_mapping(struct mm_struct *mm,
					struct map_desc *md,
					const struct mem_type *type,
					bool ng)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	pgd_t *pgd;

	addr = md->virtual;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length);

	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
		pr_err("MM: CPU does not support supersection mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to ensure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if (type->domain) {
		pr_err("MM: invalid domain in supersection mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
		pr_err("MM: cannot create mapping for 0x%08llx at 0x%08lx invalid alignment\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/*
	 * Shift bits [35:32] of address into bits [23:20] of PMD
	 * (See ARMv6 spec).
	 */
	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);

	pgd = pgd_offset(mm, addr);
	end = addr + length;
	do {
		p4d_t *p4d = p4d_offset(pgd, addr);
		pud_t *pud = pud_offset(p4d, addr);
		pmd_t *pmd = pmd_offset(pud, addr);
		int i;

		for (i = 0; i < 16; i++)
			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER |
				       (ng ? PMD_SECT_nG : 0));

		addr += SUPERSECTION_SIZE;
		phys += SUPERSECTION_SIZE;
		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
	} while (addr != end);
}
#endif	/* !CONFIG_ARM_LPAE */

static void __init __create_mapping(struct mm_struct *mm, struct map_desc *md,
				    void *(*alloc)(unsigned long sz),
				    bool ng)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	const struct mem_type *type;
	pgd_t *pgd;

	type = &mem_types[md->type];

#ifndef CONFIG_ARM_LPAE
	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		create_36bit_mapping(mm, md, type, ng);
		return;
	}
#endif

	addr = md->virtual & PAGE_MASK;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));

	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
		pr_warn("BUG: map for 0x%08llx at 0x%08lx can not be mapped using pages, ignoring.\n",
			(long long)__pfn_to_phys(md->pfn), addr);
		return;
	}

	pgd = pgd_offset(mm, addr);
	end = addr + length;
	do {
		unsigned long next = pgd_addr_end(addr, end);

		alloc_init_p4d(pgd, addr, next, phys, type, alloc, ng);

		phys += next - addr;
		addr = next;
	} while (pgd++, addr != end);
}

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
static void __init create_mapping(struct map_desc *md)
{
	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		pr_warn("BUG: not creating mapping for 0x%08llx at 0x%08lx in user region\n",
			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
		pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
	}

	__create_mapping(&init_mm, md, early_alloc, false);
}

void __init create_mapping_late(struct mm_struct *mm, struct map_desc *md,
				bool ng)
{
#ifdef CONFIG_ARM_LPAE
	p4d_t *p4d;
	pud_t *pud;

	p4d = p4d_alloc(mm, pgd_offset(mm, md->virtual), md->virtual);
	if (WARN_ON(!p4d))
		return;
	pud = pud_alloc(mm, p4d, md->virtual);
	if (WARN_ON(!pud))
		return;
	pmd_alloc(mm, pud, 0);
#endif
	__create_mapping(mm, md, late_alloc, ng);
}

/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	struct map_desc *md;
	struct vm_struct *vm;
	struct static_vm *svm;

	if (!nr)
		return;

	svm = memblock_alloc(sizeof(*svm) * nr, __alignof__(*svm));
	if (!svm)
		panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
		      __func__, sizeof(*svm) * nr, __alignof__(*svm));

	for (md = io_desc; nr; md++, nr--) {
		create_mapping(md);

		vm = &svm->vm;
		vm->addr = (void *)(md->virtual & PAGE_MASK);
		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
		vm->phys_addr = __pfn_to_phys(md->pfn);
		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
		vm->flags |= VM_ARM_MTYPE(md->type);
		vm->caller = iotable_init;
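		/*
		 * Record the static mapping so that later ioremap()/vmalloc()
		 * users avoid (or can reuse) this virtual range.
		 */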
		add_static_vm_early(svm++);
	}
}

void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
				  void *caller)
{
	struct vm_struct *vm;
	struct static_vm *svm;

	svm = memblock_alloc(sizeof(*svm), __alignof__(*svm));
	if (!svm)
		panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
		      __func__, sizeof(*svm), __alignof__(*svm));

	vm = &svm->vm;
	vm->addr = (void *)addr;
	vm->size = size;
	vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
	vm->caller = caller;
	add_static_vm_early(svm);
}

#ifndef CONFIG_ARM_LPAE

/*
 * The Linux PMD is made of two consecutive section entries covering 2MB
 * (see definition in include/asm/pgtable-2level.h).  However a call to
 * create_mapping() may optimize static mappings by using individual
 * 1MB section mappings.  This leaves the actual PMD potentially half
 * initialized if the top or bottom section entry isn't used, leaving it
 * open to problems if a subsequent ioremap() or vmalloc() tries to use
 * the virtual space left free by that unused section entry.
 *
 * Let's avoid the issue by inserting dummy vm entries covering the unused
 * PMD halves once the static mappings are in place.
 */

static void __init pmd_empty_section_gap(unsigned long addr)
{
	vm_reserve_area_early(addr, SECTION_SIZE, pmd_empty_section_gap);
}

static void __init fill_pmd_gaps(void)
{
	struct static_vm *svm;
	struct vm_struct *vm;
	unsigned long addr, next = 0;
	pmd_t *pmd;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;
		addr = (unsigned long)vm->addr;
		if (addr < next)
			continue;

		/*
		 * Check if this vm starts on an odd section boundary.
		 * If so and the first section entry for this PMD is free
		 * then we block the corresponding virtual address.
		 */
		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
			pmd = pmd_off_k(addr);
			if (pmd_none(*pmd))
				pmd_empty_section_gap(addr & PMD_MASK);
		}

		/*
		 * Then check if this vm ends on an odd section boundary.
		 * If so and the second section entry for this PMD is empty
		 * then we block the corresponding virtual address.
		 */
		addr += vm->size;
		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
			pmd = pmd_off_k(addr) + 1;
			if (pmd_none(*pmd))
				pmd_empty_section_gap(addr);
		}

		/* no need to look at any vm entry until we hit the next PMD */
		next = (addr + PMD_SIZE - 1) & PMD_MASK;
	}
}

#else
#define fill_pmd_gaps() do { } while (0)
#endif

#if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)
static void __init pci_reserve_io(void)
{
	struct static_vm *svm;

	svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE);
	if (svm)
		return;

	vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);
}
#else
#define pci_reserve_io() do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LL
void __init debug_ll_io_init(void)
{
	struct map_desc map;

	debug_ll_addr(&map.pfn, &map.virtual);
	if (!map.pfn || !map.virtual)
		return;
	map.pfn = __phys_to_pfn(map.pfn);
	map.virtual &= PAGE_MASK;
	map.length = PAGE_SIZE;
	map.type = MT_DEVICE;
	iotable_init(&map, 1);
}
#endif

static void * __initdata vmalloc_min =
	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes. This can be used to increase (or decrease) the vmalloc
 * area - the default is 240m.
 */
static int __init early_vmalloc(char *arg)
{
	unsigned long vmalloc_reserve = memparse(arg, NULL);

	if (vmalloc_reserve < SZ_16M) {
		vmalloc_reserve = SZ_16M;
		pr_warn("vmalloc area too small, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
		vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
		pr_warn("vmalloc area is too big, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
	return 0;
}
early_param("vmalloc", early_vmalloc);

phys_addr_t arm_lowmem_limit __initdata = 0;

void __init adjust_lowmem_bounds(void)
{
	phys_addr_t memblock_limit = 0;
	u64 vmalloc_limit;
	struct memblock_region *reg;
	phys_addr_t lowmem_limit = 0;

	/*
	 * Let's use our own (unoptimized) equivalent of __pa() that is
	 * not affected by wrap-arounds when sizeof(phys_addr_t) == 4.
	 * The result is used as the upper bound on physical memory address
	 * and may itself be outside the valid range for which phys_addr_t
	 * and therefore __pa() is defined.
	 */
	vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET;

	/*
	 * The first usable region must be PMD aligned. Mark its start
	 * as MEMBLOCK_NOMAP if it isn't
	 */
	for_each_memblock(memory, reg) {
		if (!memblock_is_nomap(reg)) {
			if (!IS_ALIGNED(reg->base, PMD_SIZE)) {
				phys_addr_t len;

				len = round_up(reg->base, PMD_SIZE) - reg->base;
				memblock_mark_nomap(reg->base, len);
			}
			break;
		}
	}

	for_each_memblock(memory, reg) {
		phys_addr_t block_start = reg->base;
		phys_addr_t block_end = reg->base + reg->size;

		if (memblock_is_nomap(reg))
			continue;

		if (reg->base < vmalloc_limit) {
			if (block_end > lowmem_limit)
				/*
				 * Compare as u64 to ensure vmalloc_limit does
				 * not get truncated. block_end should always
				 * fit in phys_addr_t so there should be no
				 * issue with assignment.
				 */
				lowmem_limit = min_t(u64,
						     vmalloc_limit,
						     block_end);

			/*
			 * Find the first non-pmd-aligned page, and point
			 * memblock_limit at it. This relies on rounding the
			 * limit down to be pmd-aligned, which happens at the
			 * end of this function.
			 *
			 * With this algorithm, the start or end of almost any
			 * bank can be non-pmd-aligned. The only exception is
			 * that the start of the bank 0 must be section-
			 * aligned, since otherwise memory would need to be
			 * allocated when mapping the start of bank 0, which
			 * occurs before any free memory is mapped.
			 */
			if (!memblock_limit) {
				if (!IS_ALIGNED(block_start, PMD_SIZE))
					memblock_limit = block_start;
				else if (!IS_ALIGNED(block_end, PMD_SIZE))
					memblock_limit = lowmem_limit;
			}

		}
	}

	arm_lowmem_limit = lowmem_limit;

	high_memory = __va(arm_lowmem_limit - 1) + 1;

	if (!memblock_limit)
		memblock_limit = arm_lowmem_limit;

	/*
	 * Round the memblock limit down to a pmd size.  This
	 * helps to ensure that we will allocate memory from the
	 * last full pmd, which should be mapped.
	 */
	memblock_limit = round_down(memblock_limit, PMD_SIZE);

	if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) {
		if (memblock_end_of_DRAM() > arm_lowmem_limit) {
			phys_addr_t end = memblock_end_of_DRAM();

			pr_notice("Ignoring RAM at %pa-%pa\n",
				  &memblock_limit, &end);
			pr_notice("Consider using a HIGHMEM enabled kernel.\n");

			memblock_remove(memblock_limit, end - memblock_limit);
		}
	}

	memblock_set_current_limit(memblock_limit);
}

static inline void prepare_page_table(void)
{
	unsigned long addr;
	phys_addr_t end;

	/*
	 * Clear out all the mappings below the kernel image.
	 */
	for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

#ifdef CONFIG_XIP_KERNEL
	/* The XIP kernel is mapped in the module area -- skip over it */
	addr = ((unsigned long)_exiprom + PMD_SIZE - 1) & PMD_MASK;
#endif
	for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Find the end of the first block of lowmem.
	 */
	end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
	if (end >= arm_lowmem_limit)
		end = arm_lowmem_limit;

	/*
	 * Clear out all the kernel space mappings, except for the first
	 * memory bank, up to the vmalloc region.
	 */
	for (addr = __phys_to_virt(end);
	     addr < VMALLOC_START; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));
}

#ifdef CONFIG_ARM_LPAE
/* the first page is reserved for pgd */
#define SWAPPER_PG_DIR_SIZE	(PAGE_SIZE + \
				 PTRS_PER_PGD * PTRS_PER_PMD * sizeof(pmd_t))
#else
#define SWAPPER_PG_DIR_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
#endif

/*
 * Reserve the special regions of memory
 */
void __init arm_mm_memblock_reserve(void)
{
	/*
	 * Reserve the page tables.  These are already in use,
	 * and can only be in node 0.
	 */
	memblock_reserve(__pa(swapper_pg_dir), SWAPPER_PG_DIR_SIZE);

#ifdef CONFIG_SA1111
	/*
	 * Because of the SA1111 DMA bug, we want to preserve our
	 * precious DMA-able memory...
	 */
	memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
#endif
}

/*
 * Set up the device mappings.  Since we clear out the page tables for all
 * mappings above VMALLOC_START, except early fixmap, we might remove debug
 * device mappings.  This means earlycon can be used to debug this function.
 * Any other function or debugging method which may touch any device _will_
 * crash the kernel.
 */
static void __init devicemaps_init(const struct machine_desc *mdesc)
{
	struct map_desc map;
	unsigned long addr;
	void *vectors;

	/*
	 * Allocate the vector page early.
	 */
	vectors = early_alloc(PAGE_SIZE * 2);

	early_trap_init(vectors);

	/*
	 * Clear page table except top pmd used by early fixmaps
	 */
	for (addr = VMALLOC_START; addr < (FIXADDR_TOP & PMD_MASK); addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Map the kernel if it is XIP.
	 * It is always first in the modulearea.
	 */
#ifdef CONFIG_XIP_KERNEL
	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
	map.virtual = MODULES_VADDR;
	map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK;
	map.type = MT_ROM;
	create_mapping(&map);
#endif

	/*
	 * Map the cache flushing regions.
	 */
#ifdef FLUSH_BASE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
	map.virtual = FLUSH_BASE;
	map.length = SZ_1M;
	map.type = MT_CACHECLEAN;
	create_mapping(&map);
#endif
#ifdef FLUSH_BASE_MINICACHE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
	map.virtual = FLUSH_BASE_MINICACHE;
	map.length = SZ_1M;
	map.type = MT_MINICLEAN;
	create_mapping(&map);
#endif

	/*
	 * Create a mapping for the machine vectors at the high-vectors
	 * location (0xffff0000).  If we aren't using high-vectors, also
	 * create a mapping at the low-vectors virtual address.
	 */
	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
	map.virtual = 0xffff0000;
	map.length = PAGE_SIZE;
#ifdef CONFIG_KUSER_HELPERS
	map.type = MT_HIGH_VECTORS;
#else
	map.type = MT_LOW_VECTORS;
#endif
	create_mapping(&map);

	if (!vectors_high()) {
		map.virtual = 0;
		map.length = PAGE_SIZE * 2;
		map.type = MT_LOW_VECTORS;
		create_mapping(&map);
	}

	/* Now create a kernel read-only mapping */
	map.pfn += 1;
	map.virtual = 0xffff0000 + PAGE_SIZE;
	map.length = PAGE_SIZE;
	map.type = MT_LOW_VECTORS;
	create_mapping(&map);

	/*
	 * Ask the machine support to map in the statically mapped devices.
	 */
	if (mdesc->map_io)
		mdesc->map_io();
	else
		debug_ll_io_init();
	fill_pmd_gaps();

	/* Reserve fixed i/o space in VMALLOC region */
	pci_reserve_io();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state wrt the writebuffer.  This also ensures that
	 * any write-allocated cache lines in the vector page are written
	 * back.  After this point, we can start to touch devices again.
	 */
	local_flush_tlb_all();
	flush_cache_all();

	/* Enable asynchronous aborts */
	early_abt_enable();
}

static void __init kmap_init(void)
{
#ifdef CONFIG_HIGHMEM
	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
		PKMAP_BASE, _PAGE_KERNEL_TABLE);
#endif

	early_pte_alloc(pmd_off_k(FIXADDR_START), FIXADDR_START,
			_PAGE_KERNEL_TABLE);
}

static void __init map_lowmem(void)
{
	struct memblock_region *reg;
	phys_addr_t kernel_x_start = round_down(__pa(KERNEL_START), SECTION_SIZE);
	phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);

	/* Map all the lowmem memory banks. */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;
		struct map_desc map;

		if (memblock_is_nomap(reg))
			continue;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			break;

		if (end < kernel_x_start) {
			map.pfn = __phys_to_pfn(start);
			map.virtual = __phys_to_virt(start);
			map.length = end - start;
			map.type = MT_MEMORY_RWX;

			create_mapping(&map);
		} else if (start >= kernel_x_end) {
			map.pfn = __phys_to_pfn(start);
			map.virtual = __phys_to_virt(start);
			map.length = end - start;
			map.type = MT_MEMORY_RW;

			create_mapping(&map);
		} else {
			/* This better cover the entire kernel */
			if (start < kernel_x_start) {
				map.pfn = __phys_to_pfn(start);
				map.virtual = __phys_to_virt(start);
				map.length = kernel_x_start - start;
				map.type = MT_MEMORY_RW;

				create_mapping(&map);
			}

			map.pfn = __phys_to_pfn(kernel_x_start);
			map.virtual = __phys_to_virt(kernel_x_start);
			map.length = kernel_x_end - kernel_x_start;
			map.type = MT_MEMORY_RWX;

			create_mapping(&map);

			if (kernel_x_end < end) {
				map.pfn = __phys_to_pfn(kernel_x_end);
				map.virtual = __phys_to_virt(kernel_x_end);
				map.length = end - kernel_x_end;
				map.type = MT_MEMORY_RW;

				create_mapping(&map);
			}
		}
	}
}

#ifdef CONFIG_ARM_PV_FIXUP
extern unsigned long __atags_pointer;
typedef void pgtables_remap(long long offset, unsigned long pgd, void *bdata);
pgtables_remap lpae_pgtables_remap_asm;

/*
 * early_paging_init() recreates boot time page table setup, allowing machines
 * to switch over to a high (>4G) address space on LPAE systems
 */
static void __init early_paging_init(const struct machine_desc *mdesc)
{
	pgtables_remap *lpae_pgtables_remap;
	unsigned long pa_pgd;
	unsigned int cr, ttbcr;
	long long offset;
	void *boot_data;

	if (!mdesc->pv_fixup)
		return;

	offset = mdesc->pv_fixup();
	if (offset == 0)
		return;

	/*
	 * Get the address of the remap function in the 1:1 identity
	 * mapping setup by the early page table assembly code.  We
	 * must get this prior to the pv update.  The following barrier
	 * ensures that this is complete before we fixup any P:V offsets.
	 */
	lpae_pgtables_remap = (pgtables_remap *)(unsigned long)__pa(lpae_pgtables_remap_asm);
	pa_pgd = __pa(swapper_pg_dir);
	boot_data = __va(__atags_pointer);
	barrier();

	pr_info("Switching physical address space to 0x%08llx\n",
		(u64)PHYS_OFFSET + offset);

	/* Re-set the phys pfn offset, and the pv offset */
	__pv_offset += offset;
	__pv_phys_pfn_offset += PFN_DOWN(offset);

	/* Run the patch stub to update the constants */
	fixup_pv_table(&__pv_table_begin,
		(&__pv_table_end - &__pv_table_begin) << 2);

	/*
	 * We are changing not only the virtual to physical mapping, but also
	 * the physical addresses used to access memory.  We need to flush
	 * all levels of cache in the system with caching disabled to
	 * ensure that all data is written back, and nothing is prefetched
	 * into the caches.  We also need to prevent the TLB walkers
	 * allocating into the caches too.  Note that this is ARMv7 LPAE
	 * specific.
	 */
	cr = get_cr();
	set_cr(cr & ~(CR_I | CR_C));
	asm("mrc p15, 0, %0, c2, c0, 2" : "=r" (ttbcr));
	asm volatile("mcr p15, 0, %0, c2, c0, 2"
		: : "r" (ttbcr & ~(3 << 8 | 3 << 10)));
	flush_cache_all();

	/*
	 * Fixup the page tables - this must be in the idmap region as
	 * we need to disable the MMU to do this safely, and hence it
	 * needs to be assembly.  It's fairly simple, as we're using the
	 * temporary tables setup by the initial assembly code.
	 */
	lpae_pgtables_remap(offset, pa_pgd, boot_data);

	/* Re-enable the caches and cacheable TLB walks */
	asm volatile("mcr p15, 0, %0, c2, c0, 2" : : "r" (ttbcr));
	set_cr(cr);
}

#else

static void __init early_paging_init(const struct machine_desc *mdesc)
{
	long long offset;

	if (!mdesc->pv_fixup)
		return;

	offset = mdesc->pv_fixup();
	if (offset == 0)
		return;

	pr_crit("Physical address space modification is only to support Keystone2.\n");
	pr_crit("Please enable ARM_LPAE and ARM_PATCH_PHYS_VIRT support to use this\n");
	pr_crit("feature. Your kernel may crash now, have a good day.\n");
	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
}

#endif

static void __init early_fixmap_shutdown(void)
{
	int i;
	unsigned long va = fix_to_virt(__end_of_permanent_fixed_addresses - 1);

	pte_offset_fixmap = pte_offset_late_fixmap;
	pmd_clear(fixmap_pmd(va));
	local_flush_tlb_kernel_page(va);

	for (i = 0; i < __end_of_permanent_fixed_addresses; i++) {
		pte_t *pte;
		struct map_desc map;

		map.virtual = fix_to_virt(i);
		pte = pte_offset_early_fixmap(pmd_off_k(map.virtual), map.virtual);

		/* Only i/o device mappings are supported ATM */
		if (pte_none(*pte) ||
		    (pte_val(*pte) & L_PTE_MT_MASK) != L_PTE_MT_DEV_SHARED)
			continue;

		map.pfn = pte_pfn(*pte);
		map.type = MT_DEVICE;
		map.length = PAGE_SIZE;

		create_mapping(&map);
	}
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(const struct machine_desc *mdesc)
{
	void *zero_page;

	prepare_page_table();
	map_lowmem();
	memblock_set_current_limit(arm_lowmem_limit);
	dma_contiguous_remap();
	early_fixmap_shutdown();
	devicemaps_init(mdesc);
	kmap_init();
	tcm_init();

	top_pmd = pmd_off_k(0xffff0000);

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);
	__flush_dcache_page(NULL, empty_zero_page);
}

void __init early_mm_init(const struct machine_desc *mdesc)
{
	build_mem_type_table();
	early_paging_init(mdesc);
}

void set_pte_at(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pteval)
{
	unsigned long ext = 0;

	if (addr < TASK_SIZE && pte_valid_user(pteval)) {
		if (!pte_special(pteval))
			__sync_icache_dcache(pteval);
		ext |= PTE_EXT_NG;
	}

	set_pte_ext(ptep, pteval, ext);
}