// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/mmu.c
 *
 *  Copyright (C) 1995-2005 Russell King
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cachetype.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/tcm.h>
#include <asm/tlb.h>
#include <asm/highmem.h>
#include <asm/system_info.h>
#include <asm/traps.h>
#include <asm/procinfo.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/kasan_def.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/pci.h>
#include <asm/fixmap.h>

#include "fault.h"
#include "mm.h"

extern unsigned long __atags_pointer;

/*
 * The pmd table for the upper-most set of pages.
 */
pmd_t *top_pmd;

pmdval_t user_pmd_table = _PAGE_USER_TABLE;

#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_user;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_user);
EXPORT_SYMBOL(pgprot_kernel);

struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	pmdval_t	pmd;
	pteval_t	pte;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy	= "uncached",
		.cr_mask = CR_W|CR_C,
		.pmd	= PMD_SECT_UNCACHED,
		.pte	= L_PTE_MT_UNCACHED,
	}, {
		.policy	= "buffered",
		.cr_mask = CR_C,
		.pmd	= PMD_SECT_BUFFERED,
		.pte	= L_PTE_MT_BUFFERABLE,
	}, {
		.policy	= "writethrough",
		.cr_mask = 0,
		.pmd	= PMD_SECT_WT,
		.pte	= L_PTE_MT_WRITETHROUGH,
	}, {
		.policy	= "writeback",
		.cr_mask = 0,
		.pmd	= PMD_SECT_WB,
		.pte	= L_PTE_MT_WRITEBACK,
	}, {
		.policy	= "writealloc",
		.cr_mask = 0,
		.pmd	= PMD_SECT_WBWA,
		.pte	= L_PTE_MT_WRITEALLOC,
	}
};
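
/*
 * Illustrative note (editorial, not from the original file): the table
 * above is selected at boot via the "cachepolicy=" kernel command line
 * parameter, parsed by early_cachepolicy() below, e.g.:
 *
 *	cachepolicy=writealloc
 *
 * On ARMv6 and later only the policy already chosen by the head
 * assembly code can be kept; see the check in early_cachepolicy().
 */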

#ifdef CONFIG_CPU_CP15
static unsigned long initial_pmd_value __initdata = 0;

/*
 * Initialise the cachepolicy variable with the initial state specified
 * via the "pmd" value.  This is used to ensure that on ARMv6 and later,
 * the C code sets the page tables up with the same policy as the head
 * assembly code, which avoids an illegal state where the TLBs can get
 * confused.  See comments in early_cachepolicy() for more information.
 */
void __init init_default_cache_policy(unsigned long pmd)
{
	int i;

	initial_pmd_value = pmd;

	pmd &= PMD_SECT_CACHE_MASK;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++)
		if (cache_policies[i].pmd == pmd) {
			cachepolicy = i;
			break;
		}

	if (i == ARRAY_SIZE(cache_policies))
		pr_err("ERROR: could not find cache policy\n");
}

/*
 * These are useful for identifying cache coherency problems by allowing
 * the cache or the cache and writebuffer to be turned off.  (Note: the
 * write buffer should not be on and the cache off).
 */
static int __init early_cachepolicy(char *p)
{
	int i, selected = -1;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(p, cache_policies[i].policy, len) == 0) {
			selected = i;
			break;
		}
	}

	if (selected == -1) {
		pr_err("ERROR: unknown or unsupported cache policy\n");
		/* Bail out rather than indexing cache_policies[] with -1 below. */
		return 0;
	}

	/*
	 * This restriction is partly to do with the way we boot; it is
	 * unpredictable to have memory mapped using two different sets of
	 * memory attributes (shared, type, and cache attribs).  We can not
	 * change these attributes once the initial assembly has set up the
	 * page tables.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6 && selected != cachepolicy) {
		pr_warn("Only cachepolicy=%s supported on ARMv6 and later\n",
			cache_policies[cachepolicy].policy);
		return 0;
	}

	if (selected != cachepolicy) {
		unsigned long cr = __clear_cr(cache_policies[selected].cr_mask);
		cachepolicy = selected;
		flush_cache_all();
		set_cr(cr);
	}
	return 0;
}
early_param("cachepolicy", early_cachepolicy);

static int __init early_nocache(char *__unused)
{
	char *p = "buffered";
	pr_warn("nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nocache", early_nocache);

static int __init early_nowrite(char *__unused)
{
	char *p = "uncached";
	pr_warn("nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nowb", early_nowrite);

#ifndef CONFIG_ARM_LPAE
static int __init early_ecc(char *p)
{
	if (memcmp(p, "on", 2) == 0)
		ecc_mask = PMD_PROTECTION;
	else if (memcmp(p, "off", 3) == 0)
		ecc_mask = 0;
	return 0;
}
early_param("ecc", early_ecc);
#endif

#else /* ifdef CONFIG_CPU_CP15 */

static int __init early_cachepolicy(char *p)
{
	pr_warn("cachepolicy kernel parameter not supported without cp15\n");
	return 0;
}
early_param("cachepolicy", early_cachepolicy);

static int __init noalign_setup(char *__unused)
{
	pr_warn("noalign kernel parameter not supported without cp15\n");
	return 1;
}
__setup("noalign", noalign_setup);

#endif /* ifdef CONFIG_CPU_CP15 / else */

#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
#define PROT_PTE_S2_DEVICE	PROT_PTE_DEVICE
#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE

static struct mem_type mem_types[] __ro_after_init = {
	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
		.prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
			    L_PTE_SHARED,
		.prot_l1 = PMD_TYPE_TABLE,
		.prot_sect = PROT_SECT_DEVICE | PMD_SECT_S,
		.domain = DOMAIN_IO,
	},
	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
		.prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
		.prot_l1 = PMD_TYPE_TABLE,
		.prot_sect = PROT_SECT_DEVICE,
		.domain = DOMAIN_IO,
	},
	[MT_DEVICE_CACHED] = {	  /* ioremap_cache */
		.prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
		.prot_l1 = PMD_TYPE_TABLE,
		.prot_sect = PROT_SECT_DEVICE | PMD_SECT_WB,
		.domain = DOMAIN_IO,
	},
	[MT_DEVICE_WC] = {	  /* ioremap_wc */
		.prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
		.prot_l1 = PMD_TYPE_TABLE,
		.prot_sect = PROT_SECT_DEVICE,
		.domain = DOMAIN_IO,
	},
	[MT_UNCACHED] = {
		.prot_pte = PROT_PTE_DEVICE,
		.prot_l1 = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain = DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain = DOMAIN_KERNEL,
	},
#ifndef CONFIG_ARM_LPAE
	[MT_MINICLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
		.domain = DOMAIN_KERNEL,
	},
#endif
	[MT_LOW_VECTORS] = {
		.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
			    L_PTE_RDONLY,
		.prot_l1 = PMD_TYPE_TABLE,
		.domain = DOMAIN_VECTORS,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
			    L_PTE_USER | L_PTE_RDONLY,
		.prot_l1 = PMD_TYPE_TABLE,
		.domain = DOMAIN_VECTORS,
	},
	[MT_MEMORY_RWX] = {
		.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1 = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain = DOMAIN_KERNEL,
	},
	[MT_MEMORY_RW] = {
		.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
			    L_PTE_XN,
		.prot_l1 = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain = DOMAIN_KERNEL,
	},
	[MT_MEMORY_RO] = {
		.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
			    L_PTE_XN | L_PTE_RDONLY,
		.prot_l1 = PMD_TYPE_TABLE,
#ifdef CONFIG_ARM_LPAE
		.prot_sect = PMD_TYPE_SECT | L_PMD_SECT_RDONLY | PMD_SECT_AP2,
#else
		.prot_sect = PMD_TYPE_SECT,
#endif
		.domain = DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain = DOMAIN_KERNEL,
	},
	[MT_MEMORY_RWX_NONCACHED] = {
		.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
			    L_PTE_MT_BUFFERABLE,
		.prot_l1 = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain = DOMAIN_KERNEL,
	},
	[MT_MEMORY_RW_DTCM] = {
		.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
			    L_PTE_XN,
		.prot_l1 = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain = DOMAIN_KERNEL,
	},
	[MT_MEMORY_RWX_ITCM] = {
		.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1 = PMD_TYPE_TABLE,
		.domain = DOMAIN_KERNEL,
	},
	[MT_MEMORY_RW_SO] = {
		.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
			    L_PTE_MT_UNCACHED | L_PTE_XN,
		.prot_l1 = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
			     PMD_SECT_UNCACHED | PMD_SECT_XN,
		.domain = DOMAIN_KERNEL,
	},
	[MT_MEMORY_DMA_READY] = {
		.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
			    L_PTE_XN,
		.prot_l1 = PMD_TYPE_TABLE,
		.domain = DOMAIN_KERNEL,
	},
};

const struct mem_type *get_mem_type(unsigned int type)
{
	return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
}
EXPORT_SYMBOL(get_mem_type);
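
/*
 * Illustrative sketch (editorial, not from the original file): ioremap
 * code is the typical consumer of get_mem_type().  A caller might look
 * up the attributes for a device mapping like this:
 *
 *	const struct mem_type *mt = get_mem_type(MT_DEVICE);
 *
 *	if (mt)
 *		pte = pfn_pte(pfn, __pgprot(mt->prot_pte));
 *
 * The NULL check matters: an out-of-range type yields NULL, not some
 * default entry.
 */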

static pte_t *(*pte_offset_fixmap)(pmd_t *dir, unsigned long addr);

static pte_t bm_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS]
	__aligned(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE) __initdata;

static pte_t * __init pte_offset_early_fixmap(pmd_t *dir, unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

static pte_t *pte_offset_late_fixmap(pmd_t *dir, unsigned long addr)
{
	return pte_offset_kernel(dir, addr);
}

static inline pmd_t * __init fixmap_pmd(unsigned long addr)
{
	return pmd_off_k(addr);
}

void __init early_fixmap_init(void)
{
	pmd_t *pmd;

	/*
	 * The early fixmap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	BUILD_BUG_ON((__fix_to_virt(__end_of_early_ioremap_region) >> PMD_SHIFT)
		     != FIXADDR_TOP >> PMD_SHIFT);

	pmd = fixmap_pmd(FIXADDR_TOP);
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	pte_offset_fixmap = pte_offset_early_fixmap;
}

/*
 * To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range().
 * As a result, this can only be called with preemption disabled, as under
 * stop_machine().
 */
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{
	unsigned long vaddr = __fix_to_virt(idx);
	pte_t *pte = pte_offset_fixmap(pmd_off_k(vaddr), vaddr);

	/* Make sure fixmap region does not exceed available allocation. */
	BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) < FIXADDR_START);
	BUG_ON(idx >= __end_of_fixed_addresses);

	/* We support only device mappings before pgprot_kernel is set. */
	if (WARN_ON(pgprot_val(prot) != pgprot_val(FIXMAP_PAGE_IO) &&
		    pgprot_val(prot) && pgprot_val(pgprot_kernel) == 0))
		return;

	if (pgprot_val(prot))
		set_pte_at(NULL, vaddr, pte,
			   pfn_pte(phys >> PAGE_SHIFT, prot));
	else
		pte_clear(NULL, vaddr, pte);
	local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
}
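
/*
 * Illustrative sketch (editorial, not from the original file): callers
 * normally go through the generic helpers from <asm-generic/fixmap.h>,
 * which wrap __set_fixmap().  Mapping and unmapping a page at a fixed
 * slot might look like:
 *
 *	set_fixmap(FIX_TEXT_POKE0, page_to_phys(page));
 *	...access the page through fix_to_virt(FIX_TEXT_POKE0)...
 *	clear_fixmap(FIX_TEXT_POKE0);
 *
 * Any enum fixed_addresses index below __end_of_fixed_addresses works;
 * FIX_TEXT_POKE0 is just one of the permanent slots on ARM.
 */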

static pgprot_t protection_map[16] __ro_after_init = {
	[VM_NONE] = __PAGE_NONE,
	[VM_READ] = __PAGE_READONLY,
	[VM_WRITE] = __PAGE_COPY,
	[VM_WRITE | VM_READ] = __PAGE_COPY,
	[VM_EXEC] = __PAGE_READONLY_EXEC,
	[VM_EXEC | VM_READ] = __PAGE_READONLY_EXEC,
	[VM_EXEC | VM_WRITE] = __PAGE_COPY_EXEC,
	[VM_EXEC | VM_WRITE | VM_READ] = __PAGE_COPY_EXEC,
	[VM_SHARED] = __PAGE_NONE,
	[VM_SHARED | VM_READ] = __PAGE_READONLY,
	[VM_SHARED | VM_WRITE] = __PAGE_SHARED,
	[VM_SHARED | VM_WRITE | VM_READ] = __PAGE_SHARED,
	[VM_SHARED | VM_EXEC] = __PAGE_READONLY_EXEC,
	[VM_SHARED | VM_EXEC | VM_READ] = __PAGE_READONLY_EXEC,
	[VM_SHARED | VM_EXEC | VM_WRITE] = __PAGE_SHARED_EXEC,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __PAGE_SHARED_EXEC
};
DECLARE_VM_GET_PAGE_PROT

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
	int cpu_arch = cpu_architecture();
	int i;

	if (cpu_arch < CPU_ARCH_ARMv6) {
#if defined(CONFIG_CPU_DCACHE_DISABLE)
		if (cachepolicy > CPOLICY_BUFFERED)
			cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
		if (cachepolicy > CPOLICY_WRITETHROUGH)
			cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	}
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}

	if (is_smp()) {
		if (cachepolicy != CPOLICY_WRITEALLOC) {
			pr_warn("Forcing write-allocate cache policy for SMP\n");
			cachepolicy = CPOLICY_WRITEALLOC;
		}
		if (!(initial_pmd_value & PMD_SECT_S)) {
			pr_warn("Forcing shared mappings for SMP\n");
			initial_pmd_value |= PMD_SECT_S;
		}
	}

	/*
	 * Strip out features not present on earlier architectures.
	 * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
	 * without extended page tables don't have the 'Shared' bit.
	 */
	if (cpu_arch < CPU_ARCH_ARMv5)
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
	if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_S;

	/*
	 * ARMv5 and lower, bit 4 must be set for page tables (was: cache
	 * "update-able on write" bit on ARM610).  However, Xscale and
	 * Xscale3 require this bit to be cleared.
	 */
	if (cpu_is_xscale_family()) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			mem_types[i].prot_sect &= ~PMD_BIT4;
			mem_types[i].prot_l1 &= ~PMD_BIT4;
		}
	} else if (cpu_arch < CPU_ARCH_ARMv6) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
			if (mem_types[i].prot_sect)
				mem_types[i].prot_sect |= PMD_BIT4;
		}
	}

	/*
	 * Mark the device areas according to the CPU/architecture.
	 */
	if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
		if (!cpu_is_xsc3()) {
			/*
			 * Mark device regions on ARMv6+ as execute-never
			 * to prevent speculative instruction fetches.
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;

			/* Also setup NX memory mapping */
			mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
			mem_types[MT_MEMORY_RO].prot_sect |= PMD_SECT_XN;
		}
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/*
			 * For ARMv7 with TEX remapping,
			 * - shared device is SXCB=1100
			 * - nonshared device is SXCB=0100
			 * - write combine device mem is SXCB=0001
			 * (Uncached Normal memory)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
		} else if (cpu_is_xsc3()) {
			/*
			 * For Xscale3,
			 * - shared device is TEXCB=00101
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Inner/Outer Uncacheable in xsc3 parlance)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		} else {
			/*
			 * For ARMv6 and ARMv7 without TEX remapping,
			 * - shared device is TEXCB=00001
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Uncached Normal in ARMv6 parlance).
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		}
	} else {
		/*
		 * On others, write combining is "Uncached/Buffered"
		 */
		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
	}

	/*
	 * Now deal with the memory-type mappings
	 */
	cp = &cache_policies[cachepolicy];
	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;

#ifndef CONFIG_ARM_LPAE
	/*
	 * We don't use domains on ARMv6 (since this causes problems with
	 * v6/v7 kernels), so we must use a separate memory type for user
	 * r/o, kernel r/w to map the vectors page.
	 */
	if (cpu_arch == CPU_ARCH_ARMv6)
		vecs_pgprot |= L_PTE_MT_VECTORS;

	/*
	 * Check whether the Short-descriptor translation table format
	 * descriptors support the PXN bit.
	 */
	if (cpu_arch == CPU_ARCH_ARMv7 &&
	    (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xF) >= 4) {
		user_pmd_table |= PMD_PXNTABLE;
	}
#endif

	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
#ifndef CONFIG_ARM_LPAE
		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MEMORY_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
#endif

		/*
		 * If the initial page tables were created with the S bit
		 * set, then we need to do the same here for the same
		 * reasons given in early_cachepolicy().
		 */
		if (initial_pmd_value & PMD_SECT_S) {
			user_pgprot |= L_PTE_SHARED;
			kern_pgprot |= L_PTE_SHARED;
			vecs_pgprot |= L_PTE_SHARED;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_RO].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_RO].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
		}
	}

	/*
	 * Non-cacheable Normal - intended for memory areas that must
	 * not cause dirty cache line writebacks when used
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6) {
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/* Non-cacheable Normal is XCB = 001 */
			mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
				PMD_SECT_BUFFERED;
		} else {
			/* For both ARMv6 and non-TEX-remapping ARMv7 */
			mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
				PMD_SECT_TEX(1);
		}
	} else {
		mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
	}

#ifdef CONFIG_ARM_LPAE
	/*
	 * Do not generate access flag faults for the kernel mappings.
	 */
	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		mem_types[i].prot_pte |= PTE_EXT_AF;
		if (mem_types[i].prot_sect)
			mem_types[i].prot_sect |= PMD_SECT_AF;
	}
	kern_pgprot |= PTE_EXT_AF;
	vecs_pgprot |= PTE_EXT_AF;

	/*
	 * Set PXN for user mappings
	 */
	user_pgprot |= PTE_EXT_PXN;
#endif

	for (i = 0; i < 16; i++) {
		pteval_t v = pgprot_val(protection_map[i]);
		protection_map[i] = __pgprot(v | user_pgprot);
	}

	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;

	pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | kern_pgprot);

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_RO].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_MEMORY_RO].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	pr_info("Memory policy: %sData cache %s\n",
		ecc_mask ? "ECC enabled, " : "", cp->policy);

	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		struct mem_type *t = &mem_types[i];
		if (t->prot_l1)
			t->prot_l1 |= PMD_DOMAIN(t->domain);
		if (t->prot_sect)
			t->prot_sect |= PMD_DOMAIN(t->domain);
	}
}

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
#endif
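
/*
 * Illustrative note (editorial, not from the original file):
 * phys_mem_access_prot() is what gives /dev/mem mappings their memory
 * attributes on ARM.  A userspace sketch:
 *
 *	int fd = open("/dev/mem", O_RDWR | O_SYNC);
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, phys_addr);
 *
 * Per the code above: an invalid pfn (typically a device) is always
 * mapped non-cached; a valid RAM pfn opened with O_SYNC is mapped
 * write-combining; otherwise the default cacheable vma_prot is kept.
 */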
"ECC enabled, " : "", cp->policy); 695 696 for (i = 0; i < ARRAY_SIZE(mem_types); i++) { 697 struct mem_type *t = &mem_types[i]; 698 if (t->prot_l1) 699 t->prot_l1 |= PMD_DOMAIN(t->domain); 700 if (t->prot_sect) 701 t->prot_sect |= PMD_DOMAIN(t->domain); 702 } 703 } 704 705 #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE 706 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, 707 unsigned long size, pgprot_t vma_prot) 708 { 709 if (!pfn_valid(pfn)) 710 return pgprot_noncached(vma_prot); 711 else if (file->f_flags & O_SYNC) 712 return pgprot_writecombine(vma_prot); 713 return vma_prot; 714 } 715 EXPORT_SYMBOL(phys_mem_access_prot); 716 #endif 717 718 #define vectors_base() (vectors_high() ? 0xffff0000 : 0) 719 720 static void __init *early_alloc(unsigned long sz) 721 { 722 return memblock_alloc_or_panic(sz, sz); 723 724 } 725 726 static void *__init late_alloc(unsigned long sz) 727 { 728 void *ptdesc = pagetable_alloc(GFP_PGTABLE_KERNEL & ~__GFP_HIGHMEM, 729 get_order(sz)); 730 731 if (!ptdesc || !pagetable_pte_ctor(NULL, ptdesc)) 732 BUG(); 733 return ptdesc_address(ptdesc); 734 } 735 736 static pte_t * __init arm_pte_alloc(pmd_t *pmd, unsigned long addr, 737 unsigned long prot, 738 void *(*alloc)(unsigned long sz)) 739 { 740 if (pmd_none(*pmd)) { 741 pte_t *pte = alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE); 742 __pmd_populate(pmd, __pa(pte), prot); 743 } 744 BUG_ON(pmd_bad(*pmd)); 745 return pte_offset_kernel(pmd, addr); 746 } 747 748 static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, 749 unsigned long prot) 750 { 751 return arm_pte_alloc(pmd, addr, prot, early_alloc); 752 } 753 754 static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr, 755 unsigned long end, unsigned long pfn, 756 const struct mem_type *type, 757 void *(*alloc)(unsigned long sz), 758 bool ng) 759 { 760 pte_t *pte = arm_pte_alloc(pmd, addr, type->prot_l1, alloc); 761 do { 762 set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 763 ng ? PTE_EXT_NG : 0); 764 pfn++; 765 } while (pte++, addr += PAGE_SIZE, addr != end); 766 } 767 768 static void __init __map_init_section(pmd_t *pmd, unsigned long addr, 769 unsigned long end, phys_addr_t phys, 770 const struct mem_type *type, bool ng) 771 { 772 pmd_t *p = pmd; 773 774 #ifndef CONFIG_ARM_LPAE 775 /* 776 * In classic MMU format, puds and pmds are folded in to 777 * the pgds. pmd_offset gives the PGD entry. PGDs refer to a 778 * group of L1 entries making up one logical pointer to 779 * an L2 table (2MB), where as PMDs refer to the individual 780 * L1 entries (1MB). Hence increment to get the correct 781 * offset for odd 1MB sections. 782 * (See arch/arm/include/asm/pgtable-2level.h) 783 */ 784 if (addr & SECTION_SIZE) 785 pmd++; 786 #endif 787 do { 788 *pmd = __pmd(phys | type->prot_sect | (ng ? PMD_SECT_nG : 0)); 789 phys += SECTION_SIZE; 790 } while (pmd++, addr += SECTION_SIZE, addr != end); 791 792 flush_pmd_entry(p); 793 } 794 795 static void __init alloc_init_pmd(pud_t *pud, unsigned long addr, 796 unsigned long end, phys_addr_t phys, 797 const struct mem_type *type, 798 void *(*alloc)(unsigned long sz), bool ng) 799 { 800 pmd_t *pmd = pmd_offset(pud, addr); 801 unsigned long next; 802 803 do { 804 /* 805 * With LPAE, we must loop over to map 806 * all the pmds for the given range. 807 */ 808 next = pmd_addr_end(addr, end); 809 810 /* 811 * Try a section mapping - addr, next and phys must all be 812 * aligned to a section boundary. 

static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
				  unsigned long end, phys_addr_t phys,
				  const struct mem_type *type,
				  void *(*alloc)(unsigned long sz), bool ng)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	unsigned long next;

	do {
		/*
		 * With LPAE, we must loop over to map
		 * all the pmds for the given range.
		 */
		next = pmd_addr_end(addr, end);

		/*
		 * Try a section mapping - addr, next and phys must all be
		 * aligned to a section boundary.
		 */
		if (type->prot_sect &&
		    ((addr | next | phys) & ~SECTION_MASK) == 0) {
			__map_init_section(pmd, addr, next, phys, type, ng);
		} else {
			alloc_init_pte(pmd, addr, next,
				       __phys_to_pfn(phys), type, alloc, ng);
		}

		phys += next - addr;

	} while (pmd++, addr = next, addr != end);
}

static void __init alloc_init_pud(p4d_t *p4d, unsigned long addr,
				  unsigned long end, phys_addr_t phys,
				  const struct mem_type *type,
				  void *(*alloc)(unsigned long sz), bool ng)
{
	pud_t *pud = pud_offset(p4d, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		alloc_init_pmd(pud, addr, next, phys, type, alloc, ng);
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}

static void __init alloc_init_p4d(pgd_t *pgd, unsigned long addr,
				  unsigned long end, phys_addr_t phys,
				  const struct mem_type *type,
				  void *(*alloc)(unsigned long sz), bool ng)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	unsigned long next;

	do {
		next = p4d_addr_end(addr, end);
		alloc_init_pud(p4d, addr, next, phys, type, alloc, ng);
		phys += next - addr;
	} while (p4d++, addr = next, addr != end);
}

#ifndef CONFIG_ARM_LPAE
static void __init create_36bit_mapping(struct mm_struct *mm,
					struct map_desc *md,
					const struct mem_type *type,
					bool ng)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	pgd_t *pgd;

	addr = md->virtual;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length);

	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
		pr_err("MM: CPU does not support supersection mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/*
	 * N.B. ARMv6 supersections are only defined to work with domain 0.
	 * Since domain assignments can in fact be arbitrary, the
	 * 'domain == 0' check below is required to ensure that ARMv6
	 * supersections are only allocated for domain 0 regardless
	 * of the actual domain assignments in use.
	 */
	if (type->domain) {
		pr_err("MM: invalid domain in supersection mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
		pr_err("MM: cannot create mapping for 0x%08llx at 0x%08lx invalid alignment\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/*
	 * Shift bits [35:32] of address into bits [23:20] of PMD
	 * (See ARMv6 spec).
	 */
	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);

	pgd = pgd_offset(mm, addr);
	end = addr + length;
	do {
		p4d_t *p4d = p4d_offset(pgd, addr);
		pud_t *pud = pud_offset(p4d, addr);
		pmd_t *pmd = pmd_offset(pud, addr);
		int i;

		for (i = 0; i < 16; i++)
			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER |
				       (ng ? PMD_SECT_nG : 0));

		addr += SUPERSECTION_SIZE;
		phys += SUPERSECTION_SIZE;
		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
	} while (addr != end);
}
#endif	/* !CONFIG_ARM_LPAE */
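
/*
 * Illustrative worked example (editorial, not from the original file):
 * with 4K pages, md->pfn 0x480000 corresponds to phys 0x4_8000_0000,
 * i.e. address bits [35:32] = 0x4.  The "phys |= ..." expression in
 * create_36bit_mapping() above computes (0x480000 >> 20) & 0xF = 0x4
 * and places it at bits [23:20] of the section descriptor, which is
 * where the ARMv6 supersection format carries the high physical
 * address bits.
 */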

static void __init __create_mapping(struct mm_struct *mm, struct map_desc *md,
				    void *(*alloc)(unsigned long sz),
				    bool ng)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	const struct mem_type *type;
	pgd_t *pgd;

	type = &mem_types[md->type];

#ifndef CONFIG_ARM_LPAE
	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		create_36bit_mapping(mm, md, type, ng);
		return;
	}
#endif

	addr = md->virtual & PAGE_MASK;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));

	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
		pr_warn("BUG: map for 0x%08llx at 0x%08lx can not be mapped using pages, ignoring.\n",
			(long long)__pfn_to_phys(md->pfn), addr);
		return;
	}

	pgd = pgd_offset(mm, addr);
	end = addr + length;
	do {
		unsigned long next = pgd_addr_end(addr, end);

		alloc_init_p4d(pgd, addr, next, phys, type, alloc, ng);

		phys += next - addr;
		addr = next;
	} while (pgd++, addr != end);
}

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
static void __init create_mapping(struct map_desc *md)
{
	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		pr_warn("BUG: not creating mapping for 0x%08llx at 0x%08lx in user region\n",
			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

	if (md->type == MT_DEVICE &&
	    md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
		pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
	}

	__create_mapping(&init_mm, md, early_alloc, false);
}

void __init create_mapping_late(struct mm_struct *mm, struct map_desc *md,
				bool ng)
{
#ifdef CONFIG_ARM_LPAE
	p4d_t *p4d;
	pud_t *pud;

	p4d = p4d_alloc(mm, pgd_offset(mm, md->virtual), md->virtual);
	if (WARN_ON(!p4d))
		return;
	pud = pud_alloc(mm, p4d, md->virtual);
	if (WARN_ON(!pud))
		return;
	pmd_alloc(mm, pud, 0);
#endif
	__create_mapping(mm, md, late_alloc, ng);
}

/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	struct map_desc *md;
	struct vm_struct *vm;
	struct static_vm *svm;

	if (!nr)
		return;

	svm = memblock_alloc_or_panic(sizeof(*svm) * nr, __alignof__(*svm));

	for (md = io_desc; nr; md++, nr--) {
		create_mapping(md);

		vm = &svm->vm;
		vm->addr = (void *)(md->virtual & PAGE_MASK);
		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
		vm->phys_addr = __pfn_to_phys(md->pfn);
		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
		vm->flags |= VM_ARM_MTYPE(md->type);
		vm->caller = iotable_init;
		add_static_vm_early(svm++);
	}
}
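
/*
 * Illustrative sketch (editorial, not from the original file): a
 * machine's ->map_io() callback typically feeds iotable_init() a static
 * table.  The names below are hypothetical:
 *
 *	static struct map_desc foo_io_desc[] __initdata = {
 *		{
 *			.virtual = 0xf8000000,
 *			.pfn	 = __phys_to_pfn(0x10000000),
 *			.length	 = SZ_1M,
 *			.type	 = MT_DEVICE,
 *		},
 *	};
 *
 *	static void __init foo_map_io(void)
 *	{
 *		iotable_init(foo_io_desc, ARRAY_SIZE(foo_io_desc));
 *	}
 */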

void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
				  void *caller)
{
	struct vm_struct *vm;
	struct static_vm *svm;

	svm = memblock_alloc_or_panic(sizeof(*svm), __alignof__(*svm));

	vm = &svm->vm;
	vm->addr = (void *)addr;
	vm->size = size;
	vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
	vm->caller = caller;
	add_static_vm_early(svm);
}

#ifndef CONFIG_ARM_LPAE

/*
 * The Linux PMD is made of two consecutive section entries covering 2MB
 * (see definition in include/asm/pgtable-2level.h).  However a call to
 * create_mapping() may optimize static mappings by using individual
 * 1MB section mappings.  This leaves the actual PMD potentially half
 * initialized if the top or bottom section entry isn't used, leaving it
 * open to problems if a subsequent ioremap() or vmalloc() tries to use
 * the virtual space left free by that unused section entry.
 *
 * Let's avoid the issue by inserting dummy vm entries covering the unused
 * PMD halves once the static mappings are in place.
 */

static void __init pmd_empty_section_gap(unsigned long addr)
{
	vm_reserve_area_early(addr, SECTION_SIZE, pmd_empty_section_gap);
}

static void __init fill_pmd_gaps(void)
{
	struct static_vm *svm;
	struct vm_struct *vm;
	unsigned long addr, next = 0;
	pmd_t *pmd;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;
		addr = (unsigned long)vm->addr;
		if (addr < next)
			continue;

		/*
		 * Check if this vm starts on an odd section boundary.
		 * If so and the first section entry for this PMD is free
		 * then we block the corresponding virtual address.
		 */
		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
			pmd = pmd_off_k(addr);
			if (pmd_none(*pmd))
				pmd_empty_section_gap(addr & PMD_MASK);
		}

		/*
		 * Then check if this vm ends on an odd section boundary.
		 * If so and the second section entry for this PMD is empty
		 * then we block the corresponding virtual address.
		 */
		addr += vm->size;
		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
			pmd = pmd_off_k(addr) + 1;
			if (pmd_none(*pmd))
				pmd_empty_section_gap(addr);
		}

		/* no need to look at any vm entry until we hit the next PMD */
		next = (addr + PMD_SIZE - 1) & PMD_MASK;
	}
}

#else
#define fill_pmd_gaps() do { } while (0)
#endif

#if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)
static void __init pci_reserve_io(void)
{
	struct static_vm *svm;

	svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE);
	if (svm)
		return;

	vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);
}
#else
#define pci_reserve_io() do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LL
void __init debug_ll_io_init(void)
{
	struct map_desc map;

	debug_ll_addr(&map.pfn, &map.virtual);
	if (!map.pfn || !map.virtual)
		return;
	map.pfn = __phys_to_pfn(map.pfn);
	map.virtual &= PAGE_MASK;
	map.length = PAGE_SIZE;
	map.type = MT_DEVICE;
	iotable_init(&map, 1);
}
#endif

static unsigned long __initdata vmalloc_size = 240 * SZ_1M;

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes.  This can be used to increase (or decrease) the vmalloc
 * area - the default is 240MiB.
 */
static int __init early_vmalloc(char *arg)
{
	unsigned long vmalloc_reserve = memparse(arg, NULL);
	unsigned long vmalloc_max;

	if (vmalloc_reserve < SZ_16M) {
		vmalloc_reserve = SZ_16M;
		pr_warn("vmalloc area is too small, limiting to %luMiB\n",
			vmalloc_reserve >> 20);
	}

	vmalloc_max = VMALLOC_END - (PAGE_OFFSET + SZ_32M + VMALLOC_OFFSET);
	if (vmalloc_reserve > vmalloc_max) {
		vmalloc_reserve = vmalloc_max;
		pr_warn("vmalloc area is too big, limiting to %luMiB\n",
			vmalloc_reserve >> 20);
	}

	vmalloc_size = vmalloc_reserve;
	return 0;
}
early_param("vmalloc", early_vmalloc);
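
/*
 * Illustrative note (editorial, not from the original file): on the
 * kernel command line this looks like, e.g.:
 *
 *	vmalloc=512M
 *
 * memparse() accepts the usual K/M/G suffixes.  Per the checks above,
 * the value is clamped to at least 16MiB, and to whatever still leaves
 * 32MiB of lowmem above PAGE_OFFSET.
 */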

phys_addr_t arm_lowmem_limit __initdata = 0;

void __init adjust_lowmem_bounds(void)
{
	phys_addr_t block_start, block_end, memblock_limit = 0;
	u64 vmalloc_limit, i;
	phys_addr_t lowmem_limit = 0;

	/*
	 * Let's use our own (unoptimized) equivalent of __pa() that is
	 * not affected by wrap-arounds when sizeof(phys_addr_t) == 4.
	 * The result is used as the upper bound on physical memory
	 * address and may itself be outside the valid range for which
	 * phys_addr_t and therefore __pa() is defined.
	 */
	vmalloc_limit = (u64)VMALLOC_END - vmalloc_size - VMALLOC_OFFSET -
			PAGE_OFFSET + PHYS_OFFSET;

	/*
	 * The first usable region must be PMD aligned.  Mark its start
	 * as MEMBLOCK_NOMAP if it isn't.
	 */
	for_each_mem_range(i, &block_start, &block_end) {
		if (!IS_ALIGNED(block_start, PMD_SIZE)) {
			phys_addr_t len;

			len = round_up(block_start, PMD_SIZE) - block_start;
			memblock_mark_nomap(block_start, len);
		}
		break;
	}

	for_each_mem_range(i, &block_start, &block_end) {
		if (block_start < vmalloc_limit) {
			if (block_end > lowmem_limit)
				/*
				 * Compare as u64 to ensure vmalloc_limit does
				 * not get truncated. block_end should always
				 * fit in phys_addr_t so there should be no
				 * issue with assignment.
				 */
				lowmem_limit = min_t(u64,
						     vmalloc_limit,
						     block_end);

			/*
			 * Find the first non-pmd-aligned page, and point
			 * memblock_limit at it. This relies on rounding the
			 * limit down to be pmd-aligned, which happens at the
			 * end of this function.
			 *
			 * With this algorithm, the start or end of almost any
			 * bank can be non-pmd-aligned. The only exception is
			 * that the start of bank 0 must be section-aligned,
			 * since otherwise memory would need to be allocated
			 * when mapping the start of bank 0, which occurs
			 * before any free memory is mapped.
			 */
			if (!memblock_limit) {
				if (!IS_ALIGNED(block_start, PMD_SIZE))
					memblock_limit = block_start;
				else if (!IS_ALIGNED(block_end, PMD_SIZE))
					memblock_limit = lowmem_limit;
			}

		}
	}

	arm_lowmem_limit = lowmem_limit;

	high_memory = __va(arm_lowmem_limit - 1) + 1;

	if (!memblock_limit)
		memblock_limit = arm_lowmem_limit;

	/*
	 * Round the memblock limit down to a pmd size.  This
	 * helps to ensure that we will allocate memory from the
	 * last full pmd, which should be mapped.
	 */
	memblock_limit = round_down(memblock_limit, PMD_SIZE);

	if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) {
		if (memblock_end_of_DRAM() > arm_lowmem_limit) {
			phys_addr_t end = memblock_end_of_DRAM();

			pr_notice("Ignoring RAM at %pa-%pa\n",
				  &memblock_limit, &end);
			pr_notice("Consider using a HIGHMEM enabled kernel.\n");

			memblock_remove(memblock_limit, end - memblock_limit);
		}
	}

	memblock_set_current_limit(memblock_limit);
}
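
/*
 * Illustrative worked example (editorial, not from the original file),
 * assuming typical values VMALLOC_END = 0xff800000, VMALLOC_OFFSET =
 * 8MiB, PAGE_OFFSET = 0xc0000000, PHYS_OFFSET = 0x40000000 and the
 * default vmalloc_size of 240MiB:
 *
 *	vmalloc_limit = 0xff800000 - 0x0f000000 - 0x00800000
 *			- 0xc0000000 + 0x40000000 = 0x70000000
 *
 * so lowmem covers phys 0x40000000..0x6fffffff (768MiB) and anything
 * above that must be highmem.
 */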

static __init void prepare_page_table(void)
{
	unsigned long addr;
	phys_addr_t end;

	/*
	 * Clear out all the mappings below the kernel image.
	 */
#ifdef CONFIG_KASAN
	/*
	 * KASan's shadow memory inserts itself between the TASK_SIZE
	 * and MODULES_VADDR. Do not clear the KASan shadow memory mappings.
	 */
	for (addr = 0; addr < KASAN_SHADOW_START; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));
	/*
	 * Skip over the KASan shadow area. KASAN_SHADOW_END is sometimes
	 * equal to MODULES_VADDR and then we exit the pmd clearing. If we
	 * are using a thumb-compiled kernel, there will be 8MB more to
	 * clear, as KASan is always offset to 16MB below MODULES_VADDR.
	 */
	for (addr = KASAN_SHADOW_END; addr < MODULES_VADDR; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));
#else
	for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));
#endif

#ifdef CONFIG_XIP_KERNEL
	/* The XIP kernel is mapped in the module area -- skip over it */
	addr = ((unsigned long)_exiprom + PMD_SIZE - 1) & PMD_MASK;
#endif
	for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Find the end of the first block of lowmem.
	 */
	end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
	if (end >= arm_lowmem_limit)
		end = arm_lowmem_limit;

	/*
	 * Clear out all the kernel space mappings, except for the first
	 * memory bank, up to the vmalloc region.
	 */
	for (addr = __phys_to_virt(end);
	     addr < VMALLOC_START; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));
}

#ifdef CONFIG_ARM_LPAE
/* the first page is reserved for pgd */
#define SWAPPER_PG_DIR_SIZE	(PAGE_SIZE + \
				 PTRS_PER_PGD * PTRS_PER_PMD * sizeof(pmd_t))
#else
#define SWAPPER_PG_DIR_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
#endif
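
/*
 * Illustrative note (editorial, not from the original file): on classic
 * (non-LPAE) ARM, PTRS_PER_PGD is 2048 and a pgd_t is a pair of 32-bit
 * entries (8 bytes), so SWAPPER_PG_DIR_SIZE works out to the familiar
 * 16KB swapper_pg_dir.  With LPAE it is one 4KB pgd page plus the pmd
 * tables that back it.
 */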

/*
 * Reserve the special regions of memory
 */
void __init arm_mm_memblock_reserve(void)
{
	/*
	 * Reserve the page tables.  These are already in use,
	 * and can only be in node 0.
	 */
	memblock_reserve(__pa(swapper_pg_dir), SWAPPER_PG_DIR_SIZE);

#ifdef CONFIG_SA1111
	/*
	 * Because of the SA1111 DMA bug, we want to preserve our
	 * precious DMA-able memory...
	 */
	memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
#endif
}

/*
 * Set up the device mappings.  Since we clear out the page tables for all
 * mappings above VMALLOC_START, except early fixmap, we might remove debug
 * device mappings.  This means earlycon can be used to debug this function.
 * Any other function or debugging method which may touch any device _will_
 * crash the kernel.
 */
static void __init devicemaps_init(const struct machine_desc *mdesc)
{
	struct map_desc map;
	unsigned long addr;
	void *vectors;

	/*
	 * Allocate the vector page early.
	 */
	vectors = early_alloc(PAGE_SIZE * 2);

	early_trap_init(vectors);

	/*
	 * Clear page table except top pmd used by early fixmaps
	 */
	for (addr = VMALLOC_START; addr < (FIXADDR_TOP & PMD_MASK); addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

	if (__atags_pointer) {
		/* create a read-only mapping of the device tree */
		map.pfn = __phys_to_pfn(__atags_pointer & SECTION_MASK);
		map.virtual = FDT_FIXED_BASE;
		map.length = FDT_FIXED_SIZE;
		map.type = MT_MEMORY_RO;
		create_mapping(&map);
	}

	/*
	 * Map the cache flushing regions.
	 */
#ifdef FLUSH_BASE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
	map.virtual = FLUSH_BASE;
	map.length = SZ_1M;
	map.type = MT_CACHECLEAN;
	create_mapping(&map);
#endif
#ifdef FLUSH_BASE_MINICACHE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
	map.virtual = FLUSH_BASE_MINICACHE;
	map.length = SZ_1M;
	map.type = MT_MINICLEAN;
	create_mapping(&map);
#endif

	/*
	 * Create a mapping for the machine vectors at the high-vectors
	 * location (0xffff0000).  If we aren't using high-vectors, also
	 * create a mapping at the low-vectors virtual address.
	 */
	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
	map.virtual = 0xffff0000;
	map.length = PAGE_SIZE;
#ifdef CONFIG_KUSER_HELPERS
	map.type = MT_HIGH_VECTORS;
#else
	map.type = MT_LOW_VECTORS;
#endif
	create_mapping(&map);

	if (!vectors_high()) {
		map.virtual = 0;
		map.length = PAGE_SIZE * 2;
		map.type = MT_LOW_VECTORS;
		create_mapping(&map);
	}

	/* Now create a kernel read-only mapping */
	map.pfn += 1;
	map.virtual = 0xffff0000 + PAGE_SIZE;
	map.length = PAGE_SIZE;
	map.type = MT_LOW_VECTORS;
	create_mapping(&map);

	/*
	 * Ask the machine support to map in the statically mapped devices.
	 */
	if (mdesc->map_io)
		mdesc->map_io();
	else
		debug_ll_io_init();
	fill_pmd_gaps();

	/* Reserve fixed i/o space in VMALLOC region */
	pci_reserve_io();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state wrt the writebuffer.  This also ensures that
	 * any write-allocated cache lines in the vector page are written
	 * back.  After this point, we can start to touch devices again.
	 */
	local_flush_tlb_all();
	flush_cache_all();

	/* Enable asynchronous aborts */
	early_abt_enable();
}

static void __init kmap_init(void)
{
#ifdef CONFIG_HIGHMEM
	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
					   PKMAP_BASE, _PAGE_KERNEL_TABLE);
#endif

	early_pte_alloc(pmd_off_k(FIXADDR_START), FIXADDR_START,
			_PAGE_KERNEL_TABLE);
}

static void __init map_lowmem(void)
{
	phys_addr_t start, end;
	u64 i;

	/* Map all the lowmem memory banks */
	for_each_mem_range(i, &start, &end) {
		struct map_desc map;

		pr_debug("map lowmem start: 0x%08llx, end: 0x%08llx\n",
			 (long long)start, (long long)end);
		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			break;

		/*
		 * If our kernel image is in the VMALLOC area we need to remove
		 * the kernel physical memory from lowmem since the kernel will
		 * be mapped separately.
		 *
		 * The kernel will typically be at the very start of lowmem,
		 * but any placement relative to memory ranges is possible.
		 *
		 * If the memblock contains the kernel, we have to chisel out
		 * the kernel memory from it and map each part separately. We
		 * get 6 different theoretical cases:
		 *
		 *                            +--------+ +--------+
		 *  +-- start --+ +--------+  | Kernel | | Kernel |
		 *  |           | | Kernel |  | case 2 | | case 5 |
		 *  |           | | case 1 |  +--------+ |        | +--------+
		 *  |  Memory   | +--------+             |        | | Kernel |
		 *  |  range    | +--------+             |        | | case 6 |
		 *  |           | | Kernel |  +--------+ |        | +--------+
		 *  |           | | case 3 |  | Kernel | |        |
		 *  +-- end ----+ +--------+  | case 4 | |        |
		 *                            +--------+ +--------+
		 */

		/* Case 5: kernel covers range, don't map anything, should be rare */
		if ((start > kernel_sec_start) && (end < kernel_sec_end))
			break;

		/* Cases where the kernel is starting inside the range */
		if ((kernel_sec_start >= start) && (kernel_sec_start <= end)) {
			/* Case 6: kernel is embedded in the range, we need two mappings */
			if ((start < kernel_sec_start) && (end > kernel_sec_end)) {
				/* Map memory below the kernel */
				map.pfn = __phys_to_pfn(start);
				map.virtual = __phys_to_virt(start);
				map.length = kernel_sec_start - start;
				map.type = MT_MEMORY_RW;
				create_mapping(&map);
				/* Map memory above the kernel */
				map.pfn = __phys_to_pfn(kernel_sec_end);
				map.virtual = __phys_to_virt(kernel_sec_end);
				map.length = end - kernel_sec_end;
				map.type = MT_MEMORY_RW;
				create_mapping(&map);
				break;
			}
			/* Case 1: kernel and range start at the same address, should be common */
			if (kernel_sec_start == start)
				start = kernel_sec_end;
			/* Case 3: kernel and range end at the same address, should be rare */
			if (kernel_sec_end == end)
				end = kernel_sec_start;
		} else if ((kernel_sec_start < start) && (kernel_sec_end > start) && (kernel_sec_end < end)) {
			/* Case 2: kernel ends inside range, starts below it */
			start = kernel_sec_end;
		} else if ((kernel_sec_start > start) && (kernel_sec_start < end) && (kernel_sec_end > end)) {
			/* Case 4: kernel starts inside range, ends above it */
			end = kernel_sec_start;
		}
		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY_RW;
		create_mapping(&map);
	}
}
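
/*
 * Illustrative worked example (editorial, not from the original file):
 * with a single memblock range 0x40000000..0x80000000 and the kernel
 * sections at 0x40000000..0x40e00000, case 1 applies: start is bumped
 * to kernel_sec_end and the loop maps 0x40e00000..0x80000000 as
 * MT_MEMORY_RW, while map_kernel() below maps the kernel itself with
 * separate RWX/RW sections.
 */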

static void __init map_kernel(void)
{
	/*
	 * We use the well known kernel section start and end and split the
	 * area in the middle like this:
	 *  .                .
	 *  | RW memory      |
	 *  +----------------+ kernel_x_start
	 *  | Executable     |
	 *  | kernel memory  |
	 *  +----------------+ kernel_x_end / kernel_nx_start
	 *  | Non-executable |
	 *  | kernel memory  |
	 *  +----------------+ kernel_nx_end
	 *  | RW memory      |
	 *  .                .
	 *
	 * Notice that we are dealing with section sized mappings here so
	 * all of this will be bumped to the closest section boundary. This
	 * means that some of the non-executable part of the kernel memory
	 * is actually mapped as executable. This will only persist until
	 * we turn on proper memory management later on and we remap the
	 * whole kernel with page granularity.
	 */
#ifdef CONFIG_XIP_KERNEL
	phys_addr_t kernel_nx_start = kernel_sec_start;
#else
	phys_addr_t kernel_x_start = kernel_sec_start;
	phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
	phys_addr_t kernel_nx_start = kernel_x_end;
#endif
	phys_addr_t kernel_nx_end = kernel_sec_end;
	struct map_desc map;

	/*
	 * Map the kernel if it is XIP.
	 * It is always first in the modulearea.
	 */
#ifdef CONFIG_XIP_KERNEL
	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
	map.virtual = MODULES_VADDR;
	map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK;
	map.type = MT_ROM;
	create_mapping(&map);
#else
	map.pfn = __phys_to_pfn(kernel_x_start);
	map.virtual = __phys_to_virt(kernel_x_start);
	map.length = kernel_x_end - kernel_x_start;
	map.type = MT_MEMORY_RWX;
	create_mapping(&map);

	/* If the nx part is small it may end up covered by the tail of the RWX section */
	if (kernel_x_end == kernel_nx_end)
		return;
#endif
	map.pfn = __phys_to_pfn(kernel_nx_start);
	map.virtual = __phys_to_virt(kernel_nx_start);
	map.length = kernel_nx_end - kernel_nx_start;
	map.type = MT_MEMORY_RW;
	create_mapping(&map);
}

#ifdef CONFIG_ARM_PV_FIXUP
typedef void pgtables_remap(long long offset, unsigned long pgd);
pgtables_remap lpae_pgtables_remap_asm;

/*
 * early_paging_init() recreates boot time page table setup, allowing machines
 * to switch over to a high (>4G) address space on LPAE systems
 */
static void __init early_paging_init(const struct machine_desc *mdesc)
{
	pgtables_remap *lpae_pgtables_remap;
	unsigned long pa_pgd;
	u32 cr, ttbcr, tmp;
	long long offset;

	if (!mdesc->pv_fixup)
		return;

	offset = mdesc->pv_fixup();
	if (offset == 0)
		return;

	/*
	 * Offset the kernel section physical offsets so that the kernel
	 * mapping will work out later on.
	 */
	kernel_sec_start += offset;
	kernel_sec_end += offset;

	/*
	 * Get the address of the remap function in the 1:1 identity
	 * mapping setup by the early page table assembly code.  We
	 * must get this prior to the pv update.  The following barrier
	 * ensures that this is complete before we fixup any P:V offsets.
	 */
	lpae_pgtables_remap = (pgtables_remap *)(unsigned long)__pa(lpae_pgtables_remap_asm);
	pa_pgd = __pa(swapper_pg_dir);
	barrier();

	pr_info("Switching physical address space to 0x%08llx\n",
		(u64)PHYS_OFFSET + offset);

	/* Re-set the phys pfn offset, and the pv offset */
	__pv_offset += offset;
	__pv_phys_pfn_offset += PFN_DOWN(offset);

	/* Run the patch stub to update the constants */
	fixup_pv_table(&__pv_table_begin,
		       (&__pv_table_end - &__pv_table_begin) << 2);

	/*
	 * We are changing not only the virtual to physical mapping, but also
	 * the physical addresses used to access memory.  We need to flush
	 * all levels of cache in the system with caching disabled to
	 * ensure that all data is written back, and nothing is prefetched
	 * into the caches.  We also need to prevent the TLB walkers
	 * allocating into the caches too.  Note that this is ARMv7 LPAE
	 * specific.
	 */
	cr = get_cr();
	set_cr(cr & ~(CR_I | CR_C));
	ttbcr = cpu_get_ttbcr();
	/* Disable all kind of caching of the translation table */
	tmp = ttbcr & ~(TTBCR_ORGN0_MASK | TTBCR_IRGN0_MASK);
	cpu_set_ttbcr(tmp);
	flush_cache_all();

	/*
	 * Fixup the page tables - this must be in the idmap region as
	 * we need to disable the MMU to do this safely, and hence it
	 * needs to be assembly.  It's fairly simple, as we're using the
	 * temporary tables setup by the initial assembly code.
	 */
	lpae_pgtables_remap(offset, pa_pgd);

	/* Re-enable the caches and cacheable TLB walks */
	cpu_set_ttbcr(ttbcr);
	set_cr(cr);
}

#else

static void __init early_paging_init(const struct machine_desc *mdesc)
{
	long long offset;

	if (!mdesc->pv_fixup)
		return;

	offset = mdesc->pv_fixup();
	if (offset == 0)
		return;

	pr_crit("Physical address space modification is only to support Keystone2.\n");
	pr_crit("Please enable ARM_LPAE and ARM_PATCH_PHYS_VIRT support to use this\n");
	pr_crit("feature. Your kernel may crash now, have a good day.\n");
	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
}

#endif

static void __init early_fixmap_shutdown(void)
{
	int i;
	unsigned long va = fix_to_virt(__end_of_permanent_fixed_addresses - 1);

	pte_offset_fixmap = pte_offset_late_fixmap;
	pmd_clear(fixmap_pmd(va));
	local_flush_tlb_kernel_page(va);

	for (i = 0; i < __end_of_permanent_fixed_addresses; i++) {
		pte_t *pte;
		struct map_desc map;

		map.virtual = fix_to_virt(i);
		pte = pte_offset_early_fixmap(pmd_off_k(map.virtual), map.virtual);

		/* Only i/o device mappings are supported ATM */
		if (pte_none(*pte) ||
		    (pte_val(*pte) & L_PTE_MT_MASK) != L_PTE_MT_DEV_SHARED)
			continue;

		map.pfn = pte_pfn(*pte);
		map.type = MT_DEVICE;
		map.length = PAGE_SIZE;

		create_mapping(&map);
	}
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(const struct machine_desc *mdesc)
{
#ifdef CONFIG_XIP_KERNEL
	/* Store the kernel RW RAM region start/end in these variables */
	kernel_sec_start = CONFIG_PHYS_OFFSET & SECTION_MASK;
	kernel_sec_end = round_up(__pa(_end), SECTION_SIZE);
#endif
	pr_debug("physical kernel sections: 0x%08llx-0x%08llx\n",
		 kernel_sec_start, kernel_sec_end);

	prepare_page_table();
	map_lowmem();
	memblock_set_current_limit(arm_lowmem_limit);
	pr_debug("lowmem limit is %08llx\n", (long long)arm_lowmem_limit);
	/*
	 * After this point early_alloc(), i.e. the memblock allocator, can
	 * be used.
	 */
	map_kernel();
	dma_contiguous_remap();
	early_fixmap_shutdown();
	devicemaps_init(mdesc);
	kmap_init();
	tcm_init();

	top_pmd = pmd_off_k(0xffff0000);

	bootmem_init();
}

void __init early_mm_init(const struct machine_desc *mdesc)
{
	build_mem_type_table();
	early_paging_init(mdesc);
}

void set_ptes(struct mm_struct *mm, unsigned long addr,
	      pte_t *ptep, pte_t pteval, unsigned int nr)
{
	unsigned long ext = 0;

	if (addr < TASK_SIZE && pte_valid_user(pteval)) {
		if (!pte_special(pteval))
			__sync_icache_dcache(pteval);
		ext |= PTE_EXT_NG;
	}

	for (;;) {
		set_pte_ext(ptep, pteval, ext);
		if (--nr == 0)
			break;
		ptep++;
		pteval = pte_next_pfn(pteval);
	}
}
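
/*
 * Illustrative note (editorial, not from the original file): set_ptes()
 * is the backend for the generic set_pte_at() helper, which is simply
 * set_ptes(mm, addr, ptep, pte, 1).  The loop above advances the pfn
 * with pte_next_pfn() so that a folio's pages can be installed with a
 * single call, while user mappings are uniformly marked non-global
 * (PTE_EXT_NG).
 */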