// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/execmem.h>

#include <asm/asm.h>
#include <asm/bios_ebda.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820/api.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/olpc_ofw.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/set_memory.h>
#include <asm/page_types.h>
#include <asm/cpu_entry_area.h>
#include <asm/init.h>
#include <asm/pgtable_areas.h>
#include <asm/numa.h>

#include "mm_internal.h"

unsigned long highstart_pfn, highend_pfn;

bool __read_mostly __vmalloc_start_set = false;
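
/*
 * Editorial sketch (illustrative, not from the original sources) of the two
 * 32-bit paging layouts the helpers below have to cope with, assuming
 * PAGE_SHIFT == 12; the authoritative definitions live in
 * pgtable-2level_types.h and pgtable-3level_types.h:
 *
 *   non-PAE: two levels; the pgd has 1024 entries, p4d/pud/pmd are all
 *            folded into it, and one pte page maps 4MiB (PMD_SIZE).
 *   PAE:     three levels; the pgd has 4 entries, each pmd page has 512
 *            entries, and one pmd entry covers 2MiB (PMD_SIZE).
 */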

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the pgd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
		pmd_table = (pmd_t *)alloc_low_page();
		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
		p4d = p4d_offset(pgd, 0);
		pud = pud_offset(p4d, 0);
		BUG_ON(pmd_table != pmd_offset(pud, 0));

		return pmd_table;
	}
#endif
	p4d = p4d_offset(pgd, 0);
	pud = pud_offset(p4d, 0);
	pmd_table = pmd_offset(pud, 0);

	return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
		pte_t *page_table = (pte_t *)alloc_low_page();

		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
	int pgd_idx = pgd_index(vaddr);
	int pmd_idx = pmd_index(vaddr);

	return one_md_table_init(swapper_pg_dir + pgd_idx) + pmd_idx;
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
	int pte_idx = pte_index(vaddr);
	pmd_t *pmd;

	pmd = populate_extra_pmd(vaddr);
	return one_page_table_init(pmd) + pte_idx;
}

static unsigned long __init
page_table_range_init_count(unsigned long start, unsigned long end)
{
	unsigned long count = 0;
#ifdef CONFIG_HIGHMEM
	int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
	int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;
	int pgd_idx, pmd_idx;
	unsigned long vaddr;

	if (pmd_idx_kmap_begin == pmd_idx_kmap_end)
		return 0;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd_idx++) {
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
		     pmd_idx++) {
			if ((vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin &&
			    (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end)
				count++;
			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
#endif
	return count;
}
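
/*
 * Editorial note (informal): page_table_range_init() further down wants any
 * replacement pte pages for the kmap fixmap window to come from one
 * physically contiguous allocation, so the helper above counts the affected
 * pmd slots first and the result is handed to alloc_low_pages() in a
 * single call.
 */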

static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					   unsigned long vaddr, pte_t *lastpte,
					   void **adr)
{
#ifdef CONFIG_HIGHMEM
	/*
	 * Something (early fixmap) may already have put a pte
	 * page here, which causes the page table allocation
	 * to become nonlinear. Attempt to fix it, and if it
	 * is still nonlinear then we have to bug.
	 */
	int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
	int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;

	if (pmd_idx_kmap_begin != pmd_idx_kmap_end
	    && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
	    && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end) {
		pte_t *newpte;
		int i;

		BUG_ON(after_bootmem);
		newpte = *adr;
		for (i = 0; i < PTRS_PER_PTE; i++)
			set_pte(newpte + i, pte[i]);
		*adr = (void *)(((unsigned long)(*adr)) + PAGE_SIZE);

		set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
		BUG_ON(newpte != pte_offset_kernel(pmd, 0));
		__flush_tlb_all();

		pte = newpte;
	}
	BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
	       && vaddr > fix_to_virt(FIX_KMAP_END)
	       && lastpte && lastpte + PTRS_PER_PTE != pte);
#endif
	return pte;
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguously in physical memory,
 * so we can cache the location of the first one and move around
 * without checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx;
	unsigned long vaddr;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte = NULL;
	unsigned long count = page_table_range_init_count(start, end);
	void *adr = NULL;

	if (count)
		adr = alloc_low_pages(count);

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		pmd = pmd + pmd_index(vaddr);
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
		     pmd++, pmd_idx++) {
			pte = page_table_kmap_check(one_page_table_init(pmd),
						    pmd, vaddr, pte, &adr);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}

static inline int is_x86_32_kernel_text(unsigned long addr)
{
	if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
		return 1;
	return 0;
}
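
/*
 * Worked example for the PSE path below (illustrative, PAE figures):
 * PMD_MASK >> PAGE_SHIFT clears the low 9 bits of the pfn, so pfn 0x1234
 * becomes 0x1200, the first page of its 512-page/2MiB large frame, and
 * addr2 then points at the last byte mapped by that frame. If either end
 * of the frame falls inside [_text, __init_end], the whole large page is
 * mapped executable.
 */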

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
unsigned long __init
kernel_physical_mapping_init(unsigned long start,
			     unsigned long end,
			     unsigned long page_size_mask,
			     pgprot_t prot)
{
	int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
	unsigned long last_map_addr = end;
	unsigned long start_pfn, end_pfn;
	pgd_t *pgd_base = swapper_pg_dir;
	int pgd_idx, pmd_idx, pte_ofs;
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned pages_2m, pages_4k;
	int mapping_iter;

	start_pfn = start >> PAGE_SHIFT;
	end_pfn = end >> PAGE_SHIFT;

	/*
	 * The first iteration will set up the identity mapping using
	 * large/small pages based on use_pse, with the other attributes
	 * the same as set by the early code in head_32.S.
	 *
	 * The second iteration will set up the appropriate attributes
	 * (NX, GLOBAL, ...) as desired for the kernel identity mapping.
	 *
	 * This two-pass mechanism conforms to the TLB app note which says:
	 *
	 *     "Software should not write to a paging-structure entry in a way
	 *      that would change, for any linear address, both the page size
	 *      and either the page frame or attributes."
	 */
	mapping_iter = 1;

	if (!boot_cpu_has(X86_FEATURE_PSE))
		use_pse = 0;

repeat:
	pages_2m = pages_4k = 0;
	pfn = start_pfn;
	pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);

		if (pfn >= end_pfn)
			continue;
#ifdef CONFIG_X86_PAE
		pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
		pmd += pmd_idx;
#else
		pmd_idx = 0;
#endif
		for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
		     pmd++, pmd_idx++) {
			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

			/*
			 * Map with big pages if possible, otherwise
			 * create normal page tables:
			 */
			if (use_pse) {
				unsigned int addr2;
				pgprot_t prot = PAGE_KERNEL_LARGE;
				/*
				 * The first pass will use the same initial
				 * identity mapping attribute + _PAGE_PSE.
				 */
				pgprot_t init_prot =
					__pgprot(PTE_IDENT_ATTR |
						 _PAGE_PSE);

				pfn &= PMD_MASK >> PAGE_SHIFT;
				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
					PAGE_OFFSET + PAGE_SIZE-1;

				if (is_x86_32_kernel_text(addr) ||
				    is_x86_32_kernel_text(addr2))
					prot = PAGE_KERNEL_LARGE_EXEC;

				pages_2m++;
				if (mapping_iter == 1)
					set_pmd(pmd, pfn_pmd(pfn, init_prot));
				else
					set_pmd(pmd, pfn_pmd(pfn, prot));

				pfn += PTRS_PER_PTE;
				continue;
			}
			pte = one_page_table_init(pmd);

			pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
			pte += pte_ofs;
			for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
				pgprot_t prot = PAGE_KERNEL;
				/*
				 * The first pass will use the same initial
				 * identity mapping attribute.
				 */
				pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);

				if (is_x86_32_kernel_text(addr))
					prot = PAGE_KERNEL_EXEC;

				pages_4k++;
				if (mapping_iter == 1) {
					set_pte(pte, pfn_pte(pfn, init_prot));
					last_map_addr = (pfn << PAGE_SHIFT) + PAGE_SIZE;
				} else
					set_pte(pte, pfn_pte(pfn, prot));
			}
		}
	}
	if (mapping_iter == 1) {
		/*
		 * Update the direct mapping page count only in the first
		 * iteration.
		 */
		update_page_count(PG_LEVEL_2M, pages_2m);
		update_page_count(PG_LEVEL_4K, pages_4k);

		/*
		 * Do a local global TLB flush, which flushes the previous
		 * mappings from both the small and large page TLBs.
		 */
		__flush_tlb_all();

		/*
		 * The second iteration will set the actual desired PTE
		 * attributes.
		 */
		mapping_iter = 2;
		goto repeat;
	}
	return last_map_addr;
}

#ifdef CONFIG_HIGHMEM
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	unsigned long vaddr = PKMAP_BASE;

	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pkmap_page_table = virt_to_kpte(vaddr);
}
#else
static inline void permanent_kmaps_init(pgd_t *pgd_base)
{
}
#endif /* CONFIG_HIGHMEM */
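
/*
 * Sizing note for the pkmap window above (informal): LAST_PKMAP matches
 * PTRS_PER_PTE (1024 without PAE, 512 with PAE), so the whole window fits
 * in exactly one pte page and pkmap_page_table can be a single pointer.
 */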

void __init sync_initial_page_table(void)
{
	clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);

	/*
	 * Sync back the low identity map too. It is used, for example,
	 * in the 32-bit EFI stub.
	 */
	clone_pgd_range(initial_page_table,
			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
}

void __init native_pagetable_init(void)
{
	unsigned long pfn, va;
	pgd_t *pgd, *base = swapper_pg_dir;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/*
	 * Remove any mappings which extend past the end of physical
	 * memory from the boot time page table.
	 * In virtual address space, we should have at least two pages
	 * from VMALLOC_END to pkmap or fixmap according to the
	 * VMALLOC_END definition, and max_low_pfn is set from the
	 * physical address corresponding to VMALLOC_END. If the initial
	 * memory mapping did its job correctly, the ptes near
	 * max_low_pfn are either in use or the covering pmd is not
	 * present.
	 */
	for (pfn = max_low_pfn; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
		va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
		pgd = base + pgd_index(va);
		if (!pgd_present(*pgd))
			break;

		p4d = p4d_offset(pgd, va);
		pud = pud_offset(p4d, va);
		pmd = pmd_offset(pud, va);
		if (!pmd_present(*pmd))
			break;

		/* should not be large page here */
		if (pmd_leaf(*pmd)) {
			pr_warn("try to clear pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx, but pmd is big page and is not using pte !\n",
				pfn, pmd, __pa(pmd));
			BUG_ON(1);
		}

		pte = pte_offset_kernel(pmd, va);
		if (!pte_present(*pte))
			break;

		printk(KERN_DEBUG "clearing pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx pte: %p pte phys: %lx\n",
		       pfn, pmd, __pa(pmd), pte, __pa(pte));
		pte_clear(NULL, va, pte);
	}
	paging_init();
}

/*
 * Build a proper pagetable for the kernel mappings. Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * This will be a pagetable constructed in arch/x86/kernel/head_32.S.
 * The root of the pagetable will be swapper_pg_dir.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
void __init early_ioremap_page_table_range_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;
	unsigned long vaddr, end;

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, pgd_base);
	early_ioremap_reset();
}

static void __init pagetable_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;

	permanent_kmaps_init(pgd_base);
}

#define DEFAULT_PTE_MASK ~(_PAGE_NX | _PAGE_GLOBAL)
/* Bits supported by the hardware: */
pteval_t __supported_pte_mask __read_mostly = DEFAULT_PTE_MASK;
/* Bits allowed in normal kernel mappings: */
pteval_t __default_kernel_pte_mask __read_mostly = DEFAULT_PTE_MASK;
EXPORT_SYMBOL_GPL(__supported_pte_mask);
/* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */
EXPORT_SYMBOL(__default_kernel_pte_mask);

/* user-defined highmem size */
static unsigned int highmem_pages = -1;
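
/*
 * Example (illustrative): booting with "highmem=512m" makes memparse()
 * below return 512MiB, so highmem_pages ends up as 512MiB >> PAGE_SHIFT,
 * i.e. 131072 pages with 4KiB pages.
 */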

/*
 * highmem=size forces highmem to be exactly 'size' bytes.
 * This works even on boxes that have no highmem otherwise.
 * This also works to reduce highmem size on bigger boxes.
 */
static int __init parse_highmem(char *arg)
{
	if (!arg)
		return -EINVAL;

	highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
	return 0;
}
early_param("highmem", parse_highmem);

#define MSG_HIGHMEM_TOO_BIG \
	"highmem size (%luMB) is bigger than pages available (%luMB)!\n"

#define MSG_LOWMEM_TOO_SMALL \
	"highmem size (%luMB) results in <64MB lowmem, ignoring it!\n"
/*
 * All of RAM fits into lowmem - but if the user wants highmem
 * artificially via the highmem=x boot parameter then create
 * it:
 */
static void __init lowmem_pfn_init(void)
{
	/* max_low_pfn is 0, we already have early_res support */
	max_low_pfn = max_pfn;

	if (highmem_pages == -1)
		highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
	if (highmem_pages >= max_pfn) {
		printk(KERN_ERR MSG_HIGHMEM_TOO_BIG,
			pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
		highmem_pages = 0;
	}
	if (highmem_pages) {
		if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) {
			printk(KERN_ERR MSG_LOWMEM_TOO_SMALL,
				pages_to_mb(highmem_pages));
			highmem_pages = 0;
		}
		max_low_pfn -= highmem_pages;
	}
#else
	if (highmem_pages)
		printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
#endif
}

#define MSG_HIGHMEM_TOO_SMALL \
	"only %luMB highmem pages available, ignoring highmem size of %luMB!\n"

#define MSG_HIGHMEM_TRIMMED \
	"Warning: only 4GB will be used. Support for CONFIG_HIGHMEM64G was removed!\n"
/*
 * We have more RAM than fits into lowmem - we try to put it into
 * highmem, also taking the highmem=x boot parameter into account:
 */
static void __init highmem_pfn_init(void)
{
	max_low_pfn = MAXMEM_PFN;

	if (highmem_pages == -1)
		highmem_pages = max_pfn - MAXMEM_PFN;

	if (highmem_pages + MAXMEM_PFN < max_pfn)
		max_pfn = MAXMEM_PFN + highmem_pages;

	if (highmem_pages + MAXMEM_PFN > max_pfn) {
		printk(KERN_WARNING MSG_HIGHMEM_TOO_SMALL,
			pages_to_mb(max_pfn - MAXMEM_PFN),
			pages_to_mb(highmem_pages));
		highmem_pages = 0;
	}
#ifndef CONFIG_HIGHMEM
	/* Maximum memory usable is what is directly addressable */
	printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20);
	printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
	max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
	if (max_pfn > MAX_NONPAE_PFN) {
		max_pfn = MAX_NONPAE_PFN;
		printk(KERN_WARNING MSG_HIGHMEM_TRIMMED);
	}
#endif /* !CONFIG_HIGHMEM */
}

/*
 * Determine low and high memory ranges:
 */
void __init find_low_pfn_range(void)
{
	/* it could update max_pfn */

	if (max_pfn <= MAXMEM_PFN)
		lowmem_pfn_init();
	else
		highmem_pfn_init();
}

#ifndef CONFIG_NUMA
void __init initmem_init(void)
{
#ifdef CONFIG_HIGHMEM
	highstart_pfn = highend_pfn = max_pfn;
	if (max_pfn > max_low_pfn)
		highstart_pfn = max_low_pfn;
	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
		pages_to_mb(highend_pfn - highstart_pfn));
	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif

	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);

	__vmalloc_start_set = true;

	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
		pages_to_mb(max_low_pfn));

	setup_bootmem_allocator();
}
#endif /* !CONFIG_NUMA */
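
/*
 * Rough numbers for the split above (illustrative): with the default
 * 3G/1G user/kernel split, MAXMEM_PFN corresponds to roughly 896MiB of
 * directly mapped lowmem; anything beyond that is only reachable through
 * highmem mappings (kmap and friends).
 */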

void __init setup_bootmem_allocator(void)
{
	printk(KERN_INFO " mapped low ram: 0 - %08lx\n",
		max_pfn_mapped<<PAGE_SHIFT);
	printk(KERN_INFO " low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT);
}

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
	pagetable_init();

	__flush_tlb_all();

	/*
	 * NOTE: at this point the bootmem allocator is fully available.
	 */
	olpc_dt_build_devicetree();
	sparse_init();
	zone_sizes_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's. All 586+'s are OK. This used to involve
 * black magic jumps to work around some nasty CPU bugs, but fortunately the
 * switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
	char z = 0;

	printk(KERN_INFO "Checking if this processor honours the WP bit even in supervisor mode...");

	__set_fixmap(FIX_WP_TEST, __pa_symbol(empty_zero_page), PAGE_KERNEL_RO);

	if (copy_to_kernel_nofault((char *)fix_to_virt(FIX_WP_TEST), &z, 1)) {
		clear_fixmap(FIX_WP_TEST);
		printk(KERN_CONT "Ok.\n");
		return;
	}

	printk(KERN_CONT "No.\n");
	panic("Linux doesn't support CPUs with broken WP.");
}

void __init arch_mm_preinit(void)
{
	pci_iommu_alloc();

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif
}

void __init mem_init(void)
{
	after_bootmem = 1;
	x86_init.hyper.init_after_bootmem();

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
	BUILD_BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
#define high_memory (-128UL << 20)
	BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END);
#undef high_memory
#undef __FIXADDR_TOP

#ifdef CONFIG_HIGHMEM
	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
	BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
	BUG_ON(VMALLOC_START >= VMALLOC_END);
	BUG_ON((unsigned long)high_memory > VMALLOC_START);

	test_wp_bit();
}

int kernel_set_to_readonly __read_mostly;
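
/*
 * Rounding note for mark_nxdata_nx() below (informal): the end of the NX
 * range is rounded up to the next HPAGE_SIZE boundary past __init_end,
 * since the region may be mapped with large pages and this matches the
 * upper limit used by is_x86_32_kernel_text().
 */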

static void mark_nxdata_nx(void)
{
	/*
	 * When this is called, init has already been executed and released,
	 * so everything past _etext should be NX.
	 */
	unsigned long start = PFN_ALIGN(_etext);
	/*
	 * This comes from the upper limit used by is_x86_32_kernel_text();
	 * it is rounded to HPAGE_SIZE because large pages may map this
	 * region:
	 */
	unsigned long size = (((unsigned long)__init_end + HPAGE_SIZE) & HPAGE_MASK) - start;

	if (__supported_pte_mask & _PAGE_NX)
		printk(KERN_INFO "NX-protecting the kernel data: %luk\n", size >> 10);
	set_memory_nx(start, size >> PAGE_SHIFT);
}

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = (unsigned long)__end_rodata - start;

	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	pr_info("Write protecting kernel text and read-only data: %luk\n",
		size >> 10);

	execmem_cache_make_ro();

	kernel_set_to_readonly = 1;

#ifdef CONFIG_CPA_DEBUG
	pr_info("Testing CPA: Reverting %lx-%lx\n", start, start + size);
	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

	pr_info("Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
	mark_nxdata_nx();
}