// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>

#include <asm/asm.h>
#include <asm/bios_ebda.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820/api.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/olpc_ofw.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/set_memory.h>
#include <asm/page_types.h>
#include <asm/cpu_entry_area.h>
#include <asm/init.h>
#include <asm/pgtable_areas.h>

#include "mm_internal.h"

unsigned long highstart_pfn, highend_pfn;

bool __read_mostly __vmalloc_start_set = false;
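
/*
 * On 32-bit x86 the kernel uses either two-level paging (a 10+10+12 bit
 * address split with 4 MiB pmd entries) or, with CONFIG_X86_PAE,
 * three-level paging (a 2+9+9+12 bit split with 2 MiB pmd entries).
 * The p4d and pud levels are always folded here, so the helpers below
 * only ever materialize pmd and pte pages.
 */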

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the pgd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
		pmd_table = (pmd_t *)alloc_low_page();
		paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
		p4d = p4d_offset(pgd, 0);
		pud = pud_offset(p4d, 0);
		BUG_ON(pmd_table != pmd_offset(pud, 0));

		return pmd_table;
	}
#endif
	p4d = p4d_offset(pgd, 0);
	pud = pud_offset(p4d, 0);
	pmd_table = pmd_offset(pud, 0);

	return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
		pte_t *page_table = (pte_t *)alloc_low_page();

		paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
	int pgd_idx = pgd_index(vaddr);
	int pmd_idx = pmd_index(vaddr);

	return one_md_table_init(swapper_pg_dir + pgd_idx) + pmd_idx;
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
	int pte_idx = pte_index(vaddr);
	pmd_t *pmd;

	pmd = populate_extra_pmd(vaddr);
	return one_page_table_init(pmd) + pte_idx;
}
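
/*
 * Count how many pte pages the [start, end) range will need inside the
 * kmap pmd window, so that page_table_range_init() can allocate them in
 * one contiguous block up front.
 */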
static unsigned long __init
page_table_range_init_count(unsigned long start, unsigned long end)
{
	unsigned long count = 0;
#ifdef CONFIG_HIGHMEM
	int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
	int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;
	int pgd_idx, pmd_idx;
	unsigned long vaddr;

	if (pmd_idx_kmap_begin == pmd_idx_kmap_end)
		return 0;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd_idx++) {
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
		     pmd_idx++) {
			if ((vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin &&
			    (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end)
				count++;
			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
#endif
	return count;
}

static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					   unsigned long vaddr, pte_t *lastpte,
					   void **adr)
{
#ifdef CONFIG_HIGHMEM
	/*
	 * Something (early fixmap) may already have put a pte
	 * page here, which causes the page table allocation
	 * to become nonlinear. Attempt to fix it, and if it
	 * is still nonlinear then we have to BUG().
	 */
	int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
	int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;

	if (pmd_idx_kmap_begin != pmd_idx_kmap_end
	    && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
	    && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end) {
		pte_t *newpte;
		int i;

		BUG_ON(after_bootmem);
		newpte = *adr;
		for (i = 0; i < PTRS_PER_PTE; i++)
			set_pte(newpte + i, pte[i]);
		*adr = (void *)(((unsigned long)(*adr)) + PAGE_SIZE);

		paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
		BUG_ON(newpte != pte_offset_kernel(pmd, 0));
		__flush_tlb_all();

		paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
		pte = newpte;
	}
	BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
	       && vaddr > fix_to_virt(FIX_KMAP_END)
	       && lastpte && lastpte + PTRS_PER_PTE != pte);
#endif
	return pte;
}
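
/*
 * Note that one pmd entry spans PMD_SIZE of virtual space: 4 MiB in the
 * two-level (!PAE) layout, 2 MiB with PAE.
 */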

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguously in physical space
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx;
	unsigned long vaddr;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte = NULL;
	unsigned long count = page_table_range_init_count(start, end);
	void *adr = NULL;

	if (count)
		adr = alloc_low_pages(count);

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		pmd = pmd + pmd_index(vaddr);
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
		     pmd++, pmd_idx++) {
			pte = page_table_kmap_check(one_page_table_init(pmd),
						    pmd, vaddr, pte, &adr);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}
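
/*
 * In this file, page_table_range_init() is used by permanent_kmaps_init()
 * to cover the pkmap window and by early_ioremap_page_table_range_init()
 * to cover the fixmap range.
 */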

/*
 * <linux/kallsyms.h> already defines is_kernel_text(), so use a
 * '__' prefix here to avoid a conflict.
 */
static inline int __is_kernel_text(unsigned long addr)
{
	if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
		return 1;
	return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
unsigned long __init
kernel_physical_mapping_init(unsigned long start,
			     unsigned long end,
			     unsigned long page_size_mask,
			     pgprot_t prot)
{
	int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
	unsigned long last_map_addr = end;
	unsigned long start_pfn, end_pfn;
	pgd_t *pgd_base = swapper_pg_dir;
	int pgd_idx, pmd_idx, pte_ofs;
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned pages_2m, pages_4k;
	int mapping_iter;

	start_pfn = start >> PAGE_SHIFT;
	end_pfn = end >> PAGE_SHIFT;

	/*
	 * The first iteration will set up the identity mapping using
	 * large/small pages based on use_pse, with the other attributes
	 * the same as set by the early code in head_32.S.
	 *
	 * The second iteration will set up the appropriate attributes
	 * (NX, GLOBAL..) as desired for the kernel identity mapping.
	 *
	 * This two-pass mechanism conforms to the TLB app note which says:
	 *
	 *     "Software should not write to a paging-structure entry in a
	 *      way that would change, for any linear address, both the page
	 *      size and either the page frame or attributes."
	 */
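	/*
	 * I.e. a single write must never change both the page size and the
	 * frame/attributes of an entry. Pass 1 therefore keeps the boot-time
	 * attributes while deciding the 2M/4K split; pass 2 then rewrites
	 * only the attributes, leaving page size and page frame alone.
	 */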
	mapping_iter = 1;

	if (!boot_cpu_has(X86_FEATURE_PSE))
		use_pse = 0;

repeat:
	pages_2m = pages_4k = 0;
	pfn = start_pfn;
	pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);

		if (pfn >= end_pfn)
			continue;
#ifdef CONFIG_X86_PAE
		pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
		pmd += pmd_idx;
#else
		pmd_idx = 0;
#endif
		for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
		     pmd++, pmd_idx++) {
			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

			/*
			 * Map with big pages if possible, otherwise
			 * create normal page tables:
			 */
			if (use_pse) {
				unsigned int addr2;
				pgprot_t prot = PAGE_KERNEL_LARGE;
				/*
				 * The first pass will use the same initial
				 * identity mapping attribute + _PAGE_PSE.
				 */
				pgprot_t init_prot =
					__pgprot(PTE_IDENT_ATTR |
						 _PAGE_PSE);

				pfn &= PMD_MASK >> PAGE_SHIFT;
				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
					PAGE_OFFSET + PAGE_SIZE-1;

				if (__is_kernel_text(addr) ||
				    __is_kernel_text(addr2))
					prot = PAGE_KERNEL_LARGE_EXEC;

				pages_2m++;
				if (mapping_iter == 1)
					set_pmd(pmd, pfn_pmd(pfn, init_prot));
				else
					set_pmd(pmd, pfn_pmd(pfn, prot));

				pfn += PTRS_PER_PTE;
				continue;
			}
			pte = one_page_table_init(pmd);

			pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
			pte += pte_ofs;
			for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
				pgprot_t prot = PAGE_KERNEL;
				/*
				 * The first pass will use the same initial
				 * identity mapping attribute.
				 */
				pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);

				if (__is_kernel_text(addr))
					prot = PAGE_KERNEL_EXEC;

				pages_4k++;
				if (mapping_iter == 1) {
					set_pte(pte, pfn_pte(pfn, init_prot));
					last_map_addr = (pfn << PAGE_SHIFT) + PAGE_SIZE;
				} else
					set_pte(pte, pfn_pte(pfn, prot));
			}
		}
	}
	if (mapping_iter == 1) {
		/*
		 * Update the direct mapping page count only in the first
		 * iteration.
		 */
		update_page_count(PG_LEVEL_2M, pages_2m);
		update_page_count(PG_LEVEL_4K, pages_4k);

		/*
		 * Do a global TLB flush on the local CPU, which flushes the
		 * previous mappings present in both the small and large
		 * page TLBs.
		 */
		__flush_tlb_all();

		/*
		 * The second iteration will set the actual desired PTE
		 * attributes.
		 */
		mapping_iter = 2;
		goto repeat;
	}
	return last_map_addr;
}

pte_t *kmap_pte;

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/*
	 * Cache the first kmap pte:
	 */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = virt_to_kpte(kmap_vstart);
}

#ifdef CONFIG_HIGHMEM
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	unsigned long vaddr = PKMAP_BASE;

	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pkmap_page_table = virt_to_kpte(vaddr);
}
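
/*
 * Walk all free memblock ranges on this node, clamp them to the
 * [start_pfn, end_pfn) highmem window and hand every valid page in them
 * to the buddy allocator as highmem.
 */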
void __init add_highpages_with_active_regions(int nid,
			 unsigned long start_pfn, unsigned long end_pfn)
{
	phys_addr_t start, end;
	u64 i;

	for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &start, &end, NULL) {
		unsigned long pfn = clamp_t(unsigned long, PFN_UP(start),
					    start_pfn, end_pfn);
		unsigned long e_pfn = clamp_t(unsigned long, PFN_DOWN(end),
					      start_pfn, end_pfn);
		for ( ; pfn < e_pfn; pfn++)
			if (pfn_valid(pfn))
				free_highmem_page(pfn_to_page(pfn));
	}
}
#else
static inline void permanent_kmaps_init(pgd_t *pgd_base)
{
}
#endif /* CONFIG_HIGHMEM */

void __init sync_initial_page_table(void)
{
	clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);

	/*
	 * Sync back the low identity map too. It is used for example
	 * in the 32-bit EFI stub.
	 */
	clone_pgd_range(initial_page_table,
			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
}
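
/*
 * Native implementation of the x86_init.paging.pagetable_init() hook:
 * trim the boot-time page table of mappings beyond max_low_pfn, then
 * finish page table setup via paging_init().
 */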
void __init native_pagetable_init(void)
{
	unsigned long pfn, va;
	pgd_t *pgd, *base = swapper_pg_dir;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/*
	 * Remove any mappings which extend past the end of physical
	 * memory from the boot time page table.
	 * In the virtual address space there should be at least two pages
	 * between VMALLOC_END and the pkmap or fixmap area, per the
	 * VMALLOC_END definition, and max_low_pfn corresponds to the
	 * VMALLOC_END physical address. So if the initial memory mapping
	 * did its job, we either find a pte in use near max_low_pfn or a
	 * pmd that is not present.
	 */
	for (pfn = max_low_pfn; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
		va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
		pgd = base + pgd_index(va);
		if (!pgd_present(*pgd))
			break;

		p4d = p4d_offset(pgd, va);
		pud = pud_offset(p4d, va);
		pmd = pmd_offset(pud, va);
		if (!pmd_present(*pmd))
			break;

		/* This should not be a large page: */
		if (pmd_large(*pmd)) {
			pr_warn("try to clear pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx, but pmd is big page and is not using pte !\n",
				pfn, pmd, __pa(pmd));
			BUG_ON(1);
		}

		pte = pte_offset_kernel(pmd, va);
		if (!pte_present(*pte))
			break;

		printk(KERN_DEBUG "clearing pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx pte: %p pte phys: %lx\n",
		       pfn, pmd, __pa(pmd), pte, __pa(pte));
		pte_clear(NULL, va, pte);
	}
	paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
	paging_init();
}

/*
 * Build a proper pagetable for the kernel mappings. Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S. The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir. In any case,
 * paravirt_pagetable_init() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
void __init early_ioremap_page_table_range_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;
	unsigned long vaddr, end;

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, pgd_base);
	early_ioremap_reset();
}

static void __init pagetable_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;

	permanent_kmaps_init(pgd_base);
}

#define DEFAULT_PTE_MASK ~(_PAGE_NX | _PAGE_GLOBAL)
/* Bits supported by the hardware: */
pteval_t __supported_pte_mask __read_mostly = DEFAULT_PTE_MASK;
/* Bits allowed in normal kernel mappings: */
pteval_t __default_kernel_pte_mask __read_mostly = DEFAULT_PTE_MASK;
EXPORT_SYMBOL_GPL(__supported_pte_mask);
/* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */
EXPORT_SYMBOL(__default_kernel_pte_mask);

/* user-defined highmem size */
static unsigned int highmem_pages = -1;

/*
 * highmem=size forces highmem to be exactly 'size' bytes.
 * This works even on boxes that have no highmem otherwise.
 * This also works to reduce highmem size on bigger boxes.
 */
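/*
 * Example: "highmem=512M" makes memparse() return 0x20000000, so with
 * 4 KiB pages highmem_pages becomes 0x20000000 >> 12 = 131072 pages.
 */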
static int __init parse_highmem(char *arg)
{
	if (!arg)
		return -EINVAL;

	highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
	return 0;
}
early_param("highmem", parse_highmem);

#define MSG_HIGHMEM_TOO_BIG \
	"highmem size (%luMB) is bigger than pages available (%luMB)!\n"

#define MSG_LOWMEM_TOO_SMALL \
	"highmem size (%luMB) results in <64MB lowmem, ignoring it!\n"
/*
 * All of RAM fits into lowmem - but if the user wants highmem
 * artificially via the highmem=x boot parameter then create
 * it:
 */
static void __init lowmem_pfn_init(void)
{
	/* max_low_pfn is 0, we already have early_res support */
	max_low_pfn = max_pfn;

	if (highmem_pages == -1)
		highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
	if (highmem_pages >= max_pfn) {
		printk(KERN_ERR MSG_HIGHMEM_TOO_BIG,
			pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
		highmem_pages = 0;
	}
	if (highmem_pages) {
		if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) {
			printk(KERN_ERR MSG_LOWMEM_TOO_SMALL,
				pages_to_mb(highmem_pages));
			highmem_pages = 0;
		}
		max_low_pfn -= highmem_pages;
	}
#else
	if (highmem_pages)
		printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
#endif
}
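
/*
 * Note the 64 MB lowmem floor above: stealing so much lowmem for
 * artificial highmem that less than 64 MB would remain is rejected.
 */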

#define MSG_HIGHMEM_TOO_SMALL \
	"only %luMB highmem pages available, ignoring highmem size of %luMB!\n"

#define MSG_HIGHMEM_TRIMMED \
	"Warning: only 4GB will be used. Use a HIGHMEM64G enabled kernel!\n"
/*
 * We have more RAM than fits into lowmem - we try to put it into
 * highmem, also taking the highmem=x boot parameter into account:
 */
static void __init highmem_pfn_init(void)
{
	max_low_pfn = MAXMEM_PFN;

	if (highmem_pages == -1)
		highmem_pages = max_pfn - MAXMEM_PFN;

	if (highmem_pages + MAXMEM_PFN < max_pfn)
		max_pfn = MAXMEM_PFN + highmem_pages;

	if (highmem_pages + MAXMEM_PFN > max_pfn) {
		printk(KERN_WARNING MSG_HIGHMEM_TOO_SMALL,
			pages_to_mb(max_pfn - MAXMEM_PFN),
			pages_to_mb(highmem_pages));
		highmem_pages = 0;
	}
#ifndef CONFIG_HIGHMEM
	/* Maximum memory usable is what is directly addressable */
	printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20);
	if (max_pfn > MAX_NONPAE_PFN)
		printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
	else
		printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
	max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_HIGHMEM64G
	if (max_pfn > MAX_NONPAE_PFN) {
		max_pfn = MAX_NONPAE_PFN;
		printk(KERN_WARNING MSG_HIGHMEM_TRIMMED);
	}
#endif /* !CONFIG_HIGHMEM64G */
#endif /* !CONFIG_HIGHMEM */
}
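
/*
 * Example: on a 2 GiB box (max_pfn = 524288) with the typical 896 MB
 * MAXMEM (MAXMEM_PFN = 229376), highmem_pages defaults to
 * 524288 - 229376 = 294912 pages, i.e. about 1.1 GiB of highmem.
 */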

/*
 * Determine low and high memory ranges:
 */
void __init find_low_pfn_range(void)
{
	/* This may update max_pfn: */

	if (max_pfn <= MAXMEM_PFN)
		lowmem_pfn_init();
	else
		highmem_pfn_init();
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init initmem_init(void)
{
#ifdef CONFIG_HIGHMEM
	highstart_pfn = highend_pfn = max_pfn;
	if (max_pfn > max_low_pfn)
		highstart_pfn = max_low_pfn;
	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
		pages_to_mb(highend_pfn - highstart_pfn));
	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif

	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
	sparse_memory_present_with_active_regions(0);

#ifdef CONFIG_FLATMEM
	max_mapnr = IS_ENABLED(CONFIG_HIGHMEM) ? highend_pfn : max_low_pfn;
#endif
	__vmalloc_start_set = true;

	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
			pages_to_mb(max_low_pfn));

	setup_bootmem_allocator();
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

void __init setup_bootmem_allocator(void)
{
	printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
		 max_pfn_mapped<<PAGE_SHIFT);
	printk(KERN_INFO "  low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT);
}

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
	pagetable_init();

	__flush_tlb_all();

	kmap_init();

	/*
	 * NOTE: at this point the bootmem allocator is fully available.
	 */
	olpc_dt_build_devicetree();
	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();
	zone_sizes_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's. All 586+'s are OK. This used to involve
 * black magic jumps to work around some nasty CPU bugs, but fortunately the
 * switch to using exceptions got rid of all that.
 */
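/*
 * The probe below maps the zero page read-only at FIX_WP_TEST and tries
 * to write to it from kernel mode: if probe_kernel_write() fails, the
 * CPU honoured the WP bit and write protection works as required.
 */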
static void __init test_wp_bit(void)
{
	char z = 0;

	printk(KERN_INFO "Checking if this processor honours the WP bit even in supervisor mode...");

	__set_fixmap(FIX_WP_TEST, __pa_symbol(empty_zero_page), PAGE_KERNEL_RO);

	if (probe_kernel_write((char *)fix_to_virt(FIX_WP_TEST), &z, 1)) {
		clear_fixmap(FIX_WP_TEST);
		printk(KERN_CONT "Ok.\n");
		return;
	}

	printk(KERN_CONT "No.\n");
	panic("Linux doesn't support CPUs with broken WP.");
}

void __init mem_init(void)
{
	pci_iommu_alloc();

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif
	/*
	 * With CONFIG_DEBUG_PAGEALLOC initialization of highmem pages has to
	 * be done before memblock_free_all(). Memblock uses free low memory
	 * for temporary data (see find_range_array()) and for this purpose
	 * can use pages that were already passed to the buddy allocator,
	 * hence marked as not accessible in the page tables when compiled
	 * with CONFIG_DEBUG_PAGEALLOC. Otherwise the order of initialization
	 * is not important here.
	 */
	set_highmem_pages_init();

	/* this will put all low memory onto the freelists */
	memblock_free_all();

	after_bootmem = 1;
	x86_init.hyper.init_after_bootmem();

	mem_init_print_info(NULL);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
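	/*
	 * For the build-time checks, __FIXADDR_TOP and high_memory are
	 * temporarily redefined to compile-time constants; the BUG_ON()s
	 * further below repeat the same checks with the runtime values.
	 */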
#define __FIXADDR_TOP (-PAGE_SIZE)
#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE	> FIXADDR_START);
	BUILD_BUG_ON(VMALLOC_END			> PKMAP_BASE);
#endif
#define high_memory (-128UL << 20)
	BUILD_BUG_ON(VMALLOC_START			>= VMALLOC_END);
#undef high_memory
#undef __FIXADDR_TOP

#ifdef CONFIG_HIGHMEM
	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE	> FIXADDR_START);
	BUG_ON(VMALLOC_END				> PKMAP_BASE);
#endif
	BUG_ON(VMALLOC_START				>= VMALLOC_END);
	BUG_ON((unsigned long)high_memory		> VMALLOC_START);

	test_wp_bit();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	/*
	 * The page tables were already mapped at boot so if the caller
	 * requests a different mapping type then we must change all the
	 * pages with __set_memory_prot().
	 */
	if (params->pgprot.pgprot != PAGE_KERNEL.pgprot) {
		ret = __set_memory_prot(start, nr_pages, params->pgprot);
		if (ret)
			return ret;
	}

	return __add_pages(nid, start_pfn, nr_pages, params);
}

void arch_remove_memory(int nid, u64 start, u64 size,
			struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
}
#endif
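
/*
 * Set once mark_rodata_ro() below has run; consulted e.g. by the CPA
 * code to know that the kernel text/rodata protections are in force.
 */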
int kernel_set_to_readonly __read_mostly;

static void mark_nxdata_nx(void)
{
	/*
	 * When this is called, init has already been executed and released,
	 * so everything past _etext should be NX.
	 */
	unsigned long start = PFN_ALIGN(_etext);
	/*
	 * This comes from the __is_kernel_text() upper limit, rounded up
	 * to an HPAGE boundary where huge pages were used:
	 */
	unsigned long size = (((unsigned long)__init_end + HPAGE_SIZE) & HPAGE_MASK) - start;

	if (__supported_pte_mask & _PAGE_NX)
		printk(KERN_INFO "NX-protecting the kernel data: %luk\n", size >> 10);
	set_memory_nx(start, size >> PAGE_SHIFT);
}

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = (unsigned long)__end_rodata - start;

	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	pr_info("Write protecting kernel text and read-only data: %luk\n",
		size >> 10);

	kernel_set_to_readonly = 1;

#ifdef CONFIG_CPA_DEBUG
	pr_info("Testing CPA: Reverting %lx-%lx\n", start, start + size);
	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

	pr_info("Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
	mark_nxdata_nx();
	if (__supported_pte_mask & _PAGE_NX)
		debug_checkwx();
}