/*
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>

#include <asm/asm.h>
#include <asm/bios_ebda.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820/api.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/olpc_ofw.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/set_memory.h>
#include <asm/page_types.h>
#include <asm/cpu_entry_area.h>
#include <asm/init.h>

#include "mm_internal.h"

unsigned long highstart_pfn, highend_pfn;

bool __read_mostly __vmalloc_start_set = false;

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
		pmd_table = (pmd_t *)alloc_low_page();
		paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
		p4d = p4d_offset(pgd, 0);
		pud = pud_offset(p4d, 0);
		BUG_ON(pmd_table != pmd_offset(pud, 0));

		return pmd_table;
	}
#endif
	p4d = p4d_offset(pgd, 0);
	pud = pud_offset(p4d, 0);
	pmd_table = pmd_offset(pud, 0);

	return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
		pte_t *page_table = (pte_t *)alloc_low_page();

		paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
	int pgd_idx = pgd_index(vaddr);
	int pmd_idx = pmd_index(vaddr);

	return one_md_table_init(swapper_pg_dir + pgd_idx) + pmd_idx;
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
	int pte_idx = pte_index(vaddr);
	pmd_t *pmd;

	pmd = populate_extra_pmd(vaddr);
	return one_page_table_init(pmd) + pte_idx;
}

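/*
 * Count how many PMD slots in [start, end) overlap the kmap fixmap
 * range: page_table_range_init() uses this to pre-allocate, in a single
 * contiguous alloc_low_pages() call, the PTE pages that
 * page_table_kmap_check() may consume while relocating existing tables.
 */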
static unsigned long __init
page_table_range_init_count(unsigned long start, unsigned long end)
{
	unsigned long count = 0;
#ifdef CONFIG_HIGHMEM
	int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
	int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;
	int pgd_idx, pmd_idx;
	unsigned long vaddr;

	if (pmd_idx_kmap_begin == pmd_idx_kmap_end)
		return 0;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd_idx++) {
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
		     pmd_idx++) {
			if ((vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin &&
			    (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end)
				count++;
			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
#endif
	return count;
}

static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					   unsigned long vaddr, pte_t *lastpte,
					   void **adr)
{
#ifdef CONFIG_HIGHMEM
	/*
	 * Something (early fixmap) may already have put a pte
	 * page here, which causes the page table allocation
	 * to become nonlinear. Attempt to fix it, and if it
	 * is still nonlinear then we have to bug.
	 */
	int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
	int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;

	if (pmd_idx_kmap_begin != pmd_idx_kmap_end
	    && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
	    && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end) {
		pte_t *newpte;
		int i;

		BUG_ON(after_bootmem);
		newpte = *adr;
		for (i = 0; i < PTRS_PER_PTE; i++)
			set_pte(newpte + i, pte[i]);
		*adr = (void *)(((unsigned long)(*adr)) + PAGE_SIZE);

		paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
		BUG_ON(newpte != pte_offset_kernel(pmd, 0));
		__flush_tlb_all();

		paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
		pte = newpte;
	}
	BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
	       && vaddr > fix_to_virt(FIX_KMAP_END)
	       && lastpte && lastpte + PTRS_PER_PTE != pte);
#endif
	return pte;
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguously in physical space
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx;
	unsigned long vaddr;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte = NULL;
	unsigned long count = page_table_range_init_count(start, end);
	void *adr = NULL;

	if (count)
		adr = alloc_low_pages(count);

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		pmd = pmd + pmd_index(vaddr);
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
		     pmd++, pmd_idx++) {
			pte = page_table_kmap_check(one_page_table_init(pmd),
						    pmd, vaddr, pte, &adr);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}

static inline int is_kernel_text(unsigned long addr)
{
	if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
		return 1;
	return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
unsigned long __init
kernel_physical_mapping_init(unsigned long start,
			     unsigned long end,
			     unsigned long page_size_mask)
{
	int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
	unsigned long last_map_addr = end;
	unsigned long start_pfn, end_pfn;
	pgd_t *pgd_base = swapper_pg_dir;
	int pgd_idx, pmd_idx, pte_ofs;
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned pages_2m, pages_4k;
	int mapping_iter;

	start_pfn = start >> PAGE_SHIFT;
	end_pfn = end >> PAGE_SHIFT;

	/*
	 * First iteration will setup identity mapping using large/small pages
	 * based on use_pse, with other attributes same as set by
	 * the early code in head_32.S
	 *
	 * Second iteration will setup the appropriate attributes (NX, GLOBAL..)
	 * as desired for the kernel identity mapping.
	 *
	 * This two pass mechanism conforms to the TLB app note which says:
	 *
	 *     "Software should not write to a paging-structure entry in a way
	 *      that would change, for any linear address, both the page size
	 *      and either the page frame or attributes."
	 */
	mapping_iter = 1;

	if (!boot_cpu_has(X86_FEATURE_PSE))
		use_pse = 0;

repeat:
	pages_2m = pages_4k = 0;
	pfn = start_pfn;
	pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);

		if (pfn >= end_pfn)
			continue;
#ifdef CONFIG_X86_PAE
		pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
		pmd += pmd_idx;
#else
		pmd_idx = 0;
#endif
		for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
		     pmd++, pmd_idx++) {
			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

			/*
			 * Map with big pages if possible, otherwise
			 * create normal page tables:
			 */
			if (use_pse) {
				unsigned int addr2;
				pgprot_t prot = PAGE_KERNEL_LARGE;
				/*
				 * first pass will use the same initial
				 * identity mapping attribute + _PAGE_PSE.
				 */
				pgprot_t init_prot =
					__pgprot(PTE_IDENT_ATTR |
						 _PAGE_PSE);

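				/*
				 * Round pfn down to the start of the large
				 * page; addr2 below is the last byte of that
				 * page, so the is_kernel_text() check keeps
				 * the whole large page executable if either
				 * end falls inside the kernel image.
				 */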
				pfn &= PMD_MASK >> PAGE_SHIFT;
				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
					PAGE_OFFSET + PAGE_SIZE-1;

				if (is_kernel_text(addr) ||
				    is_kernel_text(addr2))
					prot = PAGE_KERNEL_LARGE_EXEC;

				pages_2m++;
				if (mapping_iter == 1)
					set_pmd(pmd, pfn_pmd(pfn, init_prot));
				else
					set_pmd(pmd, pfn_pmd(pfn, prot));

				pfn += PTRS_PER_PTE;
				continue;
			}
			pte = one_page_table_init(pmd);

			pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
			pte += pte_ofs;
			for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
				pgprot_t prot = PAGE_KERNEL;
				/*
				 * first pass will use the same initial
				 * identity mapping attribute.
				 */
				pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);

				if (is_kernel_text(addr))
					prot = PAGE_KERNEL_EXEC;

				pages_4k++;
				if (mapping_iter == 1) {
					set_pte(pte, pfn_pte(pfn, init_prot));
					last_map_addr = (pfn << PAGE_SHIFT) + PAGE_SIZE;
				} else
					set_pte(pte, pfn_pte(pfn, prot));
			}
		}
	}
	if (mapping_iter == 1) {
		/*
		 * update direct mapping page count only in the first
		 * iteration.
		 */
		update_page_count(PG_LEVEL_2M, pages_2m);
		update_page_count(PG_LEVEL_4K, pages_4k);

		/*
		 * local global flush tlb, which will flush the previous
		 * mappings present in both small and large page TLB's.
		 */
		__flush_tlb_all();

		/*
		 * Second iteration will set the actual desired PTE attributes.
		 */
		mapping_iter = 2;
		goto repeat;
	}
	return last_map_addr;
}

pte_t *kmap_pte;

static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
	pgd_t *pgd = pgd_offset_k(vaddr);
	p4d_t *p4d = p4d_offset(pgd, vaddr);
	pud_t *pud = pud_offset(p4d, vaddr);
	pmd_t *pmd = pmd_offset(pud, vaddr);
	return pte_offset_kernel(pmd, vaddr);
}

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/*
	 * Cache the first kmap pte:
	 */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}

#ifdef CONFIG_HIGHMEM
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	unsigned long vaddr;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	p4d = p4d_offset(pgd, vaddr);
	pud = pud_offset(p4d, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}

void __init add_highpages_with_active_regions(int nid,
			 unsigned long start_pfn, unsigned long end_pfn)
{
	phys_addr_t start, end;
	u64 i;

	for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &start, &end, NULL) {
		unsigned long pfn = clamp_t(unsigned long, PFN_UP(start),
					    start_pfn, end_pfn);
		unsigned long e_pfn = clamp_t(unsigned long, PFN_DOWN(end),
					      start_pfn, end_pfn);
		for ( ; pfn < e_pfn; pfn++)
			if (pfn_valid(pfn))
				free_highmem_page(pfn_to_page(pfn));
	}
}
#else
static inline void permanent_kmaps_init(pgd_t *pgd_base)
{
}
#endif /* CONFIG_HIGHMEM */

void __init sync_initial_page_table(void)
{
	clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);

	/*
	 * sync back low identity map too. It is used for example
	 * in the 32-bit EFI stub.
	 */
	clone_pgd_range(initial_page_table,
			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
}

void __init native_pagetable_init(void)
{
	unsigned long pfn, va;
	pgd_t *pgd, *base = swapper_pg_dir;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/*
	 * Remove any mappings which extend past the end of physical
	 * memory from the boot time page table.
	 * In virtual address space, we should have at least two pages
	 * from VMALLOC_END to pkmap or fixmap according to VMALLOC_END
	 * definition. And max_low_pfn is set to VMALLOC_END physical
	 * address. If initial memory mapping is doing right job, we
	 * should have pte used near max_low_pfn or one pmd is not present.
	 */
	for (pfn = max_low_pfn; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
		va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
		pgd = base + pgd_index(va);
		if (!pgd_present(*pgd))
			break;

		p4d = p4d_offset(pgd, va);
		pud = pud_offset(p4d, va);
		pmd = pmd_offset(pud, va);
		if (!pmd_present(*pmd))
			break;

		/* should not be large page here */
		if (pmd_large(*pmd)) {
			pr_warn("try to clear pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx, but pmd is big page and is not using pte !\n",
				pfn, pmd, __pa(pmd));
			BUG_ON(1);
		}

		pte = pte_offset_kernel(pmd, va);
		if (!pte_present(*pte))
			break;

		printk(KERN_DEBUG "clearing pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx pte: %p pte phys: %lx\n",
			pfn, pmd, __pa(pmd), pte, __pa(pte));
		pte_clear(NULL, va, pte);
	}
	paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
	paging_init();
}

/*
 * Build a proper pagetable for the kernel mappings. Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S. The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir. In any case,
 * paravirt_pagetable_init() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
void __init early_ioremap_page_table_range_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;
	unsigned long vaddr, end;

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, pgd_base);
	early_ioremap_reset();
}

static void __init pagetable_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;

	permanent_kmaps_init(pgd_base);
}

#define DEFAULT_PTE_MASK ~(_PAGE_NX | _PAGE_GLOBAL)
/* Bits supported by the hardware: */
pteval_t __supported_pte_mask __read_mostly = DEFAULT_PTE_MASK;
/* Bits allowed in normal kernel mappings: */
pteval_t __default_kernel_pte_mask __read_mostly = DEFAULT_PTE_MASK;
EXPORT_SYMBOL_GPL(__supported_pte_mask);
/* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */
EXPORT_SYMBOL(__default_kernel_pte_mask);

/* user-defined highmem size */
static unsigned int highmem_pages = -1;

/*
 * highmem=size forces highmem to be exactly 'size' bytes.
 * This works even on boxes that have no highmem otherwise.
 * This also works to reduce highmem size on bigger boxes.
 */
static int __init parse_highmem(char *arg)
{
	if (!arg)
		return -EINVAL;

	highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
	return 0;
}
early_param("highmem", parse_highmem);
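
/*
 * Example (hypothetical command line): booting with "highmem=512M" makes
 * memparse() return 512 MiB, so highmem_pages becomes 131072 with 4 KiB
 * pages.
 */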

#define MSG_HIGHMEM_TOO_BIG \
	"highmem size (%luMB) is bigger than pages available (%luMB)!\n"

#define MSG_LOWMEM_TOO_SMALL \
	"highmem size (%luMB) results in <64MB lowmem, ignoring it!\n"
/*
 * All of RAM fits into lowmem - but if user wants highmem
 * artificially via the highmem=x boot parameter then create
 * it:
 */
static void __init lowmem_pfn_init(void)
{
	/* max_low_pfn is 0, we already have early_res support */
	max_low_pfn = max_pfn;

	if (highmem_pages == -1)
		highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
	if (highmem_pages >= max_pfn) {
		printk(KERN_ERR MSG_HIGHMEM_TOO_BIG,
			pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
		highmem_pages = 0;
	}
	if (highmem_pages) {
		if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) {
			printk(KERN_ERR MSG_LOWMEM_TOO_SMALL,
				pages_to_mb(highmem_pages));
			highmem_pages = 0;
		}
		max_low_pfn -= highmem_pages;
	}
#else
	if (highmem_pages)
		printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
#endif
}

#define MSG_HIGHMEM_TOO_SMALL \
	"only %luMB highmem pages available, ignoring highmem size of %luMB!\n"

#define MSG_HIGHMEM_TRIMMED \
	"Warning: only 4GB will be used. Use a HIGHMEM64G enabled kernel!\n"
/*
 * We have more RAM than fits into lowmem - we try to put it into
 * highmem, also taking the highmem=x boot parameter into account:
 */
static void __init highmem_pfn_init(void)
{
	max_low_pfn = MAXMEM_PFN;

	if (highmem_pages == -1)
		highmem_pages = max_pfn - MAXMEM_PFN;

	if (highmem_pages + MAXMEM_PFN < max_pfn)
		max_pfn = MAXMEM_PFN + highmem_pages;

	if (highmem_pages + MAXMEM_PFN > max_pfn) {
		printk(KERN_WARNING MSG_HIGHMEM_TOO_SMALL,
			pages_to_mb(max_pfn - MAXMEM_PFN),
			pages_to_mb(highmem_pages));
		highmem_pages = 0;
	}
#ifndef CONFIG_HIGHMEM
	/* Maximum memory usable is what is directly addressable */
	printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20);
	if (max_pfn > MAX_NONPAE_PFN)
		printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
	else
		printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
	max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_HIGHMEM64G
	if (max_pfn > MAX_NONPAE_PFN) {
		max_pfn = MAX_NONPAE_PFN;
		printk(KERN_WARNING MSG_HIGHMEM_TRIMMED);
	}
#endif /* !CONFIG_HIGHMEM64G */
#endif /* !CONFIG_HIGHMEM */
}

/*
 * Determine low and high memory ranges:
 */
void __init find_low_pfn_range(void)
{
	/* it could update max_pfn */

	if (max_pfn <= MAXMEM_PFN)
		lowmem_pfn_init();
	else
		highmem_pfn_init();
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init initmem_init(void)
{
#ifdef CONFIG_HIGHMEM
	highstart_pfn = highend_pfn = max_pfn;
	if (max_pfn > max_low_pfn)
		highstart_pfn = max_low_pfn;
	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
		pages_to_mb(highend_pfn - highstart_pfn));
	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif

	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
	sparse_memory_present_with_active_regions(0);

#ifdef CONFIG_FLATMEM
	max_mapnr = IS_ENABLED(CONFIG_HIGHMEM) ? highend_pfn : max_low_pfn;
#endif
	__vmalloc_start_set = true;

	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
			pages_to_mb(max_low_pfn));

	setup_bootmem_allocator();
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

void __init setup_bootmem_allocator(void)
{
	printk(KERN_INFO " mapped low ram: 0 - %08lx\n",
		 max_pfn_mapped<<PAGE_SHIFT);
	printk(KERN_INFO " low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT);
}

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
	pagetable_init();

	__flush_tlb_all();

	kmap_init();

	/*
	 * NOTE: at this point the bootmem allocator is fully available.
	 */
	olpc_dt_build_devicetree();
	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();
	zone_sizes_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's. All 586+'s are OK. This used to involve
 * black magic jumps to work around some nasty CPU bugs, but fortunately the
 * switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
	char z = 0;

	printk(KERN_INFO "Checking if this processor honours the WP bit even in supervisor mode...");

	__set_fixmap(FIX_WP_TEST, __pa_symbol(empty_zero_page), PAGE_KERNEL_RO);

	if (probe_kernel_write((char *)fix_to_virt(FIX_WP_TEST), &z, 1)) {
		clear_fixmap(FIX_WP_TEST);
		printk(KERN_CONT "Ok.\n");
		return;
	}

	printk(KERN_CONT "No.\n");
	panic("Linux doesn't support CPUs with broken WP.");
}

void __init mem_init(void)
{
	pci_iommu_alloc();

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif
	/*
	 * With CONFIG_DEBUG_PAGEALLOC initialization of highmem pages has to
	 * be done before free_all_bootmem(). Memblock uses free low memory for
	 * temporary data (see find_range_array()) and for this purpose can use
	 * pages that were already passed to the buddy allocator, hence marked as
	 * not accessible in the page tables when compiled with
	 * CONFIG_DEBUG_PAGEALLOC. Otherwise the order of initialization is not
	 * important here.
	 */
	set_highmem_pages_init();

	/* this will put all low memory onto the freelists */
	free_all_bootmem();

	after_bootmem = 1;

	mem_init_print_info(NULL);
	printk(KERN_INFO "virtual kernel memory layout:\n"
		" fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
		" cpu_entry : 0x%08lx - 0x%08lx (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		" pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
#endif
		" vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n"
		" lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n"
		" .init : 0x%08lx - 0x%08lx (%4ld kB)\n"
		" .data : 0x%08lx - 0x%08lx (%4ld kB)\n"
		" .text : 0x%08lx - 0x%08lx (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

		CPU_ENTRY_AREA_BASE,
		CPU_ENTRY_AREA_BASE + CPU_ENTRY_AREA_MAP_SIZE,
		CPU_ENTRY_AREA_MAP_SIZE >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)__va(0), (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
	BUILD_BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
#define high_memory (-128UL << 20)
	BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END);
#undef high_memory
#undef __FIXADDR_TOP

#ifdef CONFIG_HIGHMEM
	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
	BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
	BUG_ON(VMALLOC_START >= VMALLOC_END);
	BUG_ON((unsigned long)high_memory > VMALLOC_START);

	test_wp_bit();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
		bool want_memblock)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;

	zone = page_zone(pfn_to_page(start_pfn));
	return __remove_pages(zone, start_pfn, nr_pages, altmap);
}
#endif
#endif

int kernel_set_to_readonly __read_mostly;

void set_kernel_text_rw(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read write\n",
		 start, start+size);

	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
}

void set_kernel_text_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read only\n",
		 start, start+size);

	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
}

static void mark_nxdata_nx(void)
{
	/*
	 * When this is called, init has already been executed and released,
	 * so everything past _etext should be NX.
	 */
	unsigned long start = PFN_ALIGN(_etext);
	/*
	 * This comes from is_kernel_text upper limit. Also HPAGE where used:
	 */
	unsigned long size = (((unsigned long)__init_end + HPAGE_SIZE) & HPAGE_MASK) - start;

	if (__supported_pte_mask & _PAGE_NX)
		printk(KERN_INFO "NX-protecting the kernel data: %luk\n", size >> 10);
	set_pages_nx(virt_to_page(start), size >> PAGE_SHIFT);
}

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel text: %luk\n",
		size >> 10);

	kernel_set_to_readonly = 1;

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
		start, start+size);
	set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
#endif

	start += size;
	size = (unsigned long)__end_rodata - start;
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
		size >> 10);

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
	mark_nxdata_nx();
	if (__supported_pte_mask & _PAGE_NX)
		debug_checkwx();
}