xref: /linux/arch/x86/mm/init_32.c (revision e53fb04fce6d246ebed755b904ed1b0b814a754c)
1ad757b6aSThomas Gleixner /*
2ad757b6aSThomas Gleixner  *
3ad757b6aSThomas Gleixner  *  Copyright (C) 1995  Linus Torvalds
4ad757b6aSThomas Gleixner  *
5ad757b6aSThomas Gleixner  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
6ad757b6aSThomas Gleixner  */
7ad757b6aSThomas Gleixner 
8ad757b6aSThomas Gleixner #include <linux/module.h>
9ad757b6aSThomas Gleixner #include <linux/signal.h>
10ad757b6aSThomas Gleixner #include <linux/sched.h>
11ad757b6aSThomas Gleixner #include <linux/kernel.h>
12ad757b6aSThomas Gleixner #include <linux/errno.h>
13ad757b6aSThomas Gleixner #include <linux/string.h>
14ad757b6aSThomas Gleixner #include <linux/types.h>
15ad757b6aSThomas Gleixner #include <linux/ptrace.h>
16ad757b6aSThomas Gleixner #include <linux/mman.h>
17ad757b6aSThomas Gleixner #include <linux/mm.h>
18ad757b6aSThomas Gleixner #include <linux/hugetlb.h>
19ad757b6aSThomas Gleixner #include <linux/swap.h>
20ad757b6aSThomas Gleixner #include <linux/smp.h>
21ad757b6aSThomas Gleixner #include <linux/init.h>
22ad757b6aSThomas Gleixner #include <linux/highmem.h>
23ad757b6aSThomas Gleixner #include <linux/pagemap.h>
24cfb80c9eSJeremy Fitzhardinge #include <linux/pci.h>
25ad757b6aSThomas Gleixner #include <linux/pfn.h>
26ad757b6aSThomas Gleixner #include <linux/poison.h>
27ad757b6aSThomas Gleixner #include <linux/bootmem.h>
28ad757b6aSThomas Gleixner #include <linux/slab.h>
29ad757b6aSThomas Gleixner #include <linux/proc_fs.h>
30ad757b6aSThomas Gleixner #include <linux/memory_hotplug.h>
31ad757b6aSThomas Gleixner #include <linux/initrd.h>
32ad757b6aSThomas Gleixner #include <linux/cpumask.h>
33ad757b6aSThomas Gleixner 
34f832ff18SH. Peter Anvin #include <asm/asm.h>
3546eaa670SIngo Molnar #include <asm/bios_ebda.h>
36ad757b6aSThomas Gleixner #include <asm/processor.h>
37ad757b6aSThomas Gleixner #include <asm/system.h>
38ad757b6aSThomas Gleixner #include <asm/uaccess.h>
39ad757b6aSThomas Gleixner #include <asm/pgtable.h>
40ad757b6aSThomas Gleixner #include <asm/dma.h>
41ad757b6aSThomas Gleixner #include <asm/fixmap.h>
42ad757b6aSThomas Gleixner #include <asm/e820.h>
43ad757b6aSThomas Gleixner #include <asm/apic.h>
448550eb99SIngo Molnar #include <asm/bugs.h>
45ad757b6aSThomas Gleixner #include <asm/tlb.h>
46ad757b6aSThomas Gleixner #include <asm/tlbflush.h>
47a5a19c63SJeremy Fitzhardinge #include <asm/pgalloc.h>
48ad757b6aSThomas Gleixner #include <asm/sections.h>
49ad757b6aSThomas Gleixner #include <asm/paravirt.h>
50551889a6SIan Campbell #include <asm/setup.h>
517bfeab9aSHarvey Harrison #include <asm/cacheflush.h>
52ad757b6aSThomas Gleixner 
53f361a450SYinghai Lu unsigned long max_low_pfn_mapped;
5467794292SThomas Gleixner unsigned long max_pfn_mapped;
557d1116a9SAndi Kleen 
56ad757b6aSThomas Gleixner DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
57ad757b6aSThomas Gleixner unsigned long highstart_pfn, highend_pfn;
58ad757b6aSThomas Gleixner 
598550eb99SIngo Molnar static noinline int do_test_wp_bit(void);
60ad757b6aSThomas Gleixner 
614e29684cSYinghai Lu 
62298af9d8SPekka Enberg extern unsigned long __initdata e820_table_start;
63298af9d8SPekka Enberg extern unsigned long __meminitdata e820_table_end;
64298af9d8SPekka Enberg extern unsigned long __meminitdata e820_table_top;
654bbd4fa0SPekka Enberg 
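/*
 * e820_table_start/end/top are set up during early boot (see
 * arch/x86/mm/init.c) and bracket a physically contiguous window of
 * pages reserved for early page tables.  alloc_low_page() hands out
 * the next page from that window, bumping e820_table_end, and panics
 * once the window is exhausted.
 */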
66d6be89adSJan Beulich static __init void *alloc_low_page(void)
674e29684cSYinghai Lu {
68298af9d8SPekka Enberg 	unsigned long pfn = e820_table_end++;
694e29684cSYinghai Lu 	void *adr;
704e29684cSYinghai Lu 
71298af9d8SPekka Enberg 	if (pfn >= e820_table_top)
724e29684cSYinghai Lu 		panic("alloc_low_page: ran out of memory");
734e29684cSYinghai Lu 
744e29684cSYinghai Lu 	adr = __va(pfn * PAGE_SIZE);
754e29684cSYinghai Lu 	memset(adr, 0, PAGE_SIZE);
764e29684cSYinghai Lu 	return adr;
774e29684cSYinghai Lu }
784e29684cSYinghai Lu 
79ad757b6aSThomas Gleixner /*
80ad757b6aSThomas Gleixner  * Creates a middle page table and puts a pointer to it in the
81ad757b6aSThomas Gleixner  * given global directory entry. This only returns the pgd entry
82ad757b6aSThomas Gleixner  * in non-PAE compilation mode, since the middle (pmd) layer is folded.
83ad757b6aSThomas Gleixner  */
84ad757b6aSThomas Gleixner static pmd_t * __init one_md_table_init(pgd_t *pgd)
85ad757b6aSThomas Gleixner {
86ad757b6aSThomas Gleixner 	pud_t *pud;
87ad757b6aSThomas Gleixner 	pmd_t *pmd_table;
88ad757b6aSThomas Gleixner 
89ad757b6aSThomas Gleixner #ifdef CONFIG_X86_PAE
90ad757b6aSThomas Gleixner 	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
91c464573cSPekka Enberg 		if (after_bootmem)
92ad757b6aSThomas Gleixner 			pmd_table = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
934e29684cSYinghai Lu 		else
94d6be89adSJan Beulich 			pmd_table = (pmd_t *)alloc_low_page();
956944a9c8SJeremy Fitzhardinge 		paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
96ad757b6aSThomas Gleixner 		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
97ad757b6aSThomas Gleixner 		pud = pud_offset(pgd, 0);
988550eb99SIngo Molnar 		BUG_ON(pmd_table != pmd_offset(pud, 0));
99a376f30aSZhaolei 
100a376f30aSZhaolei 		return pmd_table;
101ad757b6aSThomas Gleixner 	}
102ad757b6aSThomas Gleixner #endif
103ad757b6aSThomas Gleixner 	pud = pud_offset(pgd, 0);
104ad757b6aSThomas Gleixner 	pmd_table = pmd_offset(pud, 0);
1058550eb99SIngo Molnar 
106ad757b6aSThomas Gleixner 	return pmd_table;
107ad757b6aSThomas Gleixner }
108ad757b6aSThomas Gleixner 
109ad757b6aSThomas Gleixner /*
110ad757b6aSThomas Gleixner  * Create a page table and place a pointer to it in a middle page
1118550eb99SIngo Molnar  * directory entry:
112ad757b6aSThomas Gleixner  */
113ad757b6aSThomas Gleixner static pte_t * __init one_page_table_init(pmd_t *pmd)
114ad757b6aSThomas Gleixner {
115ad757b6aSThomas Gleixner 	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
116509a80c4SIngo Molnar 		pte_t *page_table = NULL;
117509a80c4SIngo Molnar 
118c464573cSPekka Enberg 		if (after_bootmem) {
119509a80c4SIngo Molnar #ifdef CONFIG_DEBUG_PAGEALLOC
120509a80c4SIngo Molnar 			page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
121509a80c4SIngo Molnar #endif
1224e29684cSYinghai Lu 			if (!page_table)
123509a80c4SIngo Molnar 				page_table =
124509a80c4SIngo Molnar 				(pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
125d6be89adSJan Beulich 		} else
126d6be89adSJan Beulich 			page_table = (pte_t *)alloc_low_page();
127ad757b6aSThomas Gleixner 
1286944a9c8SJeremy Fitzhardinge 		paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
129ad757b6aSThomas Gleixner 		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
130ad757b6aSThomas Gleixner 		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
131ad757b6aSThomas Gleixner 	}
132ad757b6aSThomas Gleixner 
133ad757b6aSThomas Gleixner 	return pte_offset_kernel(pmd, 0);
134ad757b6aSThomas Gleixner }
135ad757b6aSThomas Gleixner 
136a3c6018eSJan Beulich static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
137a3c6018eSJan Beulich 					   unsigned long vaddr, pte_t *lastpte)
138a3c6018eSJan Beulich {
139a3c6018eSJan Beulich #ifdef CONFIG_HIGHMEM
140a3c6018eSJan Beulich 	/*
141a3c6018eSJan Beulich 	 * Something (early fixmap) may already have put a pte
142a3c6018eSJan Beulich 	 * page here, which causes the page table allocation
143a3c6018eSJan Beulich 	 * to become nonlinear. Attempt to fix it, and if it
144a3c6018eSJan Beulich 	 * is still nonlinear then we have to BUG().
145a3c6018eSJan Beulich 	 */
146a3c6018eSJan Beulich 	int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
147a3c6018eSJan Beulich 	int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;
148a3c6018eSJan Beulich 
149a3c6018eSJan Beulich 	if (pmd_idx_kmap_begin != pmd_idx_kmap_end
150a3c6018eSJan Beulich 	    && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
151a3c6018eSJan Beulich 	    && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end
152298af9d8SPekka Enberg 	    && ((__pa(pte) >> PAGE_SHIFT) < e820_table_start
153298af9d8SPekka Enberg 		|| (__pa(pte) >> PAGE_SHIFT) >= e820_table_end)) {
154a3c6018eSJan Beulich 		pte_t *newpte;
155a3c6018eSJan Beulich 		int i;
156a3c6018eSJan Beulich 
157c464573cSPekka Enberg 		BUG_ON(after_bootmem);
158a3c6018eSJan Beulich 		newpte = alloc_low_page();
159a3c6018eSJan Beulich 		for (i = 0; i < PTRS_PER_PTE; i++)
160a3c6018eSJan Beulich 			set_pte(newpte + i, pte[i]);
161a3c6018eSJan Beulich 
162a3c6018eSJan Beulich 		paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
163a3c6018eSJan Beulich 		set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
164a3c6018eSJan Beulich 		BUG_ON(newpte != pte_offset_kernel(pmd, 0));
165a3c6018eSJan Beulich 		__flush_tlb_all();
166a3c6018eSJan Beulich 
167a3c6018eSJan Beulich 		paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
168a3c6018eSJan Beulich 		pte = newpte;
169a3c6018eSJan Beulich 	}
170a3c6018eSJan Beulich 	BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
171a3c6018eSJan Beulich 	       && vaddr > fix_to_virt(FIX_KMAP_END)
172a3c6018eSJan Beulich 	       && lastpte && lastpte + PTRS_PER_PTE != pte);
173a3c6018eSJan Beulich #endif
174a3c6018eSJan Beulich 	return pte;
175a3c6018eSJan Beulich }
176a3c6018eSJan Beulich 
177ad757b6aSThomas Gleixner /*
178ad757b6aSThomas Gleixner  * This function initializes a certain range of kernel virtual memory
179ad757b6aSThomas Gleixner  * with new bootmem page tables, wherever page tables are missing in
180ad757b6aSThomas Gleixner  * the given range.
1818550eb99SIngo Molnar  *
182ad757b6aSThomas Gleixner  * NOTE: The pagetables are allocated contiguously in physical memory,
183ad757b6aSThomas Gleixner  * so we can cache the location of the first one and move around
184ad757b6aSThomas Gleixner  * without checking the pgd every time.
185ad757b6aSThomas Gleixner  */
1868550eb99SIngo Molnar static void __init
1878550eb99SIngo Molnar page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
188ad757b6aSThomas Gleixner {
189ad757b6aSThomas Gleixner 	int pgd_idx, pmd_idx;
190ad757b6aSThomas Gleixner 	unsigned long vaddr;
1918550eb99SIngo Molnar 	pgd_t *pgd;
1928550eb99SIngo Molnar 	pmd_t *pmd;
193a3c6018eSJan Beulich 	pte_t *pte = NULL;
194ad757b6aSThomas Gleixner 
195ad757b6aSThomas Gleixner 	vaddr = start;
196ad757b6aSThomas Gleixner 	pgd_idx = pgd_index(vaddr);
197ad757b6aSThomas Gleixner 	pmd_idx = pmd_index(vaddr);
198ad757b6aSThomas Gleixner 	pgd = pgd_base + pgd_idx;
199ad757b6aSThomas Gleixner 
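	/*
	 * Each pass of the inner loop below covers PMD_SIZE of virtual
	 * address space: 4MB with 2-level paging (the pmd is folded into
	 * the pgd), 2MB with PAE.
	 */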
200ad757b6aSThomas Gleixner 	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
201ad757b6aSThomas Gleixner 		pmd = one_md_table_init(pgd);
202ad757b6aSThomas Gleixner 		pmd = pmd + pmd_index(vaddr);
2038550eb99SIngo Molnar 		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
2048550eb99SIngo Molnar 							pmd++, pmd_idx++) {
205a3c6018eSJan Beulich 			pte = page_table_kmap_check(one_page_table_init(pmd),
206a3c6018eSJan Beulich 			                            pmd, vaddr, pte);
207ad757b6aSThomas Gleixner 
208ad757b6aSThomas Gleixner 			vaddr += PMD_SIZE;
209ad757b6aSThomas Gleixner 		}
210ad757b6aSThomas Gleixner 		pmd_idx = 0;
211ad757b6aSThomas Gleixner 	}
212ad757b6aSThomas Gleixner }
213ad757b6aSThomas Gleixner 
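/*
 * Note that this treats everything from PAGE_OFFSET up to __init_end as
 * kernel text, not just .text; the result is only used below to decide
 * whether a mapping may be executable, so the over-wide range merely
 * keeps those low pages executable.
 */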
214ad757b6aSThomas Gleixner static inline int is_kernel_text(unsigned long addr)
215ad757b6aSThomas Gleixner {
216ad757b6aSThomas Gleixner 	if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
217ad757b6aSThomas Gleixner 		return 1;
218ad757b6aSThomas Gleixner 	return 0;
219ad757b6aSThomas Gleixner }
220ad757b6aSThomas Gleixner 
221ad757b6aSThomas Gleixner /*
222ad757b6aSThomas Gleixner  * This maps the physical memory range [start, end) into the kernel
223ad757b6aSThomas Gleixner  * virtual address space, by creating page tables starting from address
2248550eb99SIngo Molnar  * PAGE_OFFSET:
225ad757b6aSThomas Gleixner  */
226*e53fb04fSPekka Enberg unsigned long __init
227*e53fb04fSPekka Enberg kernel_physical_mapping_init(unsigned long start,
228*e53fb04fSPekka Enberg 			     unsigned long end,
229*e53fb04fSPekka Enberg 			     unsigned long page_size_mask)
230ad757b6aSThomas Gleixner {
231*e53fb04fSPekka Enberg 	int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
232*e53fb04fSPekka Enberg 	unsigned long start_pfn, end_pfn;
233e7179853SPekka Enberg 	pgd_t *pgd_base = swapper_pg_dir;
2348550eb99SIngo Molnar 	int pgd_idx, pmd_idx, pte_ofs;
235ad757b6aSThomas Gleixner 	unsigned long pfn;
236ad757b6aSThomas Gleixner 	pgd_t *pgd;
237ad757b6aSThomas Gleixner 	pmd_t *pmd;
238ad757b6aSThomas Gleixner 	pte_t *pte;
239a2699e47SSuresh Siddha 	unsigned pages_2m, pages_4k;
240a2699e47SSuresh Siddha 	int mapping_iter;
241a2699e47SSuresh Siddha 
242*e53fb04fSPekka Enberg 	start_pfn = start >> PAGE_SHIFT;
243*e53fb04fSPekka Enberg 	end_pfn = end >> PAGE_SHIFT;
244*e53fb04fSPekka Enberg 
245a2699e47SSuresh Siddha 	/*
246a2699e47SSuresh Siddha 	 * First iteration will set up the identity mapping using large/small pages
247a2699e47SSuresh Siddha 	 * based on use_pse, with other attributes same as set by
248a2699e47SSuresh Siddha 	 * the early code in head_32.S
249a2699e47SSuresh Siddha 	 *
250a2699e47SSuresh Siddha 	 * Second iteration will set up the appropriate attributes (NX, GLOBAL..)
251a2699e47SSuresh Siddha 	 * as desired for the kernel identity mapping.
252a2699e47SSuresh Siddha 	 *
253a2699e47SSuresh Siddha 	 * This two-pass mechanism conforms to the TLB application note, which says:
254a2699e47SSuresh Siddha 	 *
255a2699e47SSuresh Siddha 	 *     "Software should not write to a paging-structure entry in a way
256a2699e47SSuresh Siddha 	 *      that would change, for any linear address, both the page size
257a2699e47SSuresh Siddha 	 *      and either the page frame or attributes."
258a2699e47SSuresh Siddha 	 */
259a2699e47SSuresh Siddha 	mapping_iter = 1;
260ad757b6aSThomas Gleixner 
261a04ad82dSYinghai Lu 	if (!cpu_has_pse)
262a04ad82dSYinghai Lu 		use_pse = 0;
263a04ad82dSYinghai Lu 
264a2699e47SSuresh Siddha repeat:
265a2699e47SSuresh Siddha 	pages_2m = pages_4k = 0;
266a04ad82dSYinghai Lu 	pfn = start_pfn;
267a04ad82dSYinghai Lu 	pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
268ad757b6aSThomas Gleixner 	pgd = pgd_base + pgd_idx;
269ad757b6aSThomas Gleixner 	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
270ad757b6aSThomas Gleixner 		pmd = one_md_table_init(pgd);
2718550eb99SIngo Molnar 
272a04ad82dSYinghai Lu 		if (pfn >= end_pfn)
273a04ad82dSYinghai Lu 			continue;
274a04ad82dSYinghai Lu #ifdef CONFIG_X86_PAE
275a04ad82dSYinghai Lu 		pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
276a04ad82dSYinghai Lu 		pmd += pmd_idx;
277a04ad82dSYinghai Lu #else
278a04ad82dSYinghai Lu 		pmd_idx = 0;
279a04ad82dSYinghai Lu #endif
280a04ad82dSYinghai Lu 		for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
281f3f20de8SJeremy Fitzhardinge 		     pmd++, pmd_idx++) {
2828550eb99SIngo Molnar 			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
283ad757b6aSThomas Gleixner 
2848550eb99SIngo Molnar 			/*
2858550eb99SIngo Molnar 			 * Map with big pages if possible, otherwise
2868550eb99SIngo Molnar 			 * create normal page tables:
2878550eb99SIngo Molnar 			 */
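			/*
			 * One PSE mapping covers PTRS_PER_PTE small pages:
			 * 4MB without PAE, 2MB with PAE.  addr2 is the last
			 * byte of that range, so both ends are checked
			 * against the kernel text before deciding whether
			 * the large page may be executable.
			 */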
288a04ad82dSYinghai Lu 			if (use_pse) {
2898550eb99SIngo Molnar 				unsigned int addr2;
290f3f20de8SJeremy Fitzhardinge 				pgprot_t prot = PAGE_KERNEL_LARGE;
291a2699e47SSuresh Siddha 				/*
292a2699e47SSuresh Siddha 				 * first pass will use the same initial
293a2699e47SSuresh Siddha 				 * identity mapping attribute + _PAGE_PSE.
294a2699e47SSuresh Siddha 				 */
295a2699e47SSuresh Siddha 				pgprot_t init_prot =
296a2699e47SSuresh Siddha 					__pgprot(PTE_IDENT_ATTR |
297a2699e47SSuresh Siddha 						 _PAGE_PSE);
298f3f20de8SJeremy Fitzhardinge 
2998550eb99SIngo Molnar 				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
300f3f20de8SJeremy Fitzhardinge 					PAGE_OFFSET + PAGE_SIZE-1;
301f3f20de8SJeremy Fitzhardinge 
3028550eb99SIngo Molnar 				if (is_kernel_text(addr) ||
3038550eb99SIngo Molnar 				    is_kernel_text(addr2))
304f3f20de8SJeremy Fitzhardinge 					prot = PAGE_KERNEL_LARGE_EXEC;
305f3f20de8SJeremy Fitzhardinge 
306ce0c0e50SAndi Kleen 				pages_2m++;
307a2699e47SSuresh Siddha 				if (mapping_iter == 1)
308a2699e47SSuresh Siddha 					set_pmd(pmd, pfn_pmd(pfn, init_prot));
309a2699e47SSuresh Siddha 				else
310f3f20de8SJeremy Fitzhardinge 					set_pmd(pmd, pfn_pmd(pfn, prot));
311ad757b6aSThomas Gleixner 
312ad757b6aSThomas Gleixner 				pfn += PTRS_PER_PTE;
3138550eb99SIngo Molnar 				continue;
3148550eb99SIngo Molnar 			}
315ad757b6aSThomas Gleixner 			pte = one_page_table_init(pmd);
316ad757b6aSThomas Gleixner 
317a04ad82dSYinghai Lu 			pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
318a04ad82dSYinghai Lu 			pte += pte_ofs;
319a04ad82dSYinghai Lu 			for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
3208550eb99SIngo Molnar 			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
321f3f20de8SJeremy Fitzhardinge 				pgprot_t prot = PAGE_KERNEL;
322a2699e47SSuresh Siddha 				/*
323a2699e47SSuresh Siddha 				 * first pass will use the same initial
324a2699e47SSuresh Siddha 				 * identity mapping attribute.
325a2699e47SSuresh Siddha 				 */
326a2699e47SSuresh Siddha 				pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
327f3f20de8SJeremy Fitzhardinge 
3288550eb99SIngo Molnar 				if (is_kernel_text(addr))
329f3f20de8SJeremy Fitzhardinge 					prot = PAGE_KERNEL_EXEC;
330f3f20de8SJeremy Fitzhardinge 
331ce0c0e50SAndi Kleen 				pages_4k++;
332a2699e47SSuresh Siddha 				if (mapping_iter == 1)
333a2699e47SSuresh Siddha 					set_pte(pte, pfn_pte(pfn, init_prot));
334a2699e47SSuresh Siddha 				else
335f3f20de8SJeremy Fitzhardinge 					set_pte(pte, pfn_pte(pfn, prot));
336ad757b6aSThomas Gleixner 			}
337ad757b6aSThomas Gleixner 		}
338ad757b6aSThomas Gleixner 	}
339a2699e47SSuresh Siddha 	if (mapping_iter == 1) {
340a2699e47SSuresh Siddha 		/*
341a2699e47SSuresh Siddha 		 * update direct mapping page count only in the first
342a2699e47SSuresh Siddha 		 * iteration.
343a2699e47SSuresh Siddha 		 */
344ce0c0e50SAndi Kleen 		update_page_count(PG_LEVEL_2M, pages_2m);
345ce0c0e50SAndi Kleen 		update_page_count(PG_LEVEL_4K, pages_4k);
346a2699e47SSuresh Siddha 
347a2699e47SSuresh Siddha 		/*
348a2699e47SSuresh Siddha 	 * Flush the local TLB, including global pages, so that the previous
349a2699e47SSuresh Siddha 	 * mappings are dropped from both the small and large page TLBs.
350a2699e47SSuresh Siddha 		 */
351a2699e47SSuresh Siddha 		__flush_tlb_all();
352a2699e47SSuresh Siddha 
353a2699e47SSuresh Siddha 		/*
354a2699e47SSuresh Siddha 		 * Second iteration will set the actual desired PTE attributes.
355a2699e47SSuresh Siddha 		 */
356a2699e47SSuresh Siddha 		mapping_iter = 2;
357a2699e47SSuresh Siddha 		goto repeat;
358a2699e47SSuresh Siddha 	}
359*e53fb04fSPekka Enberg 	return 0;
360ad757b6aSThomas Gleixner }
361ad757b6aSThomas Gleixner 
362ad757b6aSThomas Gleixner pte_t *kmap_pte;
363ad757b6aSThomas Gleixner pgprot_t kmap_prot;
364ad757b6aSThomas Gleixner 
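/*
 * Walk pgd -> pud -> pmd down to the kernel pte that maps @vaddr;
 * used below to cache the pte of the first kmap slot.
 */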
3658550eb99SIngo Molnar static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
3668550eb99SIngo Molnar {
3678550eb99SIngo Molnar 	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
3688550eb99SIngo Molnar 			vaddr), vaddr), vaddr);
3698550eb99SIngo Molnar }
370ad757b6aSThomas Gleixner 
371ad757b6aSThomas Gleixner static void __init kmap_init(void)
372ad757b6aSThomas Gleixner {
373ad757b6aSThomas Gleixner 	unsigned long kmap_vstart;
374ad757b6aSThomas Gleixner 
3758550eb99SIngo Molnar 	/*
3768550eb99SIngo Molnar 	 * Cache the first kmap pte:
3778550eb99SIngo Molnar 	 */
378ad757b6aSThomas Gleixner 	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
379ad757b6aSThomas Gleixner 	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
380ad757b6aSThomas Gleixner 
381ad757b6aSThomas Gleixner 	kmap_prot = PAGE_KERNEL;
382ad757b6aSThomas Gleixner }
383ad757b6aSThomas Gleixner 
384fd940934SKeith Packard #ifdef CONFIG_HIGHMEM
385ad757b6aSThomas Gleixner static void __init permanent_kmaps_init(pgd_t *pgd_base)
386ad757b6aSThomas Gleixner {
3878550eb99SIngo Molnar 	unsigned long vaddr;
388ad757b6aSThomas Gleixner 	pgd_t *pgd;
389ad757b6aSThomas Gleixner 	pud_t *pud;
390ad757b6aSThomas Gleixner 	pmd_t *pmd;
391ad757b6aSThomas Gleixner 	pte_t *pte;
392ad757b6aSThomas Gleixner 
393ad757b6aSThomas Gleixner 	vaddr = PKMAP_BASE;
394ad757b6aSThomas Gleixner 	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
395ad757b6aSThomas Gleixner 
396ad757b6aSThomas Gleixner 	pgd = swapper_pg_dir + pgd_index(vaddr);
397ad757b6aSThomas Gleixner 	pud = pud_offset(pgd, vaddr);
398ad757b6aSThomas Gleixner 	pmd = pmd_offset(pud, vaddr);
399ad757b6aSThomas Gleixner 	pte = pte_offset_kernel(pmd, vaddr);
400ad757b6aSThomas Gleixner 	pkmap_page_table = pte;
401ad757b6aSThomas Gleixner }
402ad757b6aSThomas Gleixner 
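/*
 * Hand one highmem page to the buddy allocator: clear PG_reserved,
 * reset its refcount, free it and account it in totalhigh_pages.
 */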
403cc9f7a0cSYinghai Lu static void __init add_one_highpage_init(struct page *page, int pfn)
404ad757b6aSThomas Gleixner {
405ad757b6aSThomas Gleixner 	ClearPageReserved(page);
406180c06efSJeremy Fitzhardinge 	init_page_count(page);
407180c06efSJeremy Fitzhardinge 	__free_page(page);
408180c06efSJeremy Fitzhardinge 	totalhigh_pages++;
409ad757b6aSThomas Gleixner }
410ad757b6aSThomas Gleixner 
411b5bc6c0eSYinghai Lu struct add_highpages_data {
412b5bc6c0eSYinghai Lu 	unsigned long start_pfn;
413b5bc6c0eSYinghai Lu 	unsigned long end_pfn;
414b5bc6c0eSYinghai Lu };
415b5bc6c0eSYinghai Lu 
416d52d53b8SYinghai Lu static int __init add_highpages_work_fn(unsigned long start_pfn,
417b5bc6c0eSYinghai Lu 					 unsigned long end_pfn, void *datax)
418b5bc6c0eSYinghai Lu {
419b5bc6c0eSYinghai Lu 	int node_pfn;
420b5bc6c0eSYinghai Lu 	struct page *page;
421b5bc6c0eSYinghai Lu 	unsigned long final_start_pfn, final_end_pfn;
422b5bc6c0eSYinghai Lu 	struct add_highpages_data *data;
423b5bc6c0eSYinghai Lu 
424b5bc6c0eSYinghai Lu 	data = (struct add_highpages_data *)datax;
425b5bc6c0eSYinghai Lu 
426b5bc6c0eSYinghai Lu 	final_start_pfn = max(start_pfn, data->start_pfn);
427b5bc6c0eSYinghai Lu 	final_end_pfn = min(end_pfn, data->end_pfn);
428b5bc6c0eSYinghai Lu 	if (final_start_pfn >= final_end_pfn)
429d52d53b8SYinghai Lu 		return 0;
430b5bc6c0eSYinghai Lu 
431b5bc6c0eSYinghai Lu 	for (node_pfn = final_start_pfn; node_pfn < final_end_pfn;
432b5bc6c0eSYinghai Lu 	     node_pfn++) {
433b5bc6c0eSYinghai Lu 		if (!pfn_valid(node_pfn))
434b5bc6c0eSYinghai Lu 			continue;
435b5bc6c0eSYinghai Lu 		page = pfn_to_page(node_pfn);
436cc9f7a0cSYinghai Lu 		add_one_highpage_init(page, node_pfn);
437b5bc6c0eSYinghai Lu 	}
438b5bc6c0eSYinghai Lu 
439d52d53b8SYinghai Lu 	return 0;
440d52d53b8SYinghai Lu 
441b5bc6c0eSYinghai Lu }
442b5bc6c0eSYinghai Lu 
443b5bc6c0eSYinghai Lu void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
444cc9f7a0cSYinghai Lu 					      unsigned long end_pfn)
445b5bc6c0eSYinghai Lu {
446b5bc6c0eSYinghai Lu 	struct add_highpages_data data;
447b5bc6c0eSYinghai Lu 
448b5bc6c0eSYinghai Lu 	data.start_pfn = start_pfn;
449b5bc6c0eSYinghai Lu 	data.end_pfn = end_pfn;
450b5bc6c0eSYinghai Lu 
451b5bc6c0eSYinghai Lu 	work_with_active_regions(nid, add_highpages_work_fn, &data);
452ad757b6aSThomas Gleixner }
453ad757b6aSThomas Gleixner 
454ad757b6aSThomas Gleixner #else
455e8e32326SIngo Brueckl static inline void permanent_kmaps_init(pgd_t *pgd_base)
456e8e32326SIngo Brueckl {
457e8e32326SIngo Brueckl }
458ad757b6aSThomas Gleixner #endif /* CONFIG_HIGHMEM */
459ad757b6aSThomas Gleixner 
460ad757b6aSThomas Gleixner void __init native_pagetable_setup_start(pgd_t *base)
461ad757b6aSThomas Gleixner {
462551889a6SIan Campbell 	unsigned long pfn, va;
463551889a6SIan Campbell 	pgd_t *pgd;
464551889a6SIan Campbell 	pud_t *pud;
465551889a6SIan Campbell 	pmd_t *pmd;
466551889a6SIan Campbell 	pte_t *pte;
467ad757b6aSThomas Gleixner 
468ad757b6aSThomas Gleixner 	/*
469551889a6SIan Campbell 	 * Remove any mappings which extend past the end of physical
470551889a6SIan Campbell 	 * memory from the boot time page table:
471ad757b6aSThomas Gleixner 	 */
472551889a6SIan Campbell 	for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
473551889a6SIan Campbell 		va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
474551889a6SIan Campbell 		pgd = base + pgd_index(va);
475551889a6SIan Campbell 		if (!pgd_present(*pgd))
476551889a6SIan Campbell 			break;
477ad757b6aSThomas Gleixner 
478551889a6SIan Campbell 		pud = pud_offset(pgd, va);
479551889a6SIan Campbell 		pmd = pmd_offset(pud, va);
480551889a6SIan Campbell 		if (!pmd_present(*pmd))
481551889a6SIan Campbell 			break;
482551889a6SIan Campbell 
483551889a6SIan Campbell 		pte = pte_offset_kernel(pmd, va);
484551889a6SIan Campbell 		if (!pte_present(*pte))
485551889a6SIan Campbell 			break;
486551889a6SIan Campbell 
487551889a6SIan Campbell 		pte_clear(NULL, va, pte);
488551889a6SIan Campbell 	}
4896944a9c8SJeremy Fitzhardinge 	paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
490ad757b6aSThomas Gleixner }
491ad757b6aSThomas Gleixner 
492ad757b6aSThomas Gleixner void __init native_pagetable_setup_done(pgd_t *base)
493ad757b6aSThomas Gleixner {
494ad757b6aSThomas Gleixner }
495ad757b6aSThomas Gleixner 
496ad757b6aSThomas Gleixner /*
497ad757b6aSThomas Gleixner  * Build a proper pagetable for the kernel mappings.  Up until this
498ad757b6aSThomas Gleixner  * point, we've been running on some set of pagetables constructed by
499ad757b6aSThomas Gleixner  * the boot process.
500ad757b6aSThomas Gleixner  *
501ad757b6aSThomas Gleixner  * If we're booting on native hardware, this will be a pagetable
502551889a6SIan Campbell  * constructed in arch/x86/kernel/head_32.S.  The root of the
503551889a6SIan Campbell  * pagetable will be swapper_pg_dir.
504ad757b6aSThomas Gleixner  *
505ad757b6aSThomas Gleixner  * If we're booting paravirtualized under a hypervisor, then there are
506ad757b6aSThomas Gleixner  * more options: we may already be running PAE, and the pagetable may
507ad757b6aSThomas Gleixner  * or may not be based in swapper_pg_dir.  In any case,
508ad757b6aSThomas Gleixner  * paravirt_pagetable_setup_start() will set up swapper_pg_dir
509ad757b6aSThomas Gleixner  * appropriately for the rest of the initialization to work.
510ad757b6aSThomas Gleixner  *
511ad757b6aSThomas Gleixner  * In general, pagetable_init() assumes that the pagetable may already
512ad757b6aSThomas Gleixner  * be partially populated, and so it avoids stomping on any existing
513ad757b6aSThomas Gleixner  * mappings.
514ad757b6aSThomas Gleixner  */
515f765090aSPekka Enberg void __init early_ioremap_page_table_range_init(void)
516ad757b6aSThomas Gleixner {
517e7179853SPekka Enberg 	pgd_t *pgd_base = swapper_pg_dir;
5188550eb99SIngo Molnar 	unsigned long vaddr, end;
519ad757b6aSThomas Gleixner 
520ad757b6aSThomas Gleixner 	/*
521ad757b6aSThomas Gleixner 	 * Fixed mappings, only the page table structure has to be
522ad757b6aSThomas Gleixner 	 * created - mappings will be set by set_fixmap():
523ad757b6aSThomas Gleixner 	 */
524ad757b6aSThomas Gleixner 	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
525ad757b6aSThomas Gleixner 	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
526ad757b6aSThomas Gleixner 	page_table_range_init(vaddr, end, pgd_base);
527beacfaacSHuang, Ying 	early_ioremap_reset();
528e7b37895SYinghai Lu }
529e7b37895SYinghai Lu 
530e7b37895SYinghai Lu static void __init pagetable_init(void)
531e7b37895SYinghai Lu {
532e7b37895SYinghai Lu 	pgd_t *pgd_base = swapper_pg_dir;
533e7b37895SYinghai Lu 
534ad757b6aSThomas Gleixner 	permanent_kmaps_init(pgd_base);
535ad757b6aSThomas Gleixner }
536ad757b6aSThomas Gleixner 
537a6eb84bcSRafael J. Wysocki #ifdef CONFIG_ACPI_SLEEP
538ad757b6aSThomas Gleixner /*
539a6eb84bcSRafael J. Wysocki  * ACPI suspend needs this for resume, because things like the intel-agp
540ad757b6aSThomas Gleixner  * driver might have split up a kernel 4MB mapping.
541ad757b6aSThomas Gleixner  */
542a6eb84bcSRafael J. Wysocki char swsusp_pg_dir[PAGE_SIZE]
543ad757b6aSThomas Gleixner 	__attribute__ ((aligned(PAGE_SIZE)));
544ad757b6aSThomas Gleixner 
545ad757b6aSThomas Gleixner static inline void save_pg_dir(void)
546ad757b6aSThomas Gleixner {
547ad757b6aSThomas Gleixner 	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
548ad757b6aSThomas Gleixner }
549a6eb84bcSRafael J. Wysocki #else /* !CONFIG_ACPI_SLEEP */
550ad757b6aSThomas Gleixner static inline void save_pg_dir(void)
551ad757b6aSThomas Gleixner {
552ad757b6aSThomas Gleixner }
553a6eb84bcSRafael J. Wysocki #endif /* !CONFIG_ACPI_SLEEP */
554ad757b6aSThomas Gleixner 
555ad757b6aSThomas Gleixner void zap_low_mappings(void)
556ad757b6aSThomas Gleixner {
557ad757b6aSThomas Gleixner 	int i;
558ad757b6aSThomas Gleixner 
559ad757b6aSThomas Gleixner 	/*
560ad757b6aSThomas Gleixner 	 * Zap initial low-memory mappings.
561ad757b6aSThomas Gleixner 	 *
562ad757b6aSThomas Gleixner 	 * Note that "pgd_clear()" doesn't do it for
563ad757b6aSThomas Gleixner 	 * us, because pgd_clear() is a no-op on i386.
564ad757b6aSThomas Gleixner 	 */
56568db065cSJeremy Fitzhardinge 	for (i = 0; i < KERNEL_PGD_BOUNDARY; i++) {
566ad757b6aSThomas Gleixner #ifdef CONFIG_X86_PAE
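		/*
		 * "1" is _PAGE_PRESENT: the entry stays present but points
		 * at the all-zero empty_zero_page, so accesses through the
		 * low mappings still fault.
		 */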
567ad757b6aSThomas Gleixner 		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
568ad757b6aSThomas Gleixner #else
569ad757b6aSThomas Gleixner 		set_pgd(swapper_pg_dir+i, __pgd(0));
570ad757b6aSThomas Gleixner #endif
5718550eb99SIngo Molnar 	}
572ad757b6aSThomas Gleixner 	flush_tlb_all();
573ad757b6aSThomas Gleixner }
574ad757b6aSThomas Gleixner 
5758550eb99SIngo Molnar int nx_enabled;
576ad757b6aSThomas Gleixner 
577be43d728SJeremy Fitzhardinge pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
5786fdc05d4SJeremy Fitzhardinge EXPORT_SYMBOL_GPL(__supported_pte_mask);
5796fdc05d4SJeremy Fitzhardinge 
580ad757b6aSThomas Gleixner #ifdef CONFIG_X86_PAE
581ad757b6aSThomas Gleixner 
5828550eb99SIngo Molnar static int disable_nx __initdata;
583ad757b6aSThomas Gleixner 
584ad757b6aSThomas Gleixner /*
585ad757b6aSThomas Gleixner  * noexec = on|off
586ad757b6aSThomas Gleixner  *
587ad757b6aSThomas Gleixner  * Control non executable mappings.
588ad757b6aSThomas Gleixner  *
589ad757b6aSThomas Gleixner  * on      Enable
590ad757b6aSThomas Gleixner  * off     Disable
591ad757b6aSThomas Gleixner  */
592ad757b6aSThomas Gleixner static int __init noexec_setup(char *str)
593ad757b6aSThomas Gleixner {
594ad757b6aSThomas Gleixner 	if (!str || !strcmp(str, "on")) {
595ad757b6aSThomas Gleixner 		if (cpu_has_nx) {
596ad757b6aSThomas Gleixner 			__supported_pte_mask |= _PAGE_NX;
597ad757b6aSThomas Gleixner 			disable_nx = 0;
598ad757b6aSThomas Gleixner 		}
5998550eb99SIngo Molnar 	} else {
6008550eb99SIngo Molnar 		if (!strcmp(str, "off")) {
601ad757b6aSThomas Gleixner 			disable_nx = 1;
602ad757b6aSThomas Gleixner 			__supported_pte_mask &= ~_PAGE_NX;
6038550eb99SIngo Molnar 		} else {
604ad757b6aSThomas Gleixner 			return -EINVAL;
6058550eb99SIngo Molnar 		}
6068550eb99SIngo Molnar 	}
607ad757b6aSThomas Gleixner 
608ad757b6aSThomas Gleixner 	return 0;
609ad757b6aSThomas Gleixner }
610ad757b6aSThomas Gleixner early_param("noexec", noexec_setup);
611ad757b6aSThomas Gleixner 
612ad757b6aSThomas Gleixner static void __init set_nx(void)
613ad757b6aSThomas Gleixner {
614ad757b6aSThomas Gleixner 	unsigned int v[4], l, h;
615ad757b6aSThomas Gleixner 
616ad757b6aSThomas Gleixner 	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
617ad757b6aSThomas Gleixner 		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
6188550eb99SIngo Molnar 
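		/* EDX bit 20 of CPUID leaf 0x80000001 is the NX feature flag. */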
619ad757b6aSThomas Gleixner 		if ((v[3] & (1 << 20)) && !disable_nx) {
620ad757b6aSThomas Gleixner 			rdmsr(MSR_EFER, l, h);
621ad757b6aSThomas Gleixner 			l |= EFER_NX;
622ad757b6aSThomas Gleixner 			wrmsr(MSR_EFER, l, h);
623ad757b6aSThomas Gleixner 			nx_enabled = 1;
624ad757b6aSThomas Gleixner 			__supported_pte_mask |= _PAGE_NX;
625ad757b6aSThomas Gleixner 		}
626ad757b6aSThomas Gleixner 	}
627ad757b6aSThomas Gleixner }
628ad757b6aSThomas Gleixner #endif
629ad757b6aSThomas Gleixner 
63090d967e0SYinghai Lu /* user-defined highmem size */
63190d967e0SYinghai Lu static unsigned int highmem_pages = -1;
63290d967e0SYinghai Lu 
63390d967e0SYinghai Lu /*
63490d967e0SYinghai Lu  * highmem=size forces highmem to be exactly 'size' bytes.
63590d967e0SYinghai Lu  * This works even on boxes that have no highmem otherwise.
63690d967e0SYinghai Lu  * This also works to reduce highmem size on bigger boxes.
63790d967e0SYinghai Lu  */
63890d967e0SYinghai Lu static int __init parse_highmem(char *arg)
63990d967e0SYinghai Lu {
64090d967e0SYinghai Lu 	if (!arg)
64190d967e0SYinghai Lu 		return -EINVAL;
64290d967e0SYinghai Lu 
64390d967e0SYinghai Lu 	highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
64490d967e0SYinghai Lu 	return 0;
64590d967e0SYinghai Lu }
64690d967e0SYinghai Lu early_param("highmem", parse_highmem);
64790d967e0SYinghai Lu 
6484769843bSIngo Molnar #define MSG_HIGHMEM_TOO_BIG \
6494769843bSIngo Molnar 	"highmem size (%luMB) is bigger than pages available (%luMB)!\n"
6504769843bSIngo Molnar 
6514769843bSIngo Molnar #define MSG_LOWMEM_TOO_SMALL \
6524769843bSIngo Molnar 	"highmem size (%luMB) results in <64MB lowmem, ignoring it!\n"
65390d967e0SYinghai Lu /*
6544769843bSIngo Molnar  * All of RAM fits into lowmem - but if user wants highmem
6554769843bSIngo Molnar  * artificially via the highmem=x boot parameter then create
6564769843bSIngo Molnar  * it:
65790d967e0SYinghai Lu  */
6584769843bSIngo Molnar void __init lowmem_pfn_init(void)
65990d967e0SYinghai Lu {
660346cafecSYinghai Lu 	/* max_low_pfn is 0, we already have early_res support */
66190d967e0SYinghai Lu 	max_low_pfn = max_pfn;
662d88316c2SIngo Molnar 
6634769843bSIngo Molnar 	if (highmem_pages == -1)
6644769843bSIngo Molnar 		highmem_pages = 0;
6654769843bSIngo Molnar #ifdef CONFIG_HIGHMEM
6664769843bSIngo Molnar 	if (highmem_pages >= max_pfn) {
6674769843bSIngo Molnar 		printk(KERN_ERR MSG_HIGHMEM_TOO_BIG,
6684769843bSIngo Molnar 			pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
6694769843bSIngo Molnar 		highmem_pages = 0;
6704769843bSIngo Molnar 	}
6714769843bSIngo Molnar 	if (highmem_pages) {
6724769843bSIngo Molnar 		if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) {
6734769843bSIngo Molnar 			printk(KERN_ERR MSG_LOWMEM_TOO_SMALL,
6744769843bSIngo Molnar 				pages_to_mb(highmem_pages));
6754769843bSIngo Molnar 			highmem_pages = 0;
6764769843bSIngo Molnar 		}
6774769843bSIngo Molnar 		max_low_pfn -= highmem_pages;
6784769843bSIngo Molnar 	}
6794769843bSIngo Molnar #else
6804769843bSIngo Molnar 	if (highmem_pages)
6814769843bSIngo Molnar 		printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
6824769843bSIngo Molnar #endif
6834769843bSIngo Molnar }
6844769843bSIngo Molnar 
6854769843bSIngo Molnar #define MSG_HIGHMEM_TOO_SMALL \
6864769843bSIngo Molnar 	"only %luMB highmem pages available, ignoring highmem size of %luMB!\n"
6874769843bSIngo Molnar 
6884769843bSIngo Molnar #define MSG_HIGHMEM_TRIMMED \
6894769843bSIngo Molnar 	"Warning: only 4GB will be used. Use a HIGHMEM64G enabled kernel!\n"
6904769843bSIngo Molnar /*
6914769843bSIngo Molnar  * We have more RAM than fits into lowmem - we try to put it into
6924769843bSIngo Molnar  * highmem, also taking the highmem=x boot parameter into account:
6934769843bSIngo Molnar  */
6944769843bSIngo Molnar void __init highmem_pfn_init(void)
6954769843bSIngo Molnar {
696d88316c2SIngo Molnar 	max_low_pfn = MAXMEM_PFN;
697d88316c2SIngo Molnar 
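	/*
	 * Without a highmem= override, everything above MAXMEM_PFN (the
	 * end of the lowmem direct mapping) becomes highmem.  An explicit
	 * size either trims max_pfn down to match or, if it asks for more
	 * than is physically present, is ignored with a warning below.
	 */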
69890d967e0SYinghai Lu 	if (highmem_pages == -1)
69990d967e0SYinghai Lu 		highmem_pages = max_pfn - MAXMEM_PFN;
7004769843bSIngo Molnar 
70190d967e0SYinghai Lu 	if (highmem_pages + MAXMEM_PFN < max_pfn)
70290d967e0SYinghai Lu 		max_pfn = MAXMEM_PFN + highmem_pages;
7034769843bSIngo Molnar 
70490d967e0SYinghai Lu 	if (highmem_pages + MAXMEM_PFN > max_pfn) {
7054769843bSIngo Molnar 		printk(KERN_WARNING MSG_HIGHMEM_TOO_SMALL,
70690d967e0SYinghai Lu 			pages_to_mb(max_pfn - MAXMEM_PFN),
70790d967e0SYinghai Lu 			pages_to_mb(highmem_pages));
70890d967e0SYinghai Lu 		highmem_pages = 0;
70990d967e0SYinghai Lu 	}
71090d967e0SYinghai Lu #ifndef CONFIG_HIGHMEM
71190d967e0SYinghai Lu 	/* Maximum memory usable is what is directly addressable */
7124769843bSIngo Molnar 	printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20);
71390d967e0SYinghai Lu 	if (max_pfn > MAX_NONPAE_PFN)
7144769843bSIngo Molnar 		printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
71590d967e0SYinghai Lu 	else
71690d967e0SYinghai Lu 		printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
71790d967e0SYinghai Lu 	max_pfn = MAXMEM_PFN;
71890d967e0SYinghai Lu #else /* !CONFIG_HIGHMEM */
71990d967e0SYinghai Lu #ifndef CONFIG_HIGHMEM64G
72090d967e0SYinghai Lu 	if (max_pfn > MAX_NONPAE_PFN) {
72190d967e0SYinghai Lu 		max_pfn = MAX_NONPAE_PFN;
7224769843bSIngo Molnar 		printk(KERN_WARNING MSG_HIGHMEM_TRIMMED);
72390d967e0SYinghai Lu 	}
72490d967e0SYinghai Lu #endif /* !CONFIG_HIGHMEM64G */
72590d967e0SYinghai Lu #endif /* !CONFIG_HIGHMEM */
72690d967e0SYinghai Lu }
7274769843bSIngo Molnar 
72890d967e0SYinghai Lu /*
72990d967e0SYinghai Lu  * Determine low and high memory ranges:
73090d967e0SYinghai Lu  */
73190d967e0SYinghai Lu void __init find_low_pfn_range(void)
73290d967e0SYinghai Lu {
73390d967e0SYinghai Lu 	/* it could update max_pfn */
73490d967e0SYinghai Lu 
735d88316c2SIngo Molnar 	if (max_pfn <= MAXMEM_PFN)
7364769843bSIngo Molnar 		lowmem_pfn_init();
737d88316c2SIngo Molnar 	else
738d88316c2SIngo Molnar 		highmem_pfn_init();
73990d967e0SYinghai Lu }
74090d967e0SYinghai Lu 
741b2ac82a0SYinghai Lu #ifndef CONFIG_NEED_MULTIPLE_NODES
7422ec65f8bSYinghai Lu void __init initmem_init(unsigned long start_pfn,
743b2ac82a0SYinghai Lu 				  unsigned long end_pfn)
744b2ac82a0SYinghai Lu {
745b2ac82a0SYinghai Lu #ifdef CONFIG_HIGHMEM
746b2ac82a0SYinghai Lu 	highstart_pfn = highend_pfn = max_pfn;
747b2ac82a0SYinghai Lu 	if (max_pfn > max_low_pfn)
748b2ac82a0SYinghai Lu 		highstart_pfn = max_low_pfn;
749b2ac82a0SYinghai Lu 	memory_present(0, 0, highend_pfn);
750cb95a13aSYinghai Lu 	e820_register_active_regions(0, 0, highend_pfn);
751b2ac82a0SYinghai Lu 	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
752b2ac82a0SYinghai Lu 		pages_to_mb(highend_pfn - highstart_pfn));
753b2ac82a0SYinghai Lu 	num_physpages = highend_pfn;
754b2ac82a0SYinghai Lu 	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
755b2ac82a0SYinghai Lu #else
756b2ac82a0SYinghai Lu 	memory_present(0, 0, max_low_pfn);
757cb95a13aSYinghai Lu 	e820_register_active_regions(0, 0, max_low_pfn);
758b2ac82a0SYinghai Lu 	num_physpages = max_low_pfn;
759b2ac82a0SYinghai Lu 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
760b2ac82a0SYinghai Lu #endif
761b2ac82a0SYinghai Lu #ifdef CONFIG_FLATMEM
762b2ac82a0SYinghai Lu 	max_mapnr = num_physpages;
763b2ac82a0SYinghai Lu #endif
764b2ac82a0SYinghai Lu 	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
765b2ac82a0SYinghai Lu 			pages_to_mb(max_low_pfn));
766b2ac82a0SYinghai Lu 
767b2ac82a0SYinghai Lu 	setup_bootmem_allocator();
768b2ac82a0SYinghai Lu }
769cb95a13aSYinghai Lu #endif /* !CONFIG_NEED_MULTIPLE_NODES */
770b2ac82a0SYinghai Lu 
771cb95a13aSYinghai Lu static void __init zone_sizes_init(void)
772b2ac82a0SYinghai Lu {
773b2ac82a0SYinghai Lu 	unsigned long max_zone_pfns[MAX_NR_ZONES];
774b2ac82a0SYinghai Lu 	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
775b2ac82a0SYinghai Lu 	max_zone_pfns[ZONE_DMA] =
776b2ac82a0SYinghai Lu 		virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
777b2ac82a0SYinghai Lu 	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
778b2ac82a0SYinghai Lu #ifdef CONFIG_HIGHMEM
779b2ac82a0SYinghai Lu 	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
780b2ac82a0SYinghai Lu #endif
781b2ac82a0SYinghai Lu 
782b2ac82a0SYinghai Lu 	free_area_init_nodes(max_zone_pfns);
783b2ac82a0SYinghai Lu }
784b2ac82a0SYinghai Lu 
785a71edd1fSYinghai Lu static unsigned long __init setup_node_bootmem(int nodeid,
786a71edd1fSYinghai Lu 				 unsigned long start_pfn,
787a71edd1fSYinghai Lu 				 unsigned long end_pfn,
788a71edd1fSYinghai Lu 				 unsigned long bootmap)
789a71edd1fSYinghai Lu {
790a71edd1fSYinghai Lu 	unsigned long bootmap_size;
791a71edd1fSYinghai Lu 
792a71edd1fSYinghai Lu 	if (start_pfn > max_low_pfn)
793a71edd1fSYinghai Lu 		return bootmap;
794a71edd1fSYinghai Lu 	if (end_pfn > max_low_pfn)
795a71edd1fSYinghai Lu 		end_pfn = max_low_pfn;
796a71edd1fSYinghai Lu 
797a71edd1fSYinghai Lu 	/* don't touch min_low_pfn */
798a71edd1fSYinghai Lu 	bootmap_size = init_bootmem_node(NODE_DATA(nodeid),
799a71edd1fSYinghai Lu 					 bootmap >> PAGE_SHIFT,
800a71edd1fSYinghai Lu 					 start_pfn, end_pfn);
801a71edd1fSYinghai Lu 	printk(KERN_INFO "  node %d low ram: %08lx - %08lx\n",
802a71edd1fSYinghai Lu 		nodeid, start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
803a71edd1fSYinghai Lu 	printk(KERN_INFO "  node %d bootmap %08lx - %08lx\n",
804a71edd1fSYinghai Lu 		 nodeid, bootmap, bootmap + bootmap_size);
805a71edd1fSYinghai Lu 	free_bootmem_with_active_regions(nodeid, end_pfn);
806a71edd1fSYinghai Lu 	early_res_to_bootmem(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
807a71edd1fSYinghai Lu 
808a71edd1fSYinghai Lu 	return bootmap + bootmap_size;
809a71edd1fSYinghai Lu }
810a71edd1fSYinghai Lu 
811b2ac82a0SYinghai Lu void __init setup_bootmem_allocator(void)
812b2ac82a0SYinghai Lu {
813a71edd1fSYinghai Lu 	int nodeid;
814b2ac82a0SYinghai Lu 	unsigned long bootmap_size, bootmap;
815b2ac82a0SYinghai Lu 	/*
816b2ac82a0SYinghai Lu 	 * Initialize the boot-time allocator (with low memory only):
817b2ac82a0SYinghai Lu 	 */
818b2ac82a0SYinghai Lu 	bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
819fc5efe39SYinghai Lu 	bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
820b2ac82a0SYinghai Lu 				 PAGE_SIZE);
821b2ac82a0SYinghai Lu 	if (bootmap == -1L)
822b2ac82a0SYinghai Lu 		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
823b2ac82a0SYinghai Lu 	reserve_early(bootmap, bootmap + bootmap_size, "BOOTMAP");
824225c37d7SYinghai Lu 
825b2ac82a0SYinghai Lu 	printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
826b2ac82a0SYinghai Lu 		 max_pfn_mapped<<PAGE_SHIFT);
827fc5efe39SYinghai Lu 	printk(KERN_INFO "  low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT);
828a71edd1fSYinghai Lu 
829a71edd1fSYinghai Lu #ifdef CONFIG_NEED_MULTIPLE_NODES
830a71edd1fSYinghai Lu 	for_each_online_node(nodeid)
831a71edd1fSYinghai Lu 		bootmap = setup_node_bootmem(nodeid, node_start_pfn[nodeid],
832a71edd1fSYinghai Lu 					node_end_pfn[nodeid], bootmap);
833a71edd1fSYinghai Lu #else
834fc5efe39SYinghai Lu 	bootmap = setup_node_bootmem(0, 0, max_low_pfn, bootmap);
835a71edd1fSYinghai Lu #endif
836b2ac82a0SYinghai Lu 
837c464573cSPekka Enberg 	after_bootmem = 1;
838b2ac82a0SYinghai Lu }
839b2ac82a0SYinghai Lu 
840ad757b6aSThomas Gleixner /*
841ad757b6aSThomas Gleixner  * paging_init() sets up the page tables - note that the first 8MB are
842ad757b6aSThomas Gleixner  * already mapped by head.S.
843ad757b6aSThomas Gleixner  *
844ad757b6aSThomas Gleixner  * This routine also unmaps the page at virtual kernel address 0, so
845ad757b6aSThomas Gleixner  * that we can trap those pesky NULL-reference errors in the kernel.
846ad757b6aSThomas Gleixner  */
847ad757b6aSThomas Gleixner void __init paging_init(void)
848ad757b6aSThomas Gleixner {
849ad757b6aSThomas Gleixner 	pagetable_init();
850ad757b6aSThomas Gleixner 
851ad757b6aSThomas Gleixner 	__flush_tlb_all();
852ad757b6aSThomas Gleixner 
853ad757b6aSThomas Gleixner 	kmap_init();
85411cd0bc1SYinghai Lu 
85511cd0bc1SYinghai Lu 	/*
85611cd0bc1SYinghai Lu 	 * NOTE: at this point the bootmem allocator is fully available.
85711cd0bc1SYinghai Lu 	 */
85811cd0bc1SYinghai Lu 	sparse_init();
85911cd0bc1SYinghai Lu 	zone_sizes_init();
860ad757b6aSThomas Gleixner }
861ad757b6aSThomas Gleixner 
862ad757b6aSThomas Gleixner /*
863ad757b6aSThomas Gleixner  * Test if the WP bit works in supervisor mode. It isn't supported on 386's
864f7f17a67SDmitri Vorobiev  * and also on some strange 486's. All 586+'s are OK. This used to involve
865f7f17a67SDmitri Vorobiev  * black magic jumps to work around some nasty CPU bugs, but fortunately the
866f7f17a67SDmitri Vorobiev  * switch to using exceptions got rid of all that.
867ad757b6aSThomas Gleixner  */
868ad757b6aSThomas Gleixner static void __init test_wp_bit(void)
869ad757b6aSThomas Gleixner {
870d7d119d7SIngo Molnar 	printk(KERN_INFO
871d7d119d7SIngo Molnar   "Checking if this processor honours the WP bit even in supervisor mode...");
872ad757b6aSThomas Gleixner 
873ad757b6aSThomas Gleixner 	/* Any page-aligned address will do, the test is non-destructive */
874ad757b6aSThomas Gleixner 	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
875ad757b6aSThomas Gleixner 	boot_cpu_data.wp_works_ok = do_test_wp_bit();
876ad757b6aSThomas Gleixner 	clear_fixmap(FIX_WP_TEST);
877ad757b6aSThomas Gleixner 
878ad757b6aSThomas Gleixner 	if (!boot_cpu_data.wp_works_ok) {
879d7d119d7SIngo Molnar 		printk(KERN_CONT "No.\n");
880ad757b6aSThomas Gleixner #ifdef CONFIG_X86_WP_WORKS_OK
881d7d119d7SIngo Molnar 		panic(
882d7d119d7SIngo Molnar   "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
883ad757b6aSThomas Gleixner #endif
884ad757b6aSThomas Gleixner 	} else {
885d7d119d7SIngo Molnar 		printk(KERN_CONT "Ok.\n");
886ad757b6aSThomas Gleixner 	}
887ad757b6aSThomas Gleixner }
888ad757b6aSThomas Gleixner 
889ad757b6aSThomas Gleixner static struct kcore_list kcore_mem, kcore_vmalloc;
890ad757b6aSThomas Gleixner 
891ad757b6aSThomas Gleixner void __init mem_init(void)
892ad757b6aSThomas Gleixner {
893ad757b6aSThomas Gleixner 	int codesize, reservedpages, datasize, initsize;
894cc9f7a0cSYinghai Lu 	int tmp;
895ad757b6aSThomas Gleixner 
896cfb80c9eSJeremy Fitzhardinge 	pci_iommu_alloc();
897cfb80c9eSJeremy Fitzhardinge 
898ad757b6aSThomas Gleixner #ifdef CONFIG_FLATMEM
899ad757b6aSThomas Gleixner 	BUG_ON(!mem_map);
900ad757b6aSThomas Gleixner #endif
901ad757b6aSThomas Gleixner 	/* this will put all low memory onto the freelists */
902ad757b6aSThomas Gleixner 	totalram_pages += free_all_bootmem();
903ad757b6aSThomas Gleixner 
904ad757b6aSThomas Gleixner 	reservedpages = 0;
905ad757b6aSThomas Gleixner 	for (tmp = 0; tmp < max_low_pfn; tmp++)
906ad757b6aSThomas Gleixner 		/*
9078550eb99SIngo Molnar 		 * Only count reserved RAM pages:
908ad757b6aSThomas Gleixner 		 */
909ad757b6aSThomas Gleixner 		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
910ad757b6aSThomas Gleixner 			reservedpages++;
911ad757b6aSThomas Gleixner 
912cc9f7a0cSYinghai Lu 	set_highmem_pages_init();
913ad757b6aSThomas Gleixner 
914ad757b6aSThomas Gleixner 	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
915ad757b6aSThomas Gleixner 	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
916ad757b6aSThomas Gleixner 	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
917ad757b6aSThomas Gleixner 
918ad757b6aSThomas Gleixner 	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
919ad757b6aSThomas Gleixner 	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
920ad757b6aSThomas Gleixner 		   VMALLOC_END-VMALLOC_START);
921ad757b6aSThomas Gleixner 
9228550eb99SIngo Molnar 	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
9238550eb99SIngo Molnar 			"%dk reserved, %dk data, %dk init, %ldk highmem)\n",
924ad757b6aSThomas Gleixner 		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
925ad757b6aSThomas Gleixner 		num_physpages << (PAGE_SHIFT-10),
926ad757b6aSThomas Gleixner 		codesize >> 10,
927ad757b6aSThomas Gleixner 		reservedpages << (PAGE_SHIFT-10),
928ad757b6aSThomas Gleixner 		datasize >> 10,
929ad757b6aSThomas Gleixner 		initsize >> 10,
930ad757b6aSThomas Gleixner 		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
931ad757b6aSThomas Gleixner 	       );
932ad757b6aSThomas Gleixner 
933d7d119d7SIngo Molnar 	printk(KERN_INFO "virtual kernel memory layout:\n"
934ad757b6aSThomas Gleixner 		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
935ad757b6aSThomas Gleixner #ifdef CONFIG_HIGHMEM
936ad757b6aSThomas Gleixner 		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
937ad757b6aSThomas Gleixner #endif
938ad757b6aSThomas Gleixner 		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
939ad757b6aSThomas Gleixner 		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
940ad757b6aSThomas Gleixner 		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
941ad757b6aSThomas Gleixner 		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
942ad757b6aSThomas Gleixner 		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
943ad757b6aSThomas Gleixner 		FIXADDR_START, FIXADDR_TOP,
944ad757b6aSThomas Gleixner 		(FIXADDR_TOP - FIXADDR_START) >> 10,
945ad757b6aSThomas Gleixner 
946ad757b6aSThomas Gleixner #ifdef CONFIG_HIGHMEM
947ad757b6aSThomas Gleixner 		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
948ad757b6aSThomas Gleixner 		(LAST_PKMAP*PAGE_SIZE) >> 10,
949ad757b6aSThomas Gleixner #endif
950ad757b6aSThomas Gleixner 
951ad757b6aSThomas Gleixner 		VMALLOC_START, VMALLOC_END,
952ad757b6aSThomas Gleixner 		(VMALLOC_END - VMALLOC_START) >> 20,
953ad757b6aSThomas Gleixner 
954ad757b6aSThomas Gleixner 		(unsigned long)__va(0), (unsigned long)high_memory,
955ad757b6aSThomas Gleixner 		((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
956ad757b6aSThomas Gleixner 
957ad757b6aSThomas Gleixner 		(unsigned long)&__init_begin, (unsigned long)&__init_end,
9588550eb99SIngo Molnar 		((unsigned long)&__init_end -
9598550eb99SIngo Molnar 		 (unsigned long)&__init_begin) >> 10,
960ad757b6aSThomas Gleixner 
961ad757b6aSThomas Gleixner 		(unsigned long)&_etext, (unsigned long)&_edata,
962ad757b6aSThomas Gleixner 		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
963ad757b6aSThomas Gleixner 
964ad757b6aSThomas Gleixner 		(unsigned long)&_text, (unsigned long)&_etext,
965ad757b6aSThomas Gleixner 		((unsigned long)&_etext - (unsigned long)&_text) >> 10);
966ad757b6aSThomas Gleixner 
967beeb4195SJan Beulich 	/*
968beeb4195SJan Beulich 	 * Check boundaries twice: Some fundamental inconsistencies can
969beeb4195SJan Beulich 	 * be detected at build time already.
970beeb4195SJan Beulich 	 */
971beeb4195SJan Beulich #define __FIXADDR_TOP (-PAGE_SIZE)
972beeb4195SJan Beulich #ifdef CONFIG_HIGHMEM
973beeb4195SJan Beulich 	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE	> FIXADDR_START);
974beeb4195SJan Beulich 	BUILD_BUG_ON(VMALLOC_END			> PKMAP_BASE);
975beeb4195SJan Beulich #endif
976beeb4195SJan Beulich #define high_memory (-128UL << 20)
977beeb4195SJan Beulich 	BUILD_BUG_ON(VMALLOC_START			>= VMALLOC_END);
978beeb4195SJan Beulich #undef high_memory
979beeb4195SJan Beulich #undef __FIXADDR_TOP
980beeb4195SJan Beulich 
981ad757b6aSThomas Gleixner #ifdef CONFIG_HIGHMEM
982ad757b6aSThomas Gleixner 	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE	> FIXADDR_START);
983ad757b6aSThomas Gleixner 	BUG_ON(VMALLOC_END				> PKMAP_BASE);
984ad757b6aSThomas Gleixner #endif
985beeb4195SJan Beulich 	BUG_ON(VMALLOC_START				>= VMALLOC_END);
986ad757b6aSThomas Gleixner 	BUG_ON((unsigned long)high_memory		> VMALLOC_START);
987ad757b6aSThomas Gleixner 
988ad757b6aSThomas Gleixner 	if (boot_cpu_data.wp_works_ok < 0)
989ad757b6aSThomas Gleixner 		test_wp_bit();
990ad757b6aSThomas Gleixner 
99161165d7aSHugh Dickins 	save_pg_dir();
992ad757b6aSThomas Gleixner 	zap_low_mappings();
993ad757b6aSThomas Gleixner }
994ad757b6aSThomas Gleixner 
995ad757b6aSThomas Gleixner #ifdef CONFIG_MEMORY_HOTPLUG
996ad757b6aSThomas Gleixner int arch_add_memory(int nid, u64 start, u64 size)
997ad757b6aSThomas Gleixner {
998ad757b6aSThomas Gleixner 	struct pglist_data *pgdata = NODE_DATA(nid);
999ad757b6aSThomas Gleixner 	struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
1000ad757b6aSThomas Gleixner 	unsigned long start_pfn = start >> PAGE_SHIFT;
1001ad757b6aSThomas Gleixner 	unsigned long nr_pages = size >> PAGE_SHIFT;
1002ad757b6aSThomas Gleixner 
1003c04fc586SGary Hade 	return __add_pages(nid, zone, start_pfn, nr_pages);
1004ad757b6aSThomas Gleixner }
1005ad757b6aSThomas Gleixner #endif
1006ad757b6aSThomas Gleixner 
1007ad757b6aSThomas Gleixner /*
1008ad757b6aSThomas Gleixner  * This function cannot be __init, since exceptions don't work in that
1009ad757b6aSThomas Gleixner  * section.  Put this after the callers, so that it cannot be inlined.
1010ad757b6aSThomas Gleixner  */
10118550eb99SIngo Molnar static noinline int do_test_wp_bit(void)
1012ad757b6aSThomas Gleixner {
1013ad757b6aSThomas Gleixner 	char tmp_reg;
1014ad757b6aSThomas Gleixner 	int flag;
1015ad757b6aSThomas Gleixner 
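	/*
	 * Read a byte from the read-only FIX_WP_TEST mapping and write it
	 * straight back.  "flag" starts out as 1; if the write at label 1
	 * faults, the exception table entry resumes execution at label 2,
	 * skipping the xor, and flag stays 1 (WP is honoured).  If the
	 * write silently succeeds, the xor clears flag to 0 (WP broken).
	 */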
1016ad757b6aSThomas Gleixner 	__asm__ __volatile__(
1017ad757b6aSThomas Gleixner 		"	movb %0, %1	\n"
1018ad757b6aSThomas Gleixner 		"1:	movb %1, %0	\n"
1019ad757b6aSThomas Gleixner 		"	xorl %2, %2	\n"
1020ad757b6aSThomas Gleixner 		"2:			\n"
1021f832ff18SH. Peter Anvin 		_ASM_EXTABLE(1b,2b)
1022ad757b6aSThomas Gleixner 		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
1023ad757b6aSThomas Gleixner 		 "=q" (tmp_reg),
1024ad757b6aSThomas Gleixner 		 "=r" (flag)
1025ad757b6aSThomas Gleixner 		:"2" (1)
1026ad757b6aSThomas Gleixner 		:"memory");
1027ad757b6aSThomas Gleixner 
1028ad757b6aSThomas Gleixner 	return flag;
1029ad757b6aSThomas Gleixner }
1030ad757b6aSThomas Gleixner 
1031ad757b6aSThomas Gleixner #ifdef CONFIG_DEBUG_RODATA
1032edeed305SArjan van de Ven const int rodata_test_data = 0xC3;
1033edeed305SArjan van de Ven EXPORT_SYMBOL_GPL(rodata_test_data);
1034ad757b6aSThomas Gleixner 
1035ad757b6aSThomas Gleixner void mark_rodata_ro(void)
1036ad757b6aSThomas Gleixner {
1037ad757b6aSThomas Gleixner 	unsigned long start = PFN_ALIGN(_text);
1038ad757b6aSThomas Gleixner 	unsigned long size = PFN_ALIGN(_etext) - start;
1039ad757b6aSThomas Gleixner 
10408f0f996eSSteven Rostedt #ifndef CONFIG_DYNAMIC_FTRACE
10418f0f996eSSteven Rostedt 	/* Dynamic tracing modifies the kernel text section */
10426d238cc4SArjan van de Ven 	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
1043d7d119d7SIngo Molnar 	printk(KERN_INFO "Write protecting the kernel text: %luk\n",
1044d7d119d7SIngo Molnar 		size >> 10);
10450c42f392SAndi Kleen 
10460c42f392SAndi Kleen #ifdef CONFIG_CPA_DEBUG
1047d7d119d7SIngo Molnar 	printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
1048d7d119d7SIngo Molnar 		start, start+size);
10496d238cc4SArjan van de Ven 	set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);
10500c42f392SAndi Kleen 
1051d7d119d7SIngo Molnar 	printk(KERN_INFO "Testing CPA: write protecting again\n");
10526d238cc4SArjan van de Ven 	set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
10530c42f392SAndi Kleen #endif
10548f0f996eSSteven Rostedt #endif /* CONFIG_DYNAMIC_FTRACE */
10558f0f996eSSteven Rostedt 
1056ad757b6aSThomas Gleixner 	start += size;
1057ad757b6aSThomas Gleixner 	size = (unsigned long)__end_rodata - start;
10586d238cc4SArjan van de Ven 	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
1059d7d119d7SIngo Molnar 	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
1060ad757b6aSThomas Gleixner 		size >> 10);
1061edeed305SArjan van de Ven 	rodata_test();
1062ad757b6aSThomas Gleixner 
10630c42f392SAndi Kleen #ifdef CONFIG_CPA_DEBUG
1064d7d119d7SIngo Molnar 	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
10656d238cc4SArjan van de Ven 	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
10660c42f392SAndi Kleen 
1067d7d119d7SIngo Molnar 	printk(KERN_INFO "Testing CPA: write protecting again\n");
10686d238cc4SArjan van de Ven 	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
10690c42f392SAndi Kleen #endif
1070ad757b6aSThomas Gleixner }
1071ad757b6aSThomas Gleixner #endif
1072ad757b6aSThomas Gleixner 
1073d2dbf343SYinghai Lu int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
1074d2dbf343SYinghai Lu 				   int flags)
1075d2dbf343SYinghai Lu {
1076d2dbf343SYinghai Lu 	return reserve_bootmem(phys, len, flags);
1077d2dbf343SYinghai Lu }
1078