/*
 *  linux/arch/x86/mm/init_32.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>

#include <asm/asm.h>
#include <asm/bios_ebda.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820/api.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/olpc_ofw.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/set_memory.h>
#include <asm/page_types.h>
#include <asm/cpu_entry_area.h>
#include <asm/init.h>

#include "mm_internal.h"

unsigned long highstart_pfn, highend_pfn;

bool __read_mostly __vmalloc_start_set = false;

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the pgd entry
 * itself in the non-PAE case, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
		pmd_table = (pmd_t *)alloc_low_page();
		paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
		p4d = p4d_offset(pgd, 0);
		pud = pud_offset(p4d, 0);
		BUG_ON(pmd_table != pmd_offset(pud, 0));

		return pmd_table;
	}
#endif
	p4d = p4d_offset(pgd, 0);
	pud = pud_offset(p4d, 0);
	pmd_table = pmd_offset(pud, 0);

	return pmd_table;
}
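
/*
 * For illustration only (this helper is not used by the code above):
 * with PAE a 32-bit virtual address decomposes into 2/9/9/12 bits
 * (pgd/pmd/pte/offset); without PAE it is 10/10/12 and the pmd level
 * is folded into the pgd. The hypothetical helper below spells out
 * what pmd_index() expands to in both configurations:
 */
static inline unsigned long example_pmd_index(unsigned long vaddr)
{
	/* Select the middle-level slot covering @vaddr. */
	return (vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}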

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
		pte_t *page_table = (pte_t *)alloc_low_page();

		paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
	int pgd_idx = pgd_index(vaddr);
	int pmd_idx = pmd_index(vaddr);

	return one_md_table_init(swapper_pg_dir + pgd_idx) + pmd_idx;
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
	int pte_idx = pte_index(vaddr);
	pmd_t *pmd;

	pmd = populate_extra_pmd(vaddr);
	return one_page_table_init(pmd) + pte_idx;
}
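
/*
 * A hedged usage sketch (vaddr and phys are hypothetical): an early
 * caller that needs a single page mapped before the normal allocators
 * are up can combine the helpers above like so, letting
 * populate_extra_pte() allocate any missing intermediate tables:
 *
 *	pte_t *pte = populate_extra_pte(vaddr);
 *	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, PAGE_KERNEL));
 *
 * The early per-cpu setup code is one user of this pattern.
 */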

static unsigned long __init
page_table_range_init_count(unsigned long start, unsigned long end)
{
	unsigned long count = 0;
#ifdef CONFIG_HIGHMEM
	int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
	int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;
	int pgd_idx, pmd_idx;
	unsigned long vaddr;

	if (pmd_idx_kmap_begin == pmd_idx_kmap_end)
		return 0;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd_idx++) {
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
							pmd_idx++) {
			if ((vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin &&
			    (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end)
				count++;
			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
#endif
	return count;
}

static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					   unsigned long vaddr, pte_t *lastpte,
					   void **adr)
{
#ifdef CONFIG_HIGHMEM
	/*
	 * Something (early fixmap) may already have put a pte
	 * page here, which causes the page table allocation
	 * to become nonlinear. Attempt to fix it, and if it
	 * is still nonlinear then we have to BUG().
	 */
	int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
	int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;

	if (pmd_idx_kmap_begin != pmd_idx_kmap_end
	    && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
	    && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end) {
		pte_t *newpte;
		int i;

		BUG_ON(after_bootmem);
		newpte = *adr;
		for (i = 0; i < PTRS_PER_PTE; i++)
			set_pte(newpte + i, pte[i]);
		*adr = (void *)(((unsigned long)(*adr)) + PAGE_SIZE);

		paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
		BUG_ON(newpte != pte_offset_kernel(pmd, 0));
		__flush_tlb_all();

		paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
		pte = newpte;
	}
	BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
	       && vaddr > fix_to_virt(FIX_KMAP_END)
	       && lastpte && lastpte + PTRS_PER_PTE != pte);
#endif
	return pte;
}
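
/*
 * Note on the BUG_ON() above: within the kmap fixmap window, the pte
 * pages backing consecutive pmd slots must be physically consecutive,
 * i.e. between two successive calls
 *
 *	pte == lastpte + PTRS_PER_PTE
 *
 * must hold. kmap_atomic() later locates a slot's pte by plain pointer
 * arithmetic from the single cached kmap_pte, which only works if the
 * pte pages for the whole window form one contiguous block (the block
 * preallocated in page_table_range_init()).
 */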

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, wherever page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguously in physical space,
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx;
	unsigned long vaddr;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte = NULL;
	unsigned long count = page_table_range_init_count(start, end);
	void *adr = NULL;

	if (count)
		adr = alloc_low_pages(count);

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		pmd = pmd + pmd_index(vaddr);
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
							pmd++, pmd_idx++) {
			pte = page_table_kmap_check(one_page_table_init(pmd),
						    pmd, vaddr, pte, &adr);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}

static inline int is_kernel_text(unsigned long addr)
{
	if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
		return 1;
	return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
unsigned long __init
kernel_physical_mapping_init(unsigned long start,
			     unsigned long end,
			     unsigned long page_size_mask)
{
	int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
	unsigned long last_map_addr = end;
	unsigned long start_pfn, end_pfn;
	pgd_t *pgd_base = swapper_pg_dir;
	int pgd_idx, pmd_idx, pte_ofs;
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned pages_2m, pages_4k;
	int mapping_iter;

	start_pfn = start >> PAGE_SHIFT;
	end_pfn = end >> PAGE_SHIFT;

	/*
	 * The first iteration will set up the identity mapping using
	 * large/small pages based on use_pse, with the other attributes
	 * the same as set by the early code in head_32.S.
	 *
	 * The second iteration will set up the appropriate attributes
	 * (NX, GLOBAL..) as desired for the kernel identity mapping.
	 *
	 * This two-pass mechanism conforms to the TLB app note which says:
	 *
	 *     "Software should not write to a paging-structure entry in a way
	 *      that would change, for any linear address, both the page size
	 *      and either the page frame or attributes."
	 */
	mapping_iter = 1;

	if (!boot_cpu_has(X86_FEATURE_PSE))
		use_pse = 0;

repeat:
	pages_2m = pages_4k = 0;
	pfn = start_pfn;
	pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);

		if (pfn >= end_pfn)
			continue;
#ifdef CONFIG_X86_PAE
		pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
		pmd += pmd_idx;
#else
		pmd_idx = 0;
#endif
		for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
		     pmd++, pmd_idx++) {
			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

			/*
			 * Map with big pages if possible, otherwise
			 * create normal page tables:
			 */
			if (use_pse) {
				unsigned int addr2;
				pgprot_t prot = PAGE_KERNEL_LARGE;
				/*
				 * The first pass will use the same initial
				 * identity mapping attribute + _PAGE_PSE.
				 */
				pgprot_t init_prot =
					__pgprot(PTE_IDENT_ATTR |
						 _PAGE_PSE);

				pfn &= PMD_MASK >> PAGE_SHIFT;
				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
					PAGE_OFFSET + PAGE_SIZE-1;

				if (is_kernel_text(addr) ||
				    is_kernel_text(addr2))
					prot = PAGE_KERNEL_LARGE_EXEC;

				pages_2m++;
				if (mapping_iter == 1)
					set_pmd(pmd, pfn_pmd(pfn, init_prot));
				else
					set_pmd(pmd, pfn_pmd(pfn, prot));

				pfn += PTRS_PER_PTE;
				continue;
			}
			pte = one_page_table_init(pmd);

			pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
			pte += pte_ofs;
			for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
				pgprot_t prot = PAGE_KERNEL;
				/*
				 * The first pass will use the same initial
				 * identity mapping attribute.
				 */
				pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);

				if (is_kernel_text(addr))
					prot = PAGE_KERNEL_EXEC;

				pages_4k++;
				if (mapping_iter == 1) {
					set_pte(pte, pfn_pte(pfn, init_prot));
					last_map_addr = (pfn << PAGE_SHIFT) + PAGE_SIZE;
				} else
					set_pte(pte, pfn_pte(pfn, prot));
			}
		}
	}
	if (mapping_iter == 1) {
		/*
		 * Update the direct mapping page count only in the first
		 * iteration.
		 */
		update_page_count(PG_LEVEL_2M, pages_2m);
		update_page_count(PG_LEVEL_4K, pages_4k);

		/*
		 * Do a global flush of the local TLB; this flushes the
		 * previous mappings from both the small and large page TLBs.
		 */
		__flush_tlb_all();

		/*
		 * The second iteration will set the actual desired PTE
		 * attributes.
		 */
		mapping_iter = 2;
		goto repeat;
	}
	return last_map_addr;
}
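
/*
 * A sketch of the large-page bookkeeping above: one PSE pmd entry maps
 * PTRS_PER_PTE small pages at once (512 with PAE, i.e. 2MB; 1024
 * without, i.e. 4MB). In the loop,
 *
 *	pfn &= PMD_MASK >> PAGE_SHIFT;
 *
 * aligns pfn down to such a boundary, and addr2 then points at the
 * last byte of the aligned region, so the is_kernel_text() checks on
 * addr and addr2 probe both ends: if either overlaps kernel text, the
 * whole large page must be mapped executable.
 */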

pte_t *kmap_pte;

static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
	pgd_t *pgd = pgd_offset_k(vaddr);
	p4d_t *p4d = p4d_offset(pgd, vaddr);
	pud_t *pud = pud_offset(p4d, vaddr);
	pmd_t *pmd = pmd_offset(pud, vaddr);

	return pte_offset_kernel(pmd, vaddr);
}

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/*
	 * Cache the first kmap pte:
	 */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}
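
/*
 * A hedged sketch of how the cached kmap_pte is consumed later; this
 * is roughly what kmap_atomic_prot() in arch/x86/mm/highmem_32.c does:
 *
 *	idx   = type + KM_TYPE_NR * smp_processor_id();
 *	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 *	set_pte(kmap_pte - idx, mk_pte(page, prot));
 *
 * Because fixmap slots grow downwards in virtual address space, the
 * pte for slot idx is reached by indexing backwards from kmap_pte.
 */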

#ifdef CONFIG_HIGHMEM
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	unsigned long vaddr;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	p4d = p4d_offset(pgd, vaddr);
	pud = pud_offset(p4d, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}

void __init add_highpages_with_active_regions(int nid,
			 unsigned long start_pfn, unsigned long end_pfn)
{
	phys_addr_t start, end;
	u64 i;

	for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &start, &end, NULL) {
		unsigned long pfn = clamp_t(unsigned long, PFN_UP(start),
					    start_pfn, end_pfn);
		unsigned long e_pfn = clamp_t(unsigned long, PFN_DOWN(end),
					      start_pfn, end_pfn);
		for ( ; pfn < e_pfn; pfn++)
			if (pfn_valid(pfn))
				free_highmem_page(pfn_to_page(pfn));
	}
}
#else
static inline void permanent_kmaps_init(pgd_t *pgd_base)
{
}
#endif /* CONFIG_HIGHMEM */

void __init sync_initial_page_table(void)
{
	clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
			swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);

	/*
	 * Sync back the low identity map too. It is used, for example,
	 * by the 32-bit EFI stub.
	 */
	clone_pgd_range(initial_page_table,
			swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
			min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
}
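
/*
 * Illustrative arithmetic, assuming the standard definitions
 * (KERNEL_PGD_BOUNDARY == pgd_index(PAGE_OFFSET) and KERNEL_PGD_PTRS
 * == PTRS_PER_PGD - KERNEL_PGD_BOUNDARY): with the default 3G/1G split
 * on non-PAE, pgd_index(PAGE_OFFSET) == 768, so the first clone above
 * copies pgd entries 768..1023 (the kernel mappings) and the second
 * copies min(256, 768) == 256 user-range entries for the low identity
 * map.
 */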

void __init native_pagetable_init(void)
{
	unsigned long pfn, va;
	pgd_t *pgd, *base = swapper_pg_dir;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/*
	 * Remove any mappings which extend past the end of physical
	 * memory from the boot time page table.
	 * In virtual address space, we should have at least two pages
	 * from VMALLOC_END to pkmap or fixmap according to the VMALLOC_END
	 * definition, and max_low_pfn is set to the physical address of
	 * VMALLOC_END. If the initial memory mapping did its job
	 * correctly, we should find a pte in use near max_low_pfn, or a
	 * pmd that is not present.
	 */
	for (pfn = max_low_pfn; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
		va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
		pgd = base + pgd_index(va);
		if (!pgd_present(*pgd))
			break;

		p4d = p4d_offset(pgd, va);
		pud = pud_offset(p4d, va);
		pmd = pmd_offset(pud, va);
		if (!pmd_present(*pmd))
			break;

		/* There should not be a large page here: */
		if (pmd_large(*pmd)) {
			pr_warn("try to clear pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx, but pmd is a big page and is not using a pte!\n",
				pfn, pmd, __pa(pmd));
			BUG_ON(1);
		}

		pte = pte_offset_kernel(pmd, va);
		if (!pte_present(*pte))
			break;

		printk(KERN_DEBUG "clearing pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx pte: %p pte phys: %lx\n",
				pfn, pmd, __pa(pmd), pte, __pa(pte));
		pte_clear(NULL, va, pte);
	}
	paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
	paging_init();
}

/*
 * Build a proper pagetable for the kernel mappings.  Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S.  The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir.  In any case,
 * paravirt_pagetable_init() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
void __init early_ioremap_page_table_range_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;
	unsigned long vaddr, end;

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, pgd_base);
	early_ioremap_reset();
}

static void __init pagetable_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;

	permanent_kmaps_init(pgd_base);
}

#define DEFAULT_PTE_MASK ~(_PAGE_NX | _PAGE_GLOBAL)
/* Bits supported by the hardware: */
pteval_t __supported_pte_mask __read_mostly = DEFAULT_PTE_MASK;
/* Bits allowed in normal kernel mappings: */
pteval_t __default_kernel_pte_mask __read_mostly = DEFAULT_PTE_MASK;
EXPORT_SYMBOL_GPL(__supported_pte_mask);
/* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */
EXPORT_SYMBOL(__default_kernel_pte_mask);

/* user-defined highmem size */
static unsigned int highmem_pages = -1;

/*
 * highmem=size forces highmem to be exactly 'size' bytes.
 * This works even on boxes that have no highmem otherwise.
 * This also works to reduce highmem size on bigger boxes.
 */
static int __init parse_highmem(char *arg)
{
	if (!arg)
		return -EINVAL;

	highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
	return 0;
}
early_param("highmem", parse_highmem);
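
/*
 * A usage sketch: memparse() understands the usual K/M/G suffixes, so
 * booting with e.g.
 *
 *	highmem=512M
 *
 * sets highmem_pages to (512 << 20) >> PAGE_SHIFT, i.e. 131072 pages
 * with 4K pages.
 */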

#define MSG_HIGHMEM_TOO_BIG \
	"highmem size (%luMB) is bigger than pages available (%luMB)!\n"

#define MSG_LOWMEM_TOO_SMALL \
	"highmem size (%luMB) results in <64MB lowmem, ignoring it!\n"
/*
 * All of RAM fits into lowmem - but if the user wants highmem
 * artificially via the highmem=x boot parameter then create
 * it:
 */
static void __init lowmem_pfn_init(void)
{
	/* max_low_pfn is 0, we already have early_res support */
	max_low_pfn = max_pfn;

	if (highmem_pages == -1)
		highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
	if (highmem_pages >= max_pfn) {
		printk(KERN_ERR MSG_HIGHMEM_TOO_BIG,
			pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
		highmem_pages = 0;
	}
	if (highmem_pages) {
		if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) {
			printk(KERN_ERR MSG_LOWMEM_TOO_SMALL,
				pages_to_mb(highmem_pages));
			highmem_pages = 0;
		}
		max_low_pfn -= highmem_pages;
	}
#else
	if (highmem_pages)
		printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
#endif
}

#define MSG_HIGHMEM_TOO_SMALL \
	"only %luMB highmem pages available, ignoring highmem size of %luMB!\n"

#define MSG_HIGHMEM_TRIMMED \
	"Warning: only 4GB will be used. Use a HIGHMEM64G enabled kernel!\n"
/*
 * We have more RAM than fits into lowmem - we try to put it into
 * highmem, also taking the highmem=x boot parameter into account:
 */
static void __init highmem_pfn_init(void)
{
	max_low_pfn = MAXMEM_PFN;

	if (highmem_pages == -1)
		highmem_pages = max_pfn - MAXMEM_PFN;

	if (highmem_pages + MAXMEM_PFN < max_pfn)
		max_pfn = MAXMEM_PFN + highmem_pages;

	if (highmem_pages + MAXMEM_PFN > max_pfn) {
		printk(KERN_WARNING MSG_HIGHMEM_TOO_SMALL,
			pages_to_mb(max_pfn - MAXMEM_PFN),
			pages_to_mb(highmem_pages));
		highmem_pages = 0;
	}
#ifndef CONFIG_HIGHMEM
	/* Maximum memory usable is what is directly addressable */
	printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20);
	if (max_pfn > MAX_NONPAE_PFN)
		printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
	else
		printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
	max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_HIGHMEM64G
	if (max_pfn > MAX_NONPAE_PFN) {
		max_pfn = MAX_NONPAE_PFN;
		printk(KERN_WARNING MSG_HIGHMEM_TRIMMED);
	}
#endif /* !CONFIG_HIGHMEM64G */
#endif /* !CONFIG_HIGHMEM */
}

/*
 * Determine low and high memory ranges:
 */
void __init find_low_pfn_range(void)
{
	/* This can also update max_pfn: */
	if (max_pfn <= MAXMEM_PFN)
		lowmem_pfn_init();
	else
		highmem_pfn_init();
}
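
/*
 * A worked example: with the default 3G/1G split, MAXMEM comes out at
 * roughly 896MB (a MAXMEM_PFN of about 0x38000 with 4K pages). A box
 * with 2GB of RAM has max_pfn == 0x80000 > MAXMEM_PFN, so
 * highmem_pfn_init() runs: lowmem is capped at ~896MB and the
 * remaining ~1152MB (0x48000 pages) becomes highmem, unless overridden
 * with highmem= on the command line.
 */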

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init initmem_init(void)
{
#ifdef CONFIG_HIGHMEM
	highstart_pfn = highend_pfn = max_pfn;
	if (max_pfn > max_low_pfn)
		highstart_pfn = max_low_pfn;
	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
		pages_to_mb(highend_pfn - highstart_pfn));
	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif

	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
	sparse_memory_present_with_active_regions(0);

#ifdef CONFIG_FLATMEM
	max_mapnr = IS_ENABLED(CONFIG_HIGHMEM) ? highend_pfn : max_low_pfn;
#endif
	__vmalloc_start_set = true;

	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
			pages_to_mb(max_low_pfn));

	setup_bootmem_allocator();
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

void __init setup_bootmem_allocator(void)
{
	printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
		 max_pfn_mapped<<PAGE_SHIFT);
	printk(KERN_INFO "  low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT);
}

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
	pagetable_init();

	__flush_tlb_all();

	kmap_init();

	/*
	 * NOTE: at this point the bootmem allocator is fully available.
	 */
	olpc_dt_build_devicetree();
	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();
	zone_sizes_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's. All 586+'s are OK. This used to involve
 * black magic jumps to work around some nasty CPU bugs, but fortunately the
 * switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
	char z = 0;

	printk(KERN_INFO "Checking if this processor honours the WP bit even in supervisor mode...");

	__set_fixmap(FIX_WP_TEST, __pa_symbol(empty_zero_page), PAGE_KERNEL_RO);

	if (probe_kernel_write((char *)fix_to_virt(FIX_WP_TEST), &z, 1)) {
		clear_fixmap(FIX_WP_TEST);
		printk(KERN_CONT "Ok.\n");
		return;
	}

	printk(KERN_CONT "No.\n");
	panic("Linux doesn't support CPUs with broken WP.");
}
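
/*
 * Note: probe_kernel_write() returns -EFAULT when the write faults, so
 * the non-zero branch above means the CPU raised a fault on a
 * supervisor-mode write to a read-only page, i.e. WP is honoured;
 * falling through means the write silently succeeded and WP is broken.
 */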

void __init mem_init(void)
{
	pci_iommu_alloc();

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif
	/*
	 * With CONFIG_DEBUG_PAGEALLOC, initialization of highmem pages has
	 * to be done before memblock_free_all(). Memblock uses free low
	 * memory for temporary data (see find_range_array()) and for this
	 * purpose can use pages that were already passed to the buddy
	 * allocator, and hence are marked as not accessible in the page
	 * tables when compiled with CONFIG_DEBUG_PAGEALLOC. Otherwise the
	 * order of initialization is not important here.
	 */
	set_highmem_pages_init();

	/* this will put all low memory onto the freelists */
	memblock_free_all();

	after_bootmem = 1;
	x86_init.hyper.init_after_bootmem();

	mem_init_print_info(NULL);
	printk(KERN_INFO "virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"  cpu_entry : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

		CPU_ENTRY_AREA_BASE,
		CPU_ENTRY_AREA_BASE + CPU_ENTRY_AREA_MAP_SIZE,
		CPU_ENTRY_AREA_MAP_SIZE >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)__va(0), (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE	> FIXADDR_START);
	BUILD_BUG_ON(VMALLOC_END			> PKMAP_BASE);
#endif
#define high_memory (-128UL << 20)
	BUILD_BUG_ON(VMALLOC_START			>= VMALLOC_END);
#undef high_memory
#undef __FIXADDR_TOP

#ifdef CONFIG_HIGHMEM
	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE	> FIXADDR_START);
	BUG_ON(VMALLOC_END				> PKMAP_BASE);
#endif
	BUG_ON(VMALLOC_START				>= VMALLOC_END);
	BUG_ON((unsigned long)high_memory		> VMALLOC_START);

	test_wp_bit();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
		bool want_memblock)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;

	zone = page_zone(pfn_to_page(start_pfn));
	return __remove_pages(zone, start_pfn, nr_pages, altmap);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_MEMORY_HOTPLUG */

int kernel_set_to_readonly __read_mostly;

void set_kernel_text_rw(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read write\n",
		 start, start+size);

	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
}

void set_kernel_text_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read only\n",
		 start, start+size);

	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
}

static void mark_nxdata_nx(void)
{
	/*
	 * When this is called, init has already been executed and released,
	 * so everything past _etext should be NX.
	 */
	unsigned long start = PFN_ALIGN(_etext);
	/*
	 * This comes from the is_kernel_text() upper limit. Round up to a
	 * huge-page boundary, since huge pages may have been used for the
	 * mapping:
	 */
	unsigned long size = (((unsigned long)__init_end + HPAGE_SIZE) & HPAGE_MASK) - start;

	if (__supported_pte_mask & _PAGE_NX)
		printk(KERN_INFO "NX-protecting the kernel data: %luk\n", size >> 10);
	set_pages_nx(virt_to_page(start), size >> PAGE_SHIFT);
}
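
/*
 * The rounding arithmetic above, made explicit: the end boundary
 *
 *	((unsigned long)__init_end + HPAGE_SIZE) & HPAGE_MASK
 *
 * rounds up to a huge-page boundary beyond __init_end, covering the
 * entire last large page that is_kernel_text() (upper limit
 * __init_end) may have caused to be mapped executable; everything from
 * _etext up to that boundary is then marked NX.
 */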

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = (unsigned long)__end_rodata - start;

	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	pr_info("Write protecting kernel text and read-only data: %luk\n",
		size >> 10);

	kernel_set_to_readonly = 1;

#ifdef CONFIG_CPA_DEBUG
	pr_info("Testing CPA: Reverting %lx-%lx\n", start, start + size);
	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

	pr_info("Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
	mark_nxdata_nx();
	if (__supported_pte_mask & _PAGE_NX)
		debug_checkwx();
}