xref: /linux/arch/x86/mm/init_64.c (revision 643d1f7fe3aa12c8bdea6fa5b4ba874ff6dd601d)
1 /*
2  *  linux/arch/x86/mm/init_64.c
3  *
4  *  Copyright (C) 1995  Linus Torvalds
5  *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
6  *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
7  */
8 
9 #include <linux/signal.h>
10 #include <linux/sched.h>
11 #include <linux/kernel.h>
12 #include <linux/errno.h>
13 #include <linux/string.h>
14 #include <linux/types.h>
15 #include <linux/ptrace.h>
16 #include <linux/mman.h>
17 #include <linux/mm.h>
18 #include <linux/swap.h>
19 #include <linux/smp.h>
20 #include <linux/init.h>
21 #include <linux/pagemap.h>
22 #include <linux/bootmem.h>
23 #include <linux/proc_fs.h>
24 #include <linux/pci.h>
25 #include <linux/pfn.h>
26 #include <linux/poison.h>
27 #include <linux/dma-mapping.h>
28 #include <linux/module.h>
29 #include <linux/memory_hotplug.h>
30 #include <linux/nmi.h>
31 
32 #include <asm/processor.h>
33 #include <asm/system.h>
34 #include <asm/uaccess.h>
35 #include <asm/pgtable.h>
36 #include <asm/pgalloc.h>
37 #include <asm/dma.h>
38 #include <asm/fixmap.h>
39 #include <asm/e820.h>
40 #include <asm/apic.h>
41 #include <asm/tlb.h>
42 #include <asm/mmu_context.h>
43 #include <asm/proto.h>
44 #include <asm/smp.h>
45 #include <asm/sections.h>
46 #include <asm/kdebug.h>
47 #include <asm/numa.h>
48 
49 const struct dma_mapping_ops *dma_ops;
50 EXPORT_SYMBOL(dma_ops);
51 
52 static unsigned long dma_reserve __initdata;
53 
54 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
55 
56 /*
57  * NOTE: pagetable_init() allocates all the fixmap page tables contiguously
58  * in physical space, so we can cache the location of the first one and move
59  * around without rechecking the pgd every time.
60  */
61 
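/*
 * Dump a memory usage summary: free areas, free swap and page counts
 * (total, reserved, shared, swap-cached) gathered by walking every online
 * node's pfn range.
 */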
62 void show_mem(void)
63 {
64 	unsigned long i, total = 0, reserved = 0;
65 	unsigned long shared = 0, cached = 0;
66 	struct page *page;
67 	pg_data_t *pgdat;
68 
69 	printk(KERN_INFO "Mem-info:\n");
70 	show_free_areas();
71 	printk(KERN_INFO "Free swap:       %6ldkB\n",
72 		nr_swap_pages << (PAGE_SHIFT-10));
73 
74 	for_each_online_pgdat(pgdat) {
75 		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
76 			/*
77 			 * This loop can take a while with 256 GB and
78 			 * 4k pages so defer the NMI watchdog:
79 			 */
80 			if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
81 				touch_nmi_watchdog();
82 
83 			if (!pfn_valid(pgdat->node_start_pfn + i))
84 				continue;
85 
86 			page = pfn_to_page(pgdat->node_start_pfn + i);
87 			total++;
88 			if (PageReserved(page))
89 				reserved++;
90 			else if (PageSwapCache(page))
91 				cached++;
92 			else if (page_count(page))
93 				shared += page_count(page) - 1;
94 		}
95 	}
96 	printk(KERN_INFO "%lu pages of RAM\n",		total);
97 	printk(KERN_INFO "%lu reserved pages\n",	reserved);
98 	printk(KERN_INFO "%lu pages shared\n",		shared);
99 	printk(KERN_INFO "%lu pages swap cached\n",	cached);
100 }
101 
102 int after_bootmem;
103 
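/*
 * Allocate a zeroed, page-aligned page for kernel page tables: from the
 * bootmem allocator during early boot, via get_zeroed_page() afterwards.
 * Panics if the allocation fails.
 */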
104 static __init void *spp_getpage(void)
105 {
106 	void *ptr;
107 
108 	if (after_bootmem)
109 		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
110 	else
111 		ptr = alloc_bootmem_pages(PAGE_SIZE);
112 
113 	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
114 		panic("set_pte_phys: cannot allocate page data %s\n",
115 			after_bootmem ? "after bootmem" : "");
116 	}
117 
118 	pr_debug("spp_getpage %p\n", ptr);
119 
120 	return ptr;
121 }
122 
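/*
 * Map @phys at the kernel virtual address @vaddr with protection @prot,
 * allocating the pmd and pte levels with spp_getpage() as needed. The pgd
 * entry must already exist (head.S sets it up for the fixmap range), and
 * only the single TLB entry for @vaddr is flushed.
 */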
123 static __init void
124 set_pte_phys(unsigned long vaddr, unsigned long phys, pgprot_t prot)
125 {
126 	pgd_t *pgd;
127 	pud_t *pud;
128 	pmd_t *pmd;
129 	pte_t *pte, new_pte;
130 
131 	pr_debug("set_pte_phys %lx to %lx\n", vaddr, phys);
132 
133 	pgd = pgd_offset_k(vaddr);
134 	if (pgd_none(*pgd)) {
135 		printk(KERN_ERR
136 			"PGD FIXMAP MISSING, it should be set up in head.S!\n");
137 		return;
138 	}
139 	pud = pud_offset(pgd, vaddr);
140 	if (pud_none(*pud)) {
141 		pmd = (pmd_t *) spp_getpage();
142 		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
143 		if (pmd != pmd_offset(pud, 0)) {
144 			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
145 				pmd, pmd_offset(pud, 0));
146 			return;
147 		}
148 	}
149 	pmd = pmd_offset(pud, vaddr);
150 	if (pmd_none(*pmd)) {
151 		pte = (pte_t *) spp_getpage();
152 		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
153 		if (pte != pte_offset_kernel(pmd, 0)) {
154 			printk(KERN_ERR "PAGETABLE BUG #02!\n");
155 			return;
156 		}
157 	}
158 	new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);
159 
160 	pte = pte_offset_kernel(pmd, vaddr);
161 	if (!pte_none(*pte) &&
162 	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
163 		pte_ERROR(*pte);
164 	set_pte(pte, new_pte);
165 
166 	/*
167 	 * It's enough to flush this one mapping.
168 	 * (PGE mappings get flushed as well)
169 	 */
170 	__flush_tlb_one(vaddr);
171 }
172 
173 /* NOTE: this is meant to be run only at boot */
174 void __init
175 __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
176 {
177 	unsigned long address = __fix_to_virt(idx);
178 
179 	if (idx >= __end_of_fixed_addresses) {
180 		printk(KERN_ERR "Invalid __set_fixmap\n");
181 		return;
182 	}
183 	set_pte_phys(address, phys, prot);
184 }
185 
186 static unsigned long __initdata table_start;
187 static unsigned long __meminitdata table_end;
188 
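/*
 * Return a zeroed page for building the early direct-mapping page tables
 * and store its physical address in @phys. Until after_bootmem is set the
 * page is taken from the range reserved by find_early_table_space() and
 * temporarily mapped with early_ioremap(); later, get_zeroed_page() is used.
 */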
189 static __meminit void *alloc_low_page(unsigned long *phys)
190 {
191 	unsigned long pfn = table_end++;
192 	void *adr;
193 
194 	if (after_bootmem) {
195 		adr = (void *)get_zeroed_page(GFP_ATOMIC);
196 		*phys = __pa(adr);
197 
198 		return adr;
199 	}
200 
201 	if (pfn >= end_pfn)
202 		panic("alloc_low_page: ran out of memory");
203 
204 	adr = early_ioremap(pfn * PAGE_SIZE, PAGE_SIZE);
205 	memset(adr, 0, PAGE_SIZE);
206 	*phys  = pfn * PAGE_SIZE;
207 	return adr;
208 }
209 
210 static __meminit void unmap_low_page(void *adr)
211 {
212 	if (after_bootmem)
213 		return;
214 
215 	early_iounmap(adr, PAGE_SIZE);
216 }
217 
218 /* Temporarily map a physical range with 2MB kernel pages; must run before zap_low_mappings() */
219 __meminit void *early_ioremap(unsigned long addr, unsigned long size)
220 {
221 	pmd_t *pmd, *last_pmd;
222 	unsigned long vaddr;
223 	int i, pmds;
224 
225 	pmds = ((addr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
226 	vaddr = __START_KERNEL_map;
227 	pmd = level2_kernel_pgt;
228 	last_pmd = level2_kernel_pgt + PTRS_PER_PMD - 1;
229 
230 	for (; pmd <= last_pmd; pmd++, vaddr += PMD_SIZE) {
231 		for (i = 0; i < pmds; i++) {
232 			if (pmd_present(pmd[i]))
233 				goto continue_outer_loop;
234 		}
235 		vaddr += addr & ~PMD_MASK;
236 		addr &= PMD_MASK;
237 
238 		for (i = 0; i < pmds; i++, addr += PMD_SIZE)
239 			set_pmd(pmd+i, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
240 		__flush_tlb_all();
241 
242 		return (void *)vaddr;
243 continue_outer_loop:
244 		;
245 	}
246 	printk(KERN_ERR "early_ioremap(0x%lx, %lu) failed\n", addr, size);
247 
248 	return NULL;
249 }
250 
251 /*
252  * To avoid virtual aliases later:
253  */
254 __meminit void early_iounmap(void *addr, unsigned long size)
255 {
256 	unsigned long vaddr;
257 	pmd_t *pmd;
258 	int i, pmds;
259 
260 	vaddr = (unsigned long)addr;
261 	pmds = ((vaddr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
262 	pmd = level2_kernel_pgt + pmd_index(vaddr);
263 
264 	for (i = 0; i < pmds; i++)
265 		pmd_clear(pmd + i);
266 
267 	__flush_tlb_all();
268 }
269 
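/*
 * Fill one pmd page with 2MB kernel mappings for the physical range
 * [address, end). At boot time, entries past @end are cleared; entries
 * that are already populated are left alone.
 */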
270 static void __meminit
271 phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
272 {
273 	int i = pmd_index(address);
274 
275 	for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
276 		unsigned long entry;
277 		pmd_t *pmd = pmd_page + pmd_index(address);
278 
279 		if (address >= end) {
280 			if (!after_bootmem) {
281 				for (; i < PTRS_PER_PMD; i++, pmd++)
282 					set_pmd(pmd, __pmd(0));
283 			}
284 			break;
285 		}
286 
287 		if (pmd_val(*pmd))
288 			continue;
289 
290 		entry = __PAGE_KERNEL_LARGE|_PAGE_GLOBAL|address;
291 		entry &= __supported_pte_mask;
292 		set_pmd(pmd, __pmd(entry));
293 	}
294 }
295 
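/*
 * Extend the pmd page below an already-present pud entry to cover
 * [address, end), under init_mm.page_table_lock (memory hotplug path).
 */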
296 static void __meminit
297 phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
298 {
299 	pmd_t *pmd = pmd_offset(pud, 0);
300 	spin_lock(&init_mm.page_table_lock);
301 	phys_pmd_init(pmd, address, end);
302 	spin_unlock(&init_mm.page_table_lock);
303 	__flush_tlb_all();
304 }
305 
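/*
 * Fill one pud page with mappings for the physical range [addr, end),
 * allocating pmd pages as needed. At boot time, 1GB chunks that contain
 * no RAM according to the e820 map are left unmapped.
 */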
306 static void __meminit
307 phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
308 {
309 	int i = pud_index(addr);
310 
311 	for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
312 		unsigned long pmd_phys;
313 		pud_t *pud = pud_page + pud_index(addr);
314 		pmd_t *pmd;
315 
316 		if (addr >= end)
317 			break;
318 
319 		if (!after_bootmem &&
320 				!e820_any_mapped(addr, addr+PUD_SIZE, 0)) {
321 			set_pud(pud, __pud(0));
322 			continue;
323 		}
324 
325 		if (pud_val(*pud)) {
326 			phys_pmd_update(pud, addr, end);
327 			continue;
328 		}
329 
330 		pmd = alloc_low_page(&pmd_phys);
331 
332 		spin_lock(&init_mm.page_table_lock);
333 		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
334 		phys_pmd_init(pmd, addr, end);
335 		spin_unlock(&init_mm.page_table_lock);
336 
337 		unmap_low_page(pmd);
338 	}
339 	__flush_tlb_all();
340 }
341 
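/*
 * Estimate the worst-case size of the pud/pmd tables needed to map memory
 * up to @end, find a free physical range for them in the e820 map and
 * record it (in pages) in table_start/table_end.
 */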
342 static void __init find_early_table_space(unsigned long end)
343 {
344 	unsigned long puds, pmds, tables, start;
345 
346 	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
347 	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
348 	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
349 		 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);
350 
351 	/*
352 	 * RED-PEN putting page tables only on node 0 could
353 	 * cause a hotspot and fill up ZONE_DMA. The page tables
354 	 * need roughly 0.5KB per GB.
355 	 */
356 	start = 0x8000;
357 	table_start = find_e820_area(start, end, tables, PAGE_SIZE);
358 	if (table_start == -1UL)
359 		panic("Cannot find space for the kernel page tables");
360 
361 	table_start >>= PAGE_SHIFT;
362 	table_end = table_start;
363 
364 	early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
365 		end, table_start << PAGE_SHIFT,
366 		(table_start << PAGE_SHIFT) + tables);
367 }
368 
369 /*
370  * Setup the direct mapping of the physical memory at PAGE_OFFSET.
371  * This runs before bootmem is initialized and gets pages directly from
372  * the physical memory. To access them they are temporarily mapped.
373  */
374 void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
375 {
376 	unsigned long next;
377 
378 	pr_debug("init_memory_mapping\n");
379 
380 	/*
381 	 * Find space for the kernel direct mapping tables.
382 	 *
383 	 * Later we should allocate these tables in the local node of the
384 	 * memory mapped. Unfortunately this is done currently before the
385 	 * nodes are discovered.
386 	 */
387 	if (!after_bootmem)
388 		find_early_table_space(end);
389 
390 	start = (unsigned long)__va(start);
391 	end = (unsigned long)__va(end);
392 
393 	for (; start < end; start = next) {
394 		pgd_t *pgd = pgd_offset_k(start);
395 		unsigned long pud_phys;
396 		pud_t *pud;
397 
398 		if (after_bootmem)
399 			pud = pud_offset(pgd, start & PGDIR_MASK);
400 		else
401 			pud = alloc_low_page(&pud_phys);
402 
403 		next = start + PGDIR_SIZE;
404 		if (next > end)
405 			next = end;
406 		phys_pud_init(pud, __pa(start), __pa(next));
407 		if (!after_bootmem)
408 			set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
409 		unmap_low_page(pud);
410 	}
411 
412 	if (!after_bootmem)
413 		mmu_cr4_features = read_cr4();
414 	__flush_tlb_all();
415 
416 	if (!after_bootmem)
417 		reserve_early(table_start << PAGE_SHIFT,
418 				 table_end << PAGE_SHIFT, "PGTABLE");
419 }
420 
421 #ifndef CONFIG_NUMA
422 void __init paging_init(void)
423 {
424 	unsigned long max_zone_pfns[MAX_NR_ZONES];
425 
426 	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
427 	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
428 	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
429 	max_zone_pfns[ZONE_NORMAL] = end_pfn;
430 
431 	memory_present(0, 0, end_pfn);
432 	sparse_init();
433 	free_area_init_nodes(max_zone_pfns);
434 }
435 #endif
436 
437 /*
438  * Unmap a kernel mapping if it exists. This is useful to avoid
439  * prefetches from the CPU leading to inconsistent cache lines.
440  * address and size must be aligned to 2MB boundaries.
441  * Does nothing when the mapping doesn't exist.
442  */
443 void __init clear_kernel_mapping(unsigned long address, unsigned long size)
444 {
445 	unsigned long end = address + size;
446 
447 	BUG_ON(address & ~LARGE_PAGE_MASK);
448 	BUG_ON(size & ~LARGE_PAGE_MASK);
449 
450 	for (; address < end; address += LARGE_PAGE_SIZE) {
451 		pgd_t *pgd = pgd_offset_k(address);
452 		pud_t *pud;
453 		pmd_t *pmd;
454 
455 		if (pgd_none(*pgd))
456 			continue;
457 
458 		pud = pud_offset(pgd, address);
459 		if (pud_none(*pud))
460 			continue;
461 
462 		pmd = pmd_offset(pud, address);
463 		if (!pmd || pmd_none(*pmd))
464 			continue;
465 
466 		if (!(pmd_val(*pmd) & _PAGE_PSE)) {
467 			/*
468 			 * Could handle this, but it should not happen
469 			 * currently:
470 			 */
471 			printk(KERN_ERR "clear_kernel_mapping: "
472 				"mapping has been split. will leak memory\n");
473 			pmd_ERROR(*pmd);
474 		}
475 		set_pmd(pmd, __pmd(0));
476 	}
477 	__flush_tlb_all();
478 }
479 
480 /*
481  * Memory hotplug specific functions
482  */
483 void online_page(struct page *page)
484 {
485 	ClearPageReserved(page);
486 	init_page_count(page);
487 	__free_page(page);
488 	totalram_pages++;
489 	num_physpages++;
490 }
491 
492 #ifdef CONFIG_MEMORY_HOTPLUG
493 /*
494  * Memory is always added to the NORMAL zone. This means you will never
495  * get additional DMA/DMA32 memory.
496  */
497 int arch_add_memory(int nid, u64 start, u64 size)
498 {
499 	struct pglist_data *pgdat = NODE_DATA(nid);
500 	struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
501 	unsigned long start_pfn = start >> PAGE_SHIFT;
502 	unsigned long nr_pages = size >> PAGE_SHIFT;
503 	int ret;
504 
505 	init_memory_mapping(start, start + size-1);
506 
507 	ret = __add_pages(zone, start_pfn, nr_pages);
508 	WARN_ON(ret);
509 
510 	return ret;
511 }
512 EXPORT_SYMBOL_GPL(arch_add_memory);
513 
514 #if !defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA)
515 int memory_add_physaddr_to_nid(u64 start)
516 {
517 	return 0;
518 }
519 EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
520 #endif
521 
522 #endif /* CONFIG_MEMORY_HOTPLUG */
523 
524 static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel,
525 			 kcore_modules, kcore_vsyscall;
526 
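/*
 * Final memory setup: hand the bootmem pages over to the page allocator,
 * account reserved pages, register the /proc/kcore regions and print the
 * memory summary.
 */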
527 void __init mem_init(void)
528 {
529 	long codesize, reservedpages, datasize, initsize;
530 
531 	pci_iommu_alloc();
532 
533 	/* clear_bss() already cleared the empty_zero_page */
534 
535 	/* temporary debugging - double check it's true: */
536 	{
537 		int i;
538 
539 		for (i = 0; i < 1024; i++)
540 			WARN_ON_ONCE(empty_zero_page[i]);
541 	}
542 
543 	reservedpages = 0;
544 
545 	/* this will put all low memory onto the freelists */
546 #ifdef CONFIG_NUMA
547 	totalram_pages = numa_free_all_bootmem();
548 #else
549 	totalram_pages = free_all_bootmem();
550 #endif
551 	reservedpages = end_pfn - totalram_pages -
552 					absent_pages_in_range(0, end_pfn);
553 	after_bootmem = 1;
554 
555 	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
556 	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
557 	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
558 
559 	/* Register memory areas for /proc/kcore */
560 	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
561 	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
562 		   VMALLOC_END-VMALLOC_START);
563 	kclist_add(&kcore_kernel, &_stext, _end - _stext);
564 	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
565 	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
566 				 VSYSCALL_END - VSYSCALL_START);
567 
568 	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
569 				"%ldk reserved, %ldk data, %ldk init)\n",
570 		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
571 		end_pfn << (PAGE_SHIFT-10),
572 		codesize >> 10,
573 		reservedpages << (PAGE_SHIFT-10),
574 		datasize >> 10,
575 		initsize >> 10);
576 }
577 
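/*
 * Release the pages in [begin, end) back to the page allocator, poisoning
 * them first. With CONFIG_DEBUG_PAGEALLOC the range is marked not-present
 * instead, so any late access faults.
 */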
578 void free_init_pages(char *what, unsigned long begin, unsigned long end)
579 {
580 	unsigned long addr;
581 
582 	if (begin >= end)
583 		return;
584 
585 	/*
586 	 * If debugging page accesses, do not free this memory but
587 	 * mark it not present - any buggy init-section access will
588 	 * then create a kernel page fault:
589 	 */
590 #ifdef CONFIG_DEBUG_PAGEALLOC
591 	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
592 		begin, PAGE_ALIGN(end));
593 	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
594 #else
595 	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
596 
597 	for (addr = begin; addr < end; addr += PAGE_SIZE) {
598 		ClearPageReserved(virt_to_page(addr));
599 		init_page_count(virt_to_page(addr));
600 		memset((void *)(addr & ~(PAGE_SIZE-1)),
601 			POISON_FREE_INITMEM, PAGE_SIZE);
602 		free_page(addr);
603 		totalram_pages++;
604 	}
605 #endif
606 }
607 
608 void free_initmem(void)
609 {
610 	free_init_pages("unused kernel memory",
611 			(unsigned long)(&__init_begin),
612 			(unsigned long)(&__init_end));
613 }
614 
615 #ifdef CONFIG_DEBUG_RODATA
616 const int rodata_test_data = 0xC3;
617 EXPORT_SYMBOL_GPL(rodata_test_data);
618 
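/*
 * Write-protect the kernel's read-only data (and text, unless SMP
 * alternatives or kprobes require it to stay writable), then exercise the
 * rodata test and, with CONFIG_CPA_DEBUG, the CPA self-test.
 */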
619 void mark_rodata_ro(void)
620 {
621 	unsigned long start = (unsigned long)_stext, end;
622 
623 #ifdef CONFIG_HOTPLUG_CPU
624 	/* It must still be possible to apply SMP alternatives. */
625 	if (num_possible_cpus() > 1)
626 		start = (unsigned long)_etext;
627 #endif
628 
629 #ifdef CONFIG_KPROBES
630 	start = (unsigned long)__start_rodata;
631 #endif
632 
633 	end = (unsigned long)__end_rodata;
634 	start = (start + PAGE_SIZE - 1) & PAGE_MASK;
635 	end &= PAGE_MASK;
636 	if (end <= start)
637 		return;
638 
639 	set_memory_ro(start, (end - start) >> PAGE_SHIFT);
640 
641 	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
642 	       (end - start) >> 10);
643 
644 	rodata_test();
645 
646 #ifdef CONFIG_CPA_DEBUG
647 	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
648 	set_memory_rw(start, (end-start) >> PAGE_SHIFT);
649 
650 	printk(KERN_INFO "Testing CPA: again\n");
651 	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
652 #endif
653 }
654 #endif
655 
656 #ifdef CONFIG_BLK_DEV_INITRD
657 void free_initrd_mem(unsigned long start, unsigned long end)
658 {
659 	free_init_pages("initrd memory", start, end);
660 }
661 #endif
662 
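/*
 * Reserve a physical range with the (per-node, on NUMA) bootmem allocator.
 * Requests above end_pfn are rejected; those still below end_pfn_map (kdump
 * firmware tables) are ignored silently. Ranges lying entirely below the
 * DMA limit are also added to the DMA reserve.
 */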
663 void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
664 {
665 #ifdef CONFIG_NUMA
666 	int nid = phys_to_nid(phys);
667 #endif
668 	unsigned long pfn = phys >> PAGE_SHIFT;
669 
670 	if (pfn >= end_pfn) {
671 		/*
672 		 * This can happen with kdump kernels when accessing
673 		 * firmware tables:
674 		 */
675 		if (pfn < end_pfn_map)
676 			return;
677 
678 		printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %u\n",
679 				phys, len);
680 		return;
681 	}
682 
683 	/* Should check here against the e820 map to avoid double free */
684 #ifdef CONFIG_NUMA
685 	reserve_bootmem_node(NODE_DATA(nid), phys, len);
686 #else
687 	reserve_bootmem(phys, len);
688 #endif
689 	if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
690 		dma_reserve += len / PAGE_SIZE;
691 		set_dma_reserve(dma_reserve);
692 	}
693 }
694 
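/*
 * Return whether a kernel virtual address is backed by a valid page,
 * walking the page tables by hand and handling 2MB mappings at the pmd
 * level.
 */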
695 int kern_addr_valid(unsigned long addr)
696 {
697 	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
698 	pgd_t *pgd;
699 	pud_t *pud;
700 	pmd_t *pmd;
701 	pte_t *pte;
702 
703 	if (above != 0 && above != -1UL)
704 		return 0;
705 
706 	pgd = pgd_offset_k(addr);
707 	if (pgd_none(*pgd))
708 		return 0;
709 
710 	pud = pud_offset(pgd, addr);
711 	if (pud_none(*pud))
712 		return 0;
713 
714 	pmd = pmd_offset(pud, addr);
715 	if (pmd_none(*pmd))
716 		return 0;
717 
718 	if (pmd_large(*pmd))
719 		return pfn_valid(pmd_pfn(*pmd));
720 
721 	pte = pte_offset_kernel(pmd, addr);
722 	if (pte_none(*pte))
723 		return 0;
724 
725 	return pfn_valid(pte_pfn(*pte));
726 }
727 
728 /*
729  * A pseudo VMA to allow ptrace access to the vsyscall page. This only
730  * covers the 64-bit vsyscall page now; 32-bit has a real VMA and does
731  * not need special handling anymore:
732  */
733 static struct vm_area_struct gate_vma = {
734 	.vm_start	= VSYSCALL_START,
735 	.vm_end		= VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
736 	.vm_page_prot	= PAGE_READONLY_EXEC,
737 	.vm_flags	= VM_READ | VM_EXEC
738 };
739 
740 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
741 {
742 #ifdef CONFIG_IA32_EMULATION
743 	if (test_tsk_thread_flag(tsk, TIF_IA32))
744 		return NULL;
745 #endif
746 	return &gate_vma;
747 }
748 
749 int in_gate_area(struct task_struct *task, unsigned long addr)
750 {
751 	struct vm_area_struct *vma = get_gate_vma(task);
752 
753 	if (!vma)
754 		return 0;
755 
756 	return (addr >= vma->vm_start) && (addr < vma->vm_end);
757 }
758 
759 /*
760  * Use this when you have no reliable task/vma, typically from interrupt
761  * context. It is less reliable than using the task's vma and may give
762  * false positives:
763  */
764 int in_gate_area_no_task(unsigned long addr)
765 {
766 	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
767 }
768 
769 const char *arch_vma_name(struct vm_area_struct *vma)
770 {
771 	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
772 		return "[vdso]";
773 	if (vma == &gate_vma)
774 		return "[vsyscall]";
775 	return NULL;
776 }
777 
778 #ifdef CONFIG_SPARSEMEM_VMEMMAP
779 /*
780  * Initialise the sparsemem vmemmap using huge pages at the PMD level.
781  */
782 int __meminit
783 vmemmap_populate(struct page *start_page, unsigned long size, int node)
784 {
785 	unsigned long addr = (unsigned long)start_page;
786 	unsigned long end = (unsigned long)(start_page + size);
787 	unsigned long next;
788 	pgd_t *pgd;
789 	pud_t *pud;
790 	pmd_t *pmd;
791 
792 	for (; addr < end; addr = next) {
793 		next = pmd_addr_end(addr, end);
794 
795 		pgd = vmemmap_pgd_populate(addr, node);
796 		if (!pgd)
797 			return -ENOMEM;
798 
799 		pud = vmemmap_pud_populate(pgd, addr, node);
800 		if (!pud)
801 			return -ENOMEM;
802 
803 		pmd = pmd_offset(pud, addr);
804 		if (pmd_none(*pmd)) {
805 			pte_t entry;
806 			void *p;
807 
808 			p = vmemmap_alloc_block(PMD_SIZE, node);
809 			if (!p)
810 				return -ENOMEM;
811 
812 			entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
813 							PAGE_KERNEL_LARGE);
814 			set_pmd(pmd, __pmd(pte_val(entry)));
815 
816 			printk(KERN_DEBUG " [%lx-%lx] PMD ->%p on node %d\n",
817 				addr, addr + PMD_SIZE - 1, p, node);
818 		} else {
819 			vmemmap_verify((pte_t *)pmd, node, addr, next);
820 		}
821 	}
822 	return 0;
823 }
824 #endif
825