xref: /linux/arch/x86/mm/numa_32.c (revision fd639726bf15fca8ee1a00dce8e0096d0ad9bd18)
/*
 * Written by: Patricia Gaughen <gone@us.ibm.com>, IBM Corporation
 * August 2002: added remote node KVA remap - Martin J. Bligh
 *
 * Copyright (C) 2002, IBM Corp.
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/init.h>

#include "numa_internal.h"

#ifdef CONFIG_DISCONTIGMEM
/*
 * physnode_map - the mapping between a pfn and its owning node
 *
 * physnode_map keeps track of the physical memory layout of a generic
 * numa node at a 64MB granularity: each element of the array covers
 * 64MB of memory and is marked with the id of the node that owns it.
 * So, if the first gig is on node 0 and the second gig is on node 1,
 * physnode_map will contain:
 *
 *     physnode_map[0-15] = 0;
 *     physnode_map[16-31] = 1;
 *     physnode_map[32- ] = -1;
 */
s8 physnode_map[MAX_SECTIONS] __read_mostly = { [0 ... (MAX_SECTIONS - 1)] = -1};
EXPORT_SYMBOL(physnode_map);
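
/*
 * Illustrative note: consumers look the owning node up by section index,
 * much as the 32-bit pfn_to_nid() helper does:
 *
 *	nid = physnode_map[pfn / PAGES_PER_SECTION];
 *
 * A value of -1 means the section has not been claimed by any node.
 * memory_present() below fills the map in, one section at a time.
 */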

void memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	printk(KERN_INFO "Node: %d, start_pfn: %lx, end_pfn: %lx\n",
			nid, start, end);
	printk(KERN_DEBUG "  Setting physnode_map array to node %d for pfns:\n", nid);
	printk(KERN_DEBUG "  ");
	start = round_down(start, PAGES_PER_SECTION);
	end = round_up(end, PAGES_PER_SECTION);
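	/* Claim every section overlapping [start, end) for this node. */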
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		physnode_map[pfn / PAGES_PER_SECTION] = nid;
		printk(KERN_CONT "%lx ", pfn);
	}
	printk(KERN_CONT "\n");
}

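/*
 * Rough size, in bytes, of this node's mem_map: one struct page per pfn
 * in [start_pfn, end_pfn), plus one extra struct page of headroom.
 */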
unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
					      unsigned long end_pfn)
{
	unsigned long nr_pages = end_pfn - start_pfn;

	if (!nr_pages)
		return 0;

	return (nr_pages + 1) * sizeof(struct page);
}
#endif

extern unsigned long highend_pfn, highstart_pfn;

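/*
 * Early NUMA memory setup: run the common x86 NUMA init, work out where
 * lowmem ends and highmem begins, publish high_memory, and set up the
 * boot-time memory allocator.
 */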
void __init initmem_init(void)
{
	x86_numa_init();

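	/*
	 * Find the lowmem/highmem boundary: highstart_pfn is the first
	 * highmem pfn (it stays equal to max_pfn when there is no
	 * highmem), and high_memory is the first virtual address past
	 * the direct-mapped lowmem range.
	 */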
#ifdef CONFIG_HIGHMEM
	highstart_pfn = highend_pfn = max_pfn;
	if (max_pfn > max_low_pfn)
		highstart_pfn = max_low_pfn;
	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
	       pages_to_mb(highend_pfn - highstart_pfn));
	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
			pages_to_mb(max_low_pfn));
	printk(KERN_DEBUG "max_low_pfn = %lx, highstart_pfn = %lx\n",
			max_low_pfn, highstart_pfn);

	printk(KERN_DEBUG "Low memory ends at vaddr %08lx\n",
			(ulong) pfn_to_kaddr(max_low_pfn));

	printk(KERN_DEBUG "High memory starts at vaddr %08lx\n",
			(ulong) pfn_to_kaddr(highstart_pfn));

	__vmalloc_start_set = true;
	setup_bootmem_allocator();
}