xref: /linux/mm/sparse.c (revision d67b569f5f620c0fb95d5212642746b7ba9d29e4)
/*
 * sparse memory mappings.
 */
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <asm/dma.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_maps for valid memory
 */
struct mem_section mem_section[NR_MEM_SECTIONS];
EXPORT_SYMBOL(mem_section);
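
/*
 * Usage sketch (illustrative only, not part of this file): with the
 * array above, asking "was any memory registered against this pfn?"
 * is a shift plus a flag test.  pfn_to_section_nr() is simply
 * pfn >> PFN_SECTION_SHIFT, where PFN_SECTION_SHIFT is
 * SECTION_SIZE_BITS - PAGE_SHIFT; e.g. with hypothetical values
 * SECTION_SIZE_BITS == 28 and PAGE_SHIFT == 12, each section covers
 * 2^16 pages and pfn 0x12345 lands in mem_section[1].
 */
static __attribute__((unused))
int example_section_registered(unsigned long pfn)
{
	return mem_section[pfn_to_section_nr(pfn)].section_mem_map &
		SECTION_MARKED_PRESENT;
}
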
/* Record a memory area against a node. */
void memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	start &= PAGE_SECTION_MASK;
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		if (!mem_section[section].section_mem_map)
			mem_section[section].section_mem_map = SECTION_MARKED_PRESENT;
	}
}
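
/*
 * Caller sketch (hypothetical, not from this tree): an architecture
 * typically walks its firmware-provided memory ranges early in boot
 * and registers each one before sparse_init() runs, e.g.:
 *
 *	for (i = 0; i < nr_ranges; i++)
 *		memory_present(range[i].nid,
 *			       range[i].start_pfn, range[i].end_pfn);
 *
 * Note that start is rounded down to a section boundary above, so a
 * range beginning mid-section still marks that whole section present;
 * end is not rounded, but the loop's pfn < end test means a partially
 * covered final section is marked as well.
 */
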
/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
						     unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long nr_pages = 0;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (nid != early_pfn_to_nid(pfn))
			continue;

		if (pfn_valid(pfn))
			nr_pages += PAGES_PER_SECTION;
	}

	return nr_pages * sizeof(struct page);
}
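
/*
 * Worked example (hypothetical numbers): if a node owns 4 valid
 * sections, PAGES_PER_SECTION is 2^16 and sizeof(struct page) is
 * 32 bytes, the node's mem_map needs 4 * 65536 * 32 bytes == 8MB.
 * Note the walk samples one pfn per section, so a section counts
 * fully or not at all.
 */
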
/*
 * Subtle: we bias the mem_map pointer by the section's starting pfn,
 * so that for any page in the section, page - section_mem_map yields
 * its actual physical page frame number, and section_mem_map + pfn
 * yields its struct page (see the worked round trip after
 * sparse_decode_mem_map() below).
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * We need this if we ever free the mem_maps.  While not implemented yet,
 * this function is included for parity with its sibling.
 */
static __attribute__((unused))
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
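
/*
 * Worked round trip for the pair above (illustrative): let pnum's
 * first pfn be S, so section_nr_to_pfn(pnum) == S.  Encoding stores
 *
 *	coded = mem_map - S
 *
 * (pointer arithmetic in struct page units).  The page for pfn p in
 * this section is mem_map + (p - S) == coded + p, so
 * sparse_decode_mem_map(coded, pnum) == coded + S recovers mem_map,
 * and p == page - coded falls out for free.
 */
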
static int sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map)
{
	if (!valid_section(ms))
		return -EINVAL;

	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum);

	return 1;
}
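
/*
 * A note on the OR above (an observation, not from the original
 * file): section_mem_map already holds SECTION_MARKED_PRESENT in its
 * low bit, and the encoded pointer is OR'd on top rather than
 * assigned, so the present flag survives.  In practice struct page
 * size and mem_map alignment keep that low bit of the encoded value
 * clear.  The return convention matches the comment above
 * sparse_add_one_section(): 1 on success, -EINVAL if the section was
 * never marked present.
 */
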
static struct page *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	int nid = early_pfn_to_nid(section_nr_to_pfn(pnum));

	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	map = alloc_bootmem_node(NODE_DATA(nid),
			sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	printk(KERN_WARNING "%s: allocation failed\n", __FUNCTION__);
	mem_section[pnum].section_mem_map = 0;
	return NULL;
}
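
/*
 * Allocation-order note (an aside, not in the original): alloc_remap()
 * is an arch hook that only some configurations implement (the i386
 * NUMA remapped-KVA scheme of this era); the generic stub returns
 * NULL, in which case the mem_map simply comes from node-local
 * bootmem via alloc_bootmem_node().
 */
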
/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical-to-section mapping.
 */
void sparse_init(void)
{
	unsigned long pnum;
	struct page *map;

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!valid_section_nr(pnum))
			continue;

		map = sparse_early_mem_map_alloc(pnum);
		if (map)
			sparse_init_one_section(&mem_section[pnum], pnum, map);
	}
}
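
/*
 * Boot-time ordering sketch (hypothetical call site, not from this
 * file): every memory_present() call must precede the single
 * sparse_init() call, e.g. in an arch's setup path:
 *
 *	memory_present(nid, start_pfn, end_pfn);  // once per range
 *	...
 *	sparse_init();		// then allocate every mem_map
 *	zone_sizes_init();	// hypothetical follow-on step
 */
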
/*
 * Returns the number of sections whose mem_maps were properly
 * set.  If this is <= 0, the passed-in map was not consumed and
 * must be freed.
 */
int sparse_add_one_section(unsigned long start_pfn, int nr_pages, struct page *map)
{
	struct mem_section *ms = __pfn_to_section(start_pfn);

	if (ms->section_mem_map & SECTION_MARKED_PRESENT)
		return -EEXIST;

	ms->section_mem_map |= SECTION_MARKED_PRESENT;

	return sparse_init_one_section(ms, pfn_to_section_nr(start_pfn), map);
}
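
/*
 * Hotplug caller sketch (hypothetical, not from this tree): a memory
 * add path would allocate a mem_map sized for the new section and
 * honour the contract documented above:
 *
 *	map = alloc_section_mem_map(nr_pages);	// hypothetical helper
 *	ret = sparse_add_one_section(start_pfn, nr_pages, map);
 *	if (ret <= 0)
 *		free_section_mem_map(map);	// map was not consumed
 */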
138