xref: /linux/mm/sparse.c (revision af0cd5a7c3cded50c25e98acd94912d17a0eb914)
1d41dee36SAndy Whitcroft /*
2d41dee36SAndy Whitcroft  * sparse memory mappings.
3d41dee36SAndy Whitcroft  */
4d41dee36SAndy Whitcroft #include <linux/mm.h>
5d41dee36SAndy Whitcroft #include <linux/mmzone.h>
6d41dee36SAndy Whitcroft #include <linux/bootmem.h>
70b0acbecSDave Hansen #include <linux/highmem.h>
8d41dee36SAndy Whitcroft #include <linux/module.h>
928ae55c9SDave Hansen #include <linux/spinlock.h>
100b0acbecSDave Hansen #include <linux/vmalloc.h>
11d41dee36SAndy Whitcroft #include <asm/dma.h>
128f6aac41SChristoph Lameter #include <asm/pgalloc.h>
138f6aac41SChristoph Lameter #include <asm/pgtable.h>
14d41dee36SAndy Whitcroft 
15d41dee36SAndy Whitcroft /*
16d41dee36SAndy Whitcroft  * Permanent SPARSEMEM data:
17d41dee36SAndy Whitcroft  *
18d41dee36SAndy Whitcroft  * 1) mem_section	- memory sections, mem_map's for valid memory
19d41dee36SAndy Whitcroft  */
203e347261SBob Picco #ifdef CONFIG_SPARSEMEM_EXTREME
21802f192eSBob Picco struct mem_section *mem_section[NR_SECTION_ROOTS]
2222fc6eccSRavikiran G Thirumalai 	____cacheline_internodealigned_in_smp;
233e347261SBob Picco #else
243e347261SBob Picco struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
2522fc6eccSRavikiran G Thirumalai 	____cacheline_internodealigned_in_smp;
263e347261SBob Picco #endif
273e347261SBob Picco EXPORT_SYMBOL(mem_section);
283e347261SBob Picco 
2989689ae7SChristoph Lameter #ifdef NODE_NOT_IN_PAGE_FLAGS
3089689ae7SChristoph Lameter /*
3189689ae7SChristoph Lameter  * If we did not store the node number in the page then we have to
3289689ae7SChristoph Lameter  * do a lookup in the section_to_node_table in order to find which
3389689ae7SChristoph Lameter  * node the page belongs to.
3489689ae7SChristoph Lameter  */
3589689ae7SChristoph Lameter #if MAX_NUMNODES <= 256
3689689ae7SChristoph Lameter static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
3789689ae7SChristoph Lameter #else
3889689ae7SChristoph Lameter static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
3989689ae7SChristoph Lameter #endif
4089689ae7SChristoph Lameter 
4125ba77c1SAndy Whitcroft int page_to_nid(struct page *page)
4289689ae7SChristoph Lameter {
4389689ae7SChristoph Lameter 	return section_to_node_table[page_to_section(page)];
4489689ae7SChristoph Lameter }
4589689ae7SChristoph Lameter EXPORT_SYMBOL(page_to_nid);
4685770ffeSAndy Whitcroft 
/* Record that section @section_nr belongs to node @nid. */
static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
5185770ffeSAndy Whitcroft #else /* !NODE_NOT_IN_PAGE_FLAGS */
/* No-op: the node number already lives in page->flags. */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
5589689ae7SChristoph Lameter #endif
5689689ae7SChristoph Lameter 
573e347261SBob Picco #ifdef CONFIG_SPARSEMEM_EXTREME
58577a32f6SSam Ravnborg static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
59802f192eSBob Picco {
6028ae55c9SDave Hansen 	struct mem_section *section = NULL;
6128ae55c9SDave Hansen 	unsigned long array_size = SECTIONS_PER_ROOT *
6228ae55c9SDave Hansen 				   sizeof(struct mem_section);
63802f192eSBob Picco 
6439d24e64SMike Kravetz 	if (slab_is_available())
6546a66eecSMike Kravetz 		section = kmalloc_node(array_size, GFP_KERNEL, nid);
6646a66eecSMike Kravetz 	else
6728ae55c9SDave Hansen 		section = alloc_bootmem_node(NODE_DATA(nid), array_size);
683e347261SBob Picco 
6928ae55c9SDave Hansen 	if (section)
7028ae55c9SDave Hansen 		memset(section, 0, array_size);
713e347261SBob Picco 
7228ae55c9SDave Hansen 	return section;
73802f192eSBob Picco }
7428ae55c9SDave Hansen 
/*
 * Ensure mem_section[] has a root array covering @section_nr,
 * allocating one on @nid if needed.  Returns 0 on success, -EEXIST
 * if the root was already populated, -ENOMEM on allocation failure.
 */
static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	static DEFINE_SPINLOCK(index_init_lock);
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;
	int ret = 0;

	/* Unlocked fast path: someone already set this root up. */
	if (mem_section[root])
		return -EEXIST;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;
	/*
	 * This lock keeps two different sections from
	 * reallocating for the same index
	 */
	spin_lock(&index_init_lock);

	/* Re-check under the lock: a racing caller may have won. */
	if (mem_section[root]) {
		/*
		 * NOTE(review): the array allocated above is leaked on
		 * this path (no free for the bootmem case) — rare, boot
		 * time only, but worth confirming upstream intent.
		 */
		ret = -EEXIST;
		goto out;
	}

	mem_section[root] = section;
out:
	spin_unlock(&index_init_lock);
	return ret;
}
10428ae55c9SDave Hansen #else /* !SPARSEMEM_EXTREME */
/* Flat mem_section[] array: every index always exists. */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
10928ae55c9SDave Hansen #endif
11028ae55c9SDave Hansen 
1114ca644d9SDave Hansen /*
1124ca644d9SDave Hansen  * Although written for the SPARSEMEM_EXTREME case, this happens
113cd881a6bSAndy Whitcroft  * to also work for the flat array case because
1144ca644d9SDave Hansen  * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
1154ca644d9SDave Hansen  */
int __section_nr(struct mem_section* ms)
{
	unsigned long root_nr;
	struct mem_section* root;

	/* Scan each allocated root for the one containing @ms. */
	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
		     break;
	}

	/*
	 * NOTE(review): if @ms does not point into any root, the loop
	 * falls through and the value below is meaningless — callers
	 * must pass a pointer obtained from mem_section[].
	 */
	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
1324ca644d9SDave Hansen 
13330c253e6SAndy Whitcroft /*
13430c253e6SAndy Whitcroft  * During early boot, before section_mem_map is used for an actual
13530c253e6SAndy Whitcroft  * mem_map, we use section_mem_map to store the section's NUMA
13630c253e6SAndy Whitcroft  * node.  This keeps us from having to use another data structure.  The
13730c253e6SAndy Whitcroft  * node information is cleared just before we store the real mem_map.
13830c253e6SAndy Whitcroft  */
/* Pack @nid into the section_mem_map word for early boot. */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}
14330c253e6SAndy Whitcroft 
/* Recover the node packed by sparse_encode_early_nid(). */
static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}
14830c253e6SAndy Whitcroft 
149d41dee36SAndy Whitcroft /* Record a memory area against a node. */
150a3142c8eSYasunori Goto void __init memory_present(int nid, unsigned long start, unsigned long end)
151d41dee36SAndy Whitcroft {
152d41dee36SAndy Whitcroft 	unsigned long pfn;
153d41dee36SAndy Whitcroft 
154d41dee36SAndy Whitcroft 	start &= PAGE_SECTION_MASK;
155d41dee36SAndy Whitcroft 	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
156d41dee36SAndy Whitcroft 		unsigned long section = pfn_to_section_nr(pfn);
157802f192eSBob Picco 		struct mem_section *ms;
158802f192eSBob Picco 
159802f192eSBob Picco 		sparse_index_init(section, nid);
16085770ffeSAndy Whitcroft 		set_section_nid(section, nid);
161802f192eSBob Picco 
162802f192eSBob Picco 		ms = __nr_to_section(section);
163802f192eSBob Picco 		if (!ms->section_mem_map)
16430c253e6SAndy Whitcroft 			ms->section_mem_map = sparse_encode_early_nid(nid) |
16530c253e6SAndy Whitcroft 							SECTION_MARKED_PRESENT;
166d41dee36SAndy Whitcroft 	}
167d41dee36SAndy Whitcroft }
168d41dee36SAndy Whitcroft 
169d41dee36SAndy Whitcroft /*
170d41dee36SAndy Whitcroft  * Only used by the i386 NUMA architecures, but relatively
171d41dee36SAndy Whitcroft  * generic code.
172d41dee36SAndy Whitcroft  */
173d41dee36SAndy Whitcroft unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
174d41dee36SAndy Whitcroft 						     unsigned long end_pfn)
175d41dee36SAndy Whitcroft {
176d41dee36SAndy Whitcroft 	unsigned long pfn;
177d41dee36SAndy Whitcroft 	unsigned long nr_pages = 0;
178d41dee36SAndy Whitcroft 
179d41dee36SAndy Whitcroft 	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
180d41dee36SAndy Whitcroft 		if (nid != early_pfn_to_nid(pfn))
181d41dee36SAndy Whitcroft 			continue;
182d41dee36SAndy Whitcroft 
183540557b9SAndy Whitcroft 		if (pfn_present(pfn))
184d41dee36SAndy Whitcroft 			nr_pages += PAGES_PER_SECTION;
185d41dee36SAndy Whitcroft 	}
186d41dee36SAndy Whitcroft 
187d41dee36SAndy Whitcroft 	return nr_pages * sizeof(struct page);
188d41dee36SAndy Whitcroft }
189d41dee36SAndy Whitcroft 
190d41dee36SAndy Whitcroft /*
19129751f69SAndy Whitcroft  * Subtle, we encode the real pfn into the mem_map such that
19229751f69SAndy Whitcroft  * the identity pfn - section_mem_map will return the actual
19329751f69SAndy Whitcroft  * physical page frame number.
19429751f69SAndy Whitcroft  */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	/* Bias mem_map so that pfn + this value == &mem_map[offset]. */
	return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}
19929751f69SAndy Whitcroft 
20029751f69SAndy Whitcroft /*
20129751f69SAndy Whitcroft  * We need this if we ever free the mem_maps.  While not implemented yet,
20229751f69SAndy Whitcroft  * this function is included for parity with its sibling.
20329751f69SAndy Whitcroft  */
static __attribute((unused))
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* Inverse of sparse_encode_mem_map(): add the section base pfn back. */
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
20929751f69SAndy Whitcroft 
210a3142c8eSYasunori Goto static int __meminit sparse_init_one_section(struct mem_section *ms,
2115c0e3066SMel Gorman 		unsigned long pnum, struct page *mem_map,
2125c0e3066SMel Gorman 		unsigned long *pageblock_bitmap)
21329751f69SAndy Whitcroft {
214540557b9SAndy Whitcroft 	if (!present_section(ms))
21529751f69SAndy Whitcroft 		return -EINVAL;
21629751f69SAndy Whitcroft 
21730c253e6SAndy Whitcroft 	ms->section_mem_map &= ~SECTION_MAP_MASK;
218540557b9SAndy Whitcroft 	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
219540557b9SAndy Whitcroft 							SECTION_HAS_MEM_MAP;
2205c0e3066SMel Gorman  	ms->pageblock_flags = pageblock_bitmap;
22129751f69SAndy Whitcroft 
22229751f69SAndy Whitcroft 	return 1;
22329751f69SAndy Whitcroft }
22429751f69SAndy Whitcroft 
2255c0e3066SMel Gorman static unsigned long usemap_size(void)
2265c0e3066SMel Gorman {
2275c0e3066SMel Gorman 	unsigned long size_bytes;
2285c0e3066SMel Gorman 	size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
2295c0e3066SMel Gorman 	size_bytes = roundup(size_bytes, sizeof(unsigned long));
2305c0e3066SMel Gorman 	return size_bytes;
2315c0e3066SMel Gorman }
2325c0e3066SMel Gorman 
2335c0e3066SMel Gorman #ifdef CONFIG_MEMORY_HOTPLUG
/* Hot-add path: slab allocation of one section's pageblock bitmap. */
static unsigned long *__kmalloc_section_usemap(void)
{
	return kmalloc(usemap_size(), GFP_KERNEL);
}
2385c0e3066SMel Gorman #endif /* CONFIG_MEMORY_HOTPLUG */
2395c0e3066SMel Gorman 
2405c0e3066SMel Gorman static unsigned long *sparse_early_usemap_alloc(unsigned long pnum)
2415c0e3066SMel Gorman {
2425c0e3066SMel Gorman 	unsigned long *usemap;
2435c0e3066SMel Gorman 	struct mem_section *ms = __nr_to_section(pnum);
2445c0e3066SMel Gorman 	int nid = sparse_early_nid(ms);
2455c0e3066SMel Gorman 
2465c0e3066SMel Gorman 	usemap = alloc_bootmem_node(NODE_DATA(nid), usemap_size());
2475c0e3066SMel Gorman 	if (usemap)
2485c0e3066SMel Gorman 		return usemap;
2495c0e3066SMel Gorman 
2505c0e3066SMel Gorman 	/* Stupid: suppress gcc warning for SPARSEMEM && !NUMA */
2515c0e3066SMel Gorman 	nid = 0;
2525c0e3066SMel Gorman 
2535c0e3066SMel Gorman 	printk(KERN_WARNING "%s: allocation failed\n", __FUNCTION__);
2545c0e3066SMel Gorman 	return NULL;
2555c0e3066SMel Gorman }
2565c0e3066SMel Gorman 
2578f6aac41SChristoph Lameter #ifndef CONFIG_SPARSEMEM_VMEMMAP
/* Allocate a section's mem_map on @nid; @pnum is unused without vmemmap. */
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map;

	/* Some architectures reserve a dedicated remap area for mem_map. */
	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	/* Fall back to the node's bootmem. */
	map = alloc_bootmem_node(NODE_DATA(nid),
			sizeof(struct page) * PAGES_PER_SECTION);
	return map;
}
2708f6aac41SChristoph Lameter #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
2718f6aac41SChristoph Lameter 
2728f6aac41SChristoph Lameter struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
2738f6aac41SChristoph Lameter {
2748f6aac41SChristoph Lameter 	struct page *map;
2758f6aac41SChristoph Lameter 	struct mem_section *ms = __nr_to_section(pnum);
2768f6aac41SChristoph Lameter 	int nid = sparse_early_nid(ms);
2778f6aac41SChristoph Lameter 
27898f3cfc1SYasunori Goto 	map = sparse_mem_map_populate(pnum, nid);
27929751f69SAndy Whitcroft 	if (map)
28029751f69SAndy Whitcroft 		return map;
28129751f69SAndy Whitcroft 
2828f6aac41SChristoph Lameter 	printk(KERN_ERR "%s: sparsemem memory map backing failed "
2838f6aac41SChristoph Lameter 			"some memory will not be available.\n", __FUNCTION__);
284802f192eSBob Picco 	ms->section_mem_map = 0;
28529751f69SAndy Whitcroft 	return NULL;
28629751f69SAndy Whitcroft }
28729751f69SAndy Whitcroft 
288193faea9SStephen Rothwell /*
289193faea9SStephen Rothwell  * Allocate the accumulated non-linear sections, allocate a mem_map
290193faea9SStephen Rothwell  * for each and record the physical to section mapping.
291193faea9SStephen Rothwell  */
void __init sparse_init(void)
{
	unsigned long pnum;
	struct page *map;
	unsigned long *usemap;

	/* Walk every possible section; only present ones get backing. */
	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!present_section_nr(pnum))
			continue;

		map = sparse_early_mem_map_alloc(pnum);
		if (!map)
			continue;

		/*
		 * NOTE(review): if the usemap allocation fails here, the
		 * mem_map just allocated for this section is not returned
		 * to bootmem — the section is simply left uninitialized.
		 */
		usemap = sparse_early_usemap_alloc(pnum);
		if (!usemap)
			continue;

		sparse_init_one_section(__nr_to_section(pnum), pnum, map,
								usemap);
	}
}
314193faea9SStephen Rothwell 
315193faea9SStephen Rothwell #ifdef CONFIG_MEMORY_HOTPLUG
31698f3cfc1SYasunori Goto #ifdef CONFIG_SPARSEMEM_VMEMMAP
/* Hot-add path with vmemmap: populate the virtual memmap range. */
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						 unsigned long nr_pages)
{
	/* This will make the necessary allocations eventually. */
	return sparse_mem_map_populate(pnum, nid);
}
/* Freeing a vmemmap-backed mem_map is not supported yet. */
static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	return; /* XXX: Not implemented yet */
}
32798f3cfc1SYasunori Goto #else
3280b0acbecSDave Hansen static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
3290b0acbecSDave Hansen {
3300b0acbecSDave Hansen 	struct page *page, *ret;
3310b0acbecSDave Hansen 	unsigned long memmap_size = sizeof(struct page) * nr_pages;
3320b0acbecSDave Hansen 
333f2d0aa5bSYasunori Goto 	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
3340b0acbecSDave Hansen 	if (page)
3350b0acbecSDave Hansen 		goto got_map_page;
3360b0acbecSDave Hansen 
3370b0acbecSDave Hansen 	ret = vmalloc(memmap_size);
3380b0acbecSDave Hansen 	if (ret)
3390b0acbecSDave Hansen 		goto got_map_ptr;
3400b0acbecSDave Hansen 
3410b0acbecSDave Hansen 	return NULL;
3420b0acbecSDave Hansen got_map_page:
3430b0acbecSDave Hansen 	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
3440b0acbecSDave Hansen got_map_ptr:
3450b0acbecSDave Hansen 	memset(ret, 0, memmap_size);
3460b0acbecSDave Hansen 
3470b0acbecSDave Hansen 	return ret;
3480b0acbecSDave Hansen }
3490b0acbecSDave Hansen 
/* Hot-add path: @pnum and @nid are unused without vmemmap. */
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						  unsigned long nr_pages)
{
	return __kmalloc_section_memmap(nr_pages);
}
35598f3cfc1SYasunori Goto 
3560b0acbecSDave Hansen static int vaddr_in_vmalloc_area(void *addr)
3570b0acbecSDave Hansen {
3580b0acbecSDave Hansen 	if (addr >= (void *)VMALLOC_START &&
3590b0acbecSDave Hansen 	    addr < (void *)VMALLOC_END)
3600b0acbecSDave Hansen 		return 1;
3610b0acbecSDave Hansen 	return 0;
3620b0acbecSDave Hansen }
3630b0acbecSDave Hansen 
3640b0acbecSDave Hansen static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
3650b0acbecSDave Hansen {
3660b0acbecSDave Hansen 	if (vaddr_in_vmalloc_area(memmap))
3670b0acbecSDave Hansen 		vfree(memmap);
3680b0acbecSDave Hansen 	else
3690b0acbecSDave Hansen 		free_pages((unsigned long)memmap,
3700b0acbecSDave Hansen 			   get_order(sizeof(struct page) * nr_pages));
3710b0acbecSDave Hansen }
37298f3cfc1SYasunori Goto #endif /* CONFIG_SPARSEMEM_VMEMMAP */
3730b0acbecSDave Hansen 
37429751f69SAndy Whitcroft /*
37529751f69SAndy Whitcroft  * returns the number of sections whose mem_maps were properly
37629751f69SAndy Whitcroft  * set.  If this is <=0, then that means that the passed-in
37729751f69SAndy Whitcroft  * map was not consumed and must be freed.
378d41dee36SAndy Whitcroft  */
3790b0acbecSDave Hansen int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
3800b0acbecSDave Hansen 			   int nr_pages)
38129751f69SAndy Whitcroft {
3820b0acbecSDave Hansen 	unsigned long section_nr = pfn_to_section_nr(start_pfn);
3830b0acbecSDave Hansen 	struct pglist_data *pgdat = zone->zone_pgdat;
3840b0acbecSDave Hansen 	struct mem_section *ms;
3850b0acbecSDave Hansen 	struct page *memmap;
3865c0e3066SMel Gorman 	unsigned long *usemap;
3870b0acbecSDave Hansen 	unsigned long flags;
3880b0acbecSDave Hansen 	int ret;
38929751f69SAndy Whitcroft 
3900b0acbecSDave Hansen 	/*
3910b0acbecSDave Hansen 	 * no locking for this, because it does its own
3920b0acbecSDave Hansen 	 * plus, it does a kmalloc
3930b0acbecSDave Hansen 	 */
3940b0acbecSDave Hansen 	sparse_index_init(section_nr, pgdat->node_id);
39598f3cfc1SYasunori Goto 	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
3965c0e3066SMel Gorman 	usemap = __kmalloc_section_usemap();
39729751f69SAndy Whitcroft 
3980b0acbecSDave Hansen 	pgdat_resize_lock(pgdat, &flags);
3990b0acbecSDave Hansen 
4000b0acbecSDave Hansen 	ms = __pfn_to_section(start_pfn);
4010b0acbecSDave Hansen 	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
4020b0acbecSDave Hansen 		ret = -EEXIST;
4030b0acbecSDave Hansen 		goto out;
4040b0acbecSDave Hansen 	}
4055c0e3066SMel Gorman 
4065c0e3066SMel Gorman 	if (!usemap) {
4075c0e3066SMel Gorman 		ret = -ENOMEM;
4085c0e3066SMel Gorman 		goto out;
4095c0e3066SMel Gorman 	}
41029751f69SAndy Whitcroft 	ms->section_mem_map |= SECTION_MARKED_PRESENT;
41129751f69SAndy Whitcroft 
4125c0e3066SMel Gorman 	ret = sparse_init_one_section(ms, section_nr, memmap, usemap);
4130b0acbecSDave Hansen 
4140b0acbecSDave Hansen out:
4150b0acbecSDave Hansen 	pgdat_resize_unlock(pgdat, &flags);
41646a66eecSMike Kravetz 	if (ret <= 0)
41746a66eecSMike Kravetz 		__kfree_section_memmap(memmap, nr_pages);
4180b0acbecSDave Hansen 	return ret;
419d41dee36SAndy Whitcroft }
420a3142c8eSYasunori Goto #endif
421