xref: /linux/mm/sparse.c (revision f0ca8c25256dd22737db5780e33e2809ce297625)
// SPDX-License-Identifier: GPL-2.0
/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/bootmem_info.h>

#include "internal.h"
#include <asm/dma.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section **mem_section;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);
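
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * with SPARSEMEM_EXTREME the table above is two-level, so a section
 * number splits into a root index and an offset within that root.
 * Assuming the usual helpers from <linux/mmzone.h>, a lookup looks
 * roughly like:
 *
 *	struct mem_section *example_nr_to_section(unsigned long nr)
 *	{
 *		unsigned long root = SECTION_NR_TO_ROOT(nr);
 *
 *		if (!mem_section || !mem_section[root])
 *			return NULL;
 *		return &mem_section[root][nr & SECTION_ROOT_MASK];
 *	}
 *
 * Only roots that contain present sections are ever allocated (see
 * sparse_index_init() below), which keeps the table small on machines
 * with sparse physical layouts.
 */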

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(const struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static noinline struct mem_section __ref *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available()) {
		section = kzalloc_node(array_size, GFP_KERNEL, nid);
	} else {
		section = memblock_alloc_node(array_size, SMP_CACHE_BYTES,
					      nid);
		if (!section)
			panic("%s: Failed to allocate %lu bytes nid=%d\n",
			      __func__, array_size, nid);
	}

	return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;

	/*
	 * An existing section is possible in the sub-section hotplug
	 * case. First hot-add instantiates, follow-on hot-add reuses
	 * the existing section.
	 *
	 * The mem_hotplug_lock resolves the apparent race below.
	 */
	if (mem_section[root])
		return 0;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;

	mem_section[root] = section;

	return 0;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node.  This keeps us from having to use another data structure.  The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return ((unsigned long)nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}
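
/*
 * Worked example (editor's addition, not part of the original file):
 * assuming SECTION_NID_SHIFT == 6, sparse_encode_early_nid(3) stores
 * 3 << 6 == 0xc0 in section_mem_map, and sparse_early_nid() recovers
 * 0xc0 >> 6 == 3. The low bits stay free for flags such as
 * SECTION_MARKED_PRESENT, which is why the shift exists at all.
 */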

/* Validate the physical addressing limitations of the model */
static void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	} else if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*end_pfn = max_sparsemem_pfn;
	}
}

/*
 * There are a number of times that we loop over NR_MEM_SECTIONS,
 * looking for section_present() on each.  But, when we have very
 * large physical address spaces, NR_MEM_SECTIONS can also be
 * very large which makes the loops quite long.
 *
 * Keeping track of this gives us an easy way to break out of
 * those loops early.
 */
unsigned long __highest_present_section_nr;
static void __section_mark_present(struct mem_section *ms,
		unsigned long section_nr)
{
	if (section_nr > __highest_present_section_nr)
		__highest_present_section_nr = section_nr;

	ms->section_mem_map |= SECTION_MARKED_PRESENT;
}

#define for_each_present_section_nr(start, section_nr)		\
	for (section_nr = next_present_section_nr(start-1);	\
	     ((section_nr != -1) &&				\
	      (section_nr <= __highest_present_section_nr));	\
	     section_nr = next_present_section_nr(section_nr))

static inline unsigned long first_present_section_nr(void)
{
	return next_present_section_nr(-1);
}
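
/*
 * Illustrative usage (editor's addition, not part of the original file):
 * the iterator above visits only present sections and stops at the
 * highest one, so scans over a mostly-empty physical address space stay
 * cheap. An early-boot caller might look like:
 *
 *	unsigned long nr;
 *
 *	for_each_present_section_nr(0, nr)
 *		pr_info("section %lu is on node %d\n", nr,
 *			sparse_early_nid(__nr_to_section(nr)));
 */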

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static void subsection_mask_set(unsigned long *map, unsigned long pfn,
		unsigned long nr_pages)
{
	int idx = subsection_map_index(pfn);
	int end = subsection_map_index(pfn + nr_pages - 1);

	bitmap_set(map, idx, end - idx + 1);
}
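
/*
 * Worked example (editor's addition, not part of the original file):
 * assuming 32768 pages per section and 512 pages per subsection (the
 * x86-64 values with 4K pages), a pfn 1024 pages into its section with
 * nr_pages == 1024 yields idx == 1024 / 512 == 2 and
 * end == 2047 / 512 == 3, so bits 2 and 3 of the subsection map are set.
 */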

void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
{
	int end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
	unsigned long nr, start_sec = pfn_to_section_nr(pfn);

	if (!nr_pages)
		return;

	for (nr = start_sec; nr <= end_sec; nr++) {
		struct mem_section *ms;
		unsigned long pfns;

		pfns = min(nr_pages, PAGES_PER_SECTION
				- (pfn & ~PAGE_SECTION_MASK));
		ms = __nr_to_section(nr);
		subsection_mask_set(ms->usage->subsection_map, pfn, pfns);

		pr_debug("%s: sec: %lu pfns: %lu set(%d, %d)\n", __func__, nr,
				pfns, subsection_map_index(pfn),
				subsection_map_index(pfn + pfns - 1));

		pfn += pfns;
		nr_pages -= pfns;
	}
}
#else
void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
{
}
#endif

/* Record a memory area against a node. */
static void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

#ifdef CONFIG_SPARSEMEM_EXTREME
	if (unlikely(!mem_section)) {
		unsigned long size, align;

		size = sizeof(struct mem_section *) * NR_SECTION_ROOTS;
		align = 1 << (INTERNODE_CACHE_SHIFT);
		mem_section = memblock_alloc(size, align);
		if (!mem_section)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, size, align);
	}
#endif

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map) {
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_IS_ONLINE;
			__section_mark_present(ms, section);
		}
	}
}

/*
 * Mark all memblocks as present using memory_present().
 * This is a convenience function that is useful to mark all of the
 * system's memory as present during initialization.
 */
static void __init memblocks_present(void)
{
	unsigned long start, end;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid)
		memory_present(nid, start, end);
}

/*
 * Subtle: we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	unsigned long coded_mem_map =
		(unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
	BUILD_BUG_ON(SECTION_MAP_LAST_BIT > PFN_SECTION_SHIFT);
	BUG_ON(coded_mem_map & ~SECTION_MAP_MASK);
	return coded_mem_map;
}
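
/*
 * Worked example (editor's addition, not part of the original file):
 * if section pnum starts at pfn section_nr_to_pfn(pnum) and its memmap
 * lives at address map, then coded_mem_map is map minus that start pfn
 * (in units of sizeof(struct page)). For any pfn inside the section,
 * pfn_to_page(pfn) is then simply coded_mem_map + pfn, with no
 * per-section base subtraction - that is the "identity" the comment
 * above refers to.
 */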

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

static void __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		struct mem_section_usage *usage, unsigned long flags)
{
	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum)
		| SECTION_HAS_MEM_MAP | flags;
	ms->usage = usage;
}

static unsigned long usemap_size(void)
{
	return BITS_TO_LONGS(SECTION_BLOCKFLAGS_BITS) * sizeof(unsigned long);
}

size_t mem_section_usage_size(void)
{
	return sizeof(struct mem_section_usage) + usemap_size();
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static inline phys_addr_t pgdat_to_phys(struct pglist_data *pgdat)
{
#ifndef CONFIG_NUMA
	VM_BUG_ON(pgdat != &contig_page_data);
	return __pa_symbol(&contig_page_data);
#else
	return __pa(pgdat);
#endif
}

static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	struct mem_section_usage *usage;
	unsigned long goal, limit;
	int nid;
	/*
	 * A page may contain usemaps for other sections, preventing the
	 * page from being freed and making a section unremovable while
	 * other sections referencing the usemap remain active. Similarly,
	 * a pgdat can prevent a section from being removed. If section A
	 * contains a pgdat and section B contains the usemap, both
	 * sections become inter-dependent. This allocates usemaps
	 * from the same section as the pgdat where possible to avoid
	 * this problem.
	 */
	goal = pgdat_to_phys(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
	limit = goal + (1UL << PA_SECTION_SHIFT);
	nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
	usage = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, goal, limit, nid);
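	/*
	 * Editor's note (not in the original): if the section-local window
	 * [goal, limit) cannot satisfy the allocation, retry below with
	 * limit == 0, which lets memblock place the usemap anywhere.
	 */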
	if (!usage && limit) {
		limit = 0;
		goto again;
	}
	return usage;
}

static void __init check_usemap_section_nr(int nid,
		struct mem_section_usage *usage)
{
	unsigned long usemap_snr, pgdat_snr;
	static unsigned long old_usemap_snr;
	static unsigned long old_pgdat_snr;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int usemap_nid;

	/* First call */
	if (!old_usemap_snr) {
		old_usemap_snr = NR_MEM_SECTIONS;
		old_pgdat_snr = NR_MEM_SECTIONS;
	}

	usemap_snr = pfn_to_section_nr(__pa(usage) >> PAGE_SHIFT);
	pgdat_snr = pfn_to_section_nr(pgdat_to_phys(pgdat) >> PAGE_SHIFT);
	if (usemap_snr == pgdat_snr)
		return;

	if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
		/* skip redundant message */
		return;

	old_usemap_snr = usemap_snr;
	old_pgdat_snr = pgdat_snr;

	usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
	if (usemap_nid != nid) {
		pr_info("node %d must be removed before remove section %ld\n",
			nid, usemap_snr);
		return;
	}
	/*
	 * There is a circular dependency.
	 * Some platforms allow un-removable sections because they will just
	 * gather other removable sections for dynamic partitioning.
	 * Just report the un-removable section's number here.
	 */
	pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n",
		usemap_snr, pgdat_snr, nid);
}
#else
static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	return memblock_alloc_node(size, SMP_CACHE_BYTES, pgdat->node_id);
}

static void __init check_usemap_section_nr(int nid,
		struct mem_section_usage *usage)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static unsigned long __init section_map_size(void)
{
	return ALIGN(sizeof(struct page) * PAGES_PER_SECTION, PMD_SIZE);
}

#else
static unsigned long __init section_map_size(void)
{
	return PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
}

struct page __init *__populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	unsigned long size = section_map_size();
	struct page *map = sparse_buffer_alloc(size);
	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);

	if (map)
		return map;

	map = memmap_alloc(size, size, addr, nid, false);
	if (!map)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa\n",
		      __func__, size, PAGE_SIZE, nid, &addr);

	return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

static void *sparsemap_buf __meminitdata;
static void *sparsemap_buf_end __meminitdata;

static inline void __meminit sparse_buffer_free(unsigned long size)
{
	WARN_ON(!sparsemap_buf || size == 0);
	memblock_free(sparsemap_buf, size);
}

static void __init sparse_buffer_init(unsigned long size, int nid)
{
	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);
	WARN_ON(sparsemap_buf);	/* forgot to call sparse_buffer_fini()? */
	/*
	 * Pre-allocated buffer is mainly used by __populate_section_memmap
	 * and we want it to be properly aligned to the section size - this is
	 * especially the case for VMEMMAP which maps memmap to PMDs
	 */
	sparsemap_buf = memmap_alloc(size, section_map_size(), addr, nid, true);
	sparsemap_buf_end = sparsemap_buf + size;
}

static void __init sparse_buffer_fini(void)
{
	unsigned long size = sparsemap_buf_end - sparsemap_buf;

	if (sparsemap_buf && size > 0)
		sparse_buffer_free(size);
	sparsemap_buf = NULL;
}

void * __meminit sparse_buffer_alloc(unsigned long size)
{
	void *ptr = NULL;

	if (sparsemap_buf) {
		ptr = (void *) roundup((unsigned long)sparsemap_buf, size);
		if (ptr + size > sparsemap_buf_end)
			ptr = NULL;
		else {
			/* Free redundant aligned space */
			if ((unsigned long)(ptr - sparsemap_buf) > 0)
				sparse_buffer_free((unsigned long)(ptr - sparsemap_buf));
			sparsemap_buf = ptr + size;
		}
	}
	return ptr;
}
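
/*
 * Worked example (editor's addition, not part of the original file):
 * suppose sparse_buffer_init() reserved room for two sections and
 * section_map_size() is 2 MiB. A sparse_buffer_alloc(2 MiB) call rounds
 * sparsemap_buf up to the next 2 MiB boundary, hands out that slot, and
 * returns any alignment padding to memblock via sparse_buffer_free().
 * A request that would run past sparsemap_buf_end yields NULL, and the
 * caller falls back to a fresh memblock allocation, as
 * __populate_section_memmap() above does.
 */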

void __weak __meminit vmemmap_populate_print_last(void)
{
}

/*
 * Initialize sparse on a specific node. The node spans [pnum_begin, pnum_end)
 * and the number of present sections in this node is map_count.
 */
static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
				   unsigned long pnum_end,
				   unsigned long map_count)
{
	struct mem_section_usage *usage;
	unsigned long pnum;
	struct page *map;

	usage = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nid),
			mem_section_usage_size() * map_count);
	if (!usage) {
		pr_err("%s: node[%d] usemap allocation failed", __func__, nid);
		goto failed;
	}
	sparse_buffer_init(map_count * section_map_size(), nid);
	for_each_present_section_nr(pnum_begin, pnum) {
		unsigned long pfn = section_nr_to_pfn(pnum);

		if (pnum >= pnum_end)
			break;

		map = __populate_section_memmap(pfn, PAGES_PER_SECTION,
				nid, NULL, NULL);
		if (!map) {
			pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.",
			       __func__, nid);
			pnum_begin = pnum;
			sparse_buffer_fini();
			goto failed;
		}
		check_usemap_section_nr(nid, usage);
		sparse_init_one_section(__nr_to_section(pnum), pnum, map, usage,
				SECTION_IS_EARLY);
		usage = (void *) usage + mem_section_usage_size();
	}
	sparse_buffer_fini();
	return;
failed:
	/* We failed to allocate, mark all the following pnums as not present */
	for_each_present_section_nr(pnum_begin, pnum) {
		struct mem_section *ms;

		if (pnum >= pnum_end)
			break;
		ms = __nr_to_section(pnum);
		ms->section_mem_map = 0;
	}
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum_end, pnum_begin, map_count = 1;
	int nid_begin;

	memblocks_present();

	pnum_begin = first_present_section_nr();
	nid_begin = sparse_early_nid(__nr_to_section(pnum_begin));

	/* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
	set_pageblock_order();

	for_each_present_section_nr(pnum_begin + 1, pnum_end) {
		int nid = sparse_early_nid(__nr_to_section(pnum_end));

		if (nid == nid_begin) {
			map_count++;
			continue;
		}
		/* Init node with sections in range [pnum_begin, pnum_end) */
		sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
		nid_begin = nid;
		pnum_begin = pnum_end;
		map_count = 1;
	}
	/* cover the last node */
	sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
	vmemmap_populate_print_last();
}

#ifdef CONFIG_MEMORY_HOTPLUG

/* Mark all memory sections within the pfn range as online */
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/* onlining code should never touch invalid ranges */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map |= SECTION_IS_ONLINE;
	}
}

/* Mark all memory sections within the pfn range as offline */
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/*
		 * TODO this needs some double checking. Offlining code makes
		 * sure to check pfn_valid but those checks might be just bogus
		 */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map &= ~SECTION_IS_ONLINE;
	}
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static struct page * __meminit populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	return __populate_section_memmap(pfn, nr_pages, nid, altmap, pgmap);
}

static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	unsigned long start = (unsigned long) pfn_to_page(pfn);
	unsigned long end = start + nr_pages * sizeof(struct page);

	vmemmap_free(start, end, altmap);
}

static void free_map_bootmem(struct page *memmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end, NULL);
}

static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
	DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };
	struct mem_section *ms = __pfn_to_section(pfn);
	unsigned long *subsection_map = ms->usage
		? &ms->usage->subsection_map[0] : NULL;

	subsection_mask_set(map, pfn, nr_pages);
	if (subsection_map)
		bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION);

	if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),
				"section already deactivated (%#lx + %ld)\n",
				pfn, nr_pages))
		return -EINVAL;

	bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
	return 0;
}

static bool is_subsection_map_empty(struct mem_section *ms)
{
	return bitmap_empty(&ms->usage->subsection_map[0],
			    SUBSECTIONS_PER_SECTION);
}

static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
	unsigned long *subsection_map;
	int rc = 0;

	subsection_mask_set(map, pfn, nr_pages);

	subsection_map = &ms->usage->subsection_map[0];

	if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
		rc = -EINVAL;
	else if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION))
		rc = -EEXIST;
	else
		bitmap_or(subsection_map, map, subsection_map,
				SUBSECTIONS_PER_SECTION);

	return rc;
}
#else
struct page * __meminit populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	return kvmalloc_node(array_size(sizeof(struct page),
					PAGES_PER_SECTION), GFP_KERNEL, nid);
}

static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	kvfree(pfn_to_page(pfn));
}

static void free_map_bootmem(struct page *memmap)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	unsigned long magic, nr_pages;
	struct page *page = virt_to_page(memmap);

	nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
		>> PAGE_SHIFT;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = page->index;

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page_private(page);

		/*
		 * When this function is called, the removing section is in a
		 * logically offlined state. This means all of its pages are
		 * isolated from the page allocator. If the removing section's
		 * memmap is placed in the same section, it must not be freed.
		 * If it were freed, the page allocator could hand it out even
		 * though it will be removed physically soon.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}

static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	return 0;
}

static bool is_subsection_map_empty(struct mem_section *ms)
{
	return true;
}

static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	return 0;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
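
/*
 * Illustrative semantics (editor's addition, not part of the original
 * file): assuming 64 subsections per section (x86-64's 128 MiB sections
 * split into 2 MiB subsections), hot-adding the first half of a section
 * makes fill_subsection_map() set bits 0-31; adding an overlapping range
 * afterwards fails with -EEXIST. Removing that half later makes
 * clear_subsection_map() XOR bits 0-31 back out, and once every bit is
 * clear, is_subsection_map_empty() tells section_deactivate() below that
 * the whole section can go.
 */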

/*
 * To deactivate a memory region, there are 3 cases to handle across
 * two configurations (SPARSEMEM_VMEMMAP={y,n}):
 *
 * 1. deactivation of a partial hot-added section (only possible in
 *    the SPARSEMEM_VMEMMAP=y case).
 *      a) section was present at memory init.
 *      b) section was hot-added post memory init.
 * 2. deactivation of a complete hot-added section.
 * 3. deactivation of a complete section from memory init.
 *
 * For 1, when the subsection_map is not empty we will not be freeing the
 * usage map, but still need to free the vmemmap range.
 *
 * For 2 and 3, the SPARSEMEM_VMEMMAP={y,n} cases are unified.
 */
static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	bool section_is_early = early_section(ms);
	struct page *memmap = NULL;
	bool empty;

	if (clear_subsection_map(pfn, nr_pages))
		return;

	empty = is_subsection_map_empty(ms);
	if (empty) {
		unsigned long section_nr = pfn_to_section_nr(pfn);

		/*
		 * When removing an early section, the usage map is kept (as the
		 * usage maps of other sections fall into the same page). It
		 * will be re-used when re-adding the section - which is then no
		 * longer an early section. If the usage map is PageReserved, it
		 * was allocated during boot.
		 */
		if (!PageReserved(virt_to_page(ms->usage))) {
			kfree(ms->usage);
			ms->usage = NULL;
		}
		memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
		/*
		 * Mark the section invalid so that valid_section()
		 * returns false. This prevents code from dereferencing
		 * the ms->usage array.
		 */
		ms->section_mem_map &= ~SECTION_HAS_MEM_MAP;
	}

	/*
	 * The memmap of early sections is always fully populated. See
	 * section_activate() and pfn_valid().
	 */
	if (!section_is_early)
		depopulate_section_memmap(pfn, nr_pages, altmap);
	else if (memmap)
		free_map_bootmem(memmap);

	if (empty)
		ms->section_mem_map = (unsigned long)NULL;
}

static struct page * __meminit section_activate(int nid, unsigned long pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	struct mem_section_usage *usage = NULL;
	struct page *memmap;
	int rc;

	if (!ms->usage) {
		usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
		if (!usage)
			return ERR_PTR(-ENOMEM);
		ms->usage = usage;
	}

	rc = fill_subsection_map(pfn, nr_pages);
	if (rc) {
		if (usage)
			ms->usage = NULL;
		kfree(usage);
		return ERR_PTR(rc);
	}

	/*
	 * The early init code does not consider partially populated
	 * initial sections; it simply assumes that memory will never be
	 * referenced.  If we hot-add memory into such a section then we
	 * do not need to populate the memmap and can simply reuse what
	 * is already there.
	 */
	if (nr_pages < PAGES_PER_SECTION && early_section(ms))
		return pfn_to_page(pfn);

	memmap = populate_section_memmap(pfn, nr_pages, nid, altmap, pgmap);
	if (!memmap) {
		section_deactivate(pfn, nr_pages, altmap);
		return ERR_PTR(-ENOMEM);
	}

	return memmap;
}

/**
 * sparse_add_section - add a memory section, or populate an existing one
 * @nid: The node to add the section on
 * @start_pfn: start pfn of the memory range
 * @nr_pages: number of pfns to add in the section
 * @altmap: alternate pfns to allocate the memmap backing store
 * @pgmap: alternate compound page geometry for devmap mappings
 *
 * This is only intended for hotplug.
 *
 * Note that only VMEMMAP supports sub-section aligned hotplug; the
 * proper alignment and size are gated by check_pfn_span().
 *
 * Return:
 * * 0		- On success.
 * * -EEXIST	- Section is already present.
 * * -ENOMEM	- Out of memory.
 */
int __meminit sparse_add_section(int nid, unsigned long start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct mem_section *ms;
	struct page *memmap;
	int ret;

	ret = sparse_index_init(section_nr, nid);
	if (ret < 0)
		return ret;

	memmap = section_activate(nid, start_pfn, nr_pages, altmap, pgmap);
	if (IS_ERR(memmap))
		return PTR_ERR(memmap);

	/*
	 * Poison uninitialized struct pages in order to catch invalid flags
	 * combinations.
	 */
	page_init_poison(memmap, sizeof(struct page) * nr_pages);

	ms = __nr_to_section(section_nr);
	set_section_nid(section_nr, nid);
	__section_mark_present(ms, section_nr);

	/* Align memmap to section boundary in the subsection case */
	if (section_nr_to_pfn(section_nr) != start_pfn)
		memmap = pfn_to_page(section_nr_to_pfn(section_nr));
	sparse_init_one_section(ms, section_nr, memmap, ms->usage, 0);

	return 0;
}
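
/*
 * Illustrative call path (editor's addition, not part of the original
 * file; the exact callers are assumed from the generic hotplug code, not
 * quoted from it):
 *
 *	add_memory()
 *	  -> arch_add_memory()
 *	    -> __add_pages()			(mm/memory_hotplug.c)
 *	      -> sparse_add_section()		(once per section)
 *
 * Each call populates the memmap and marks the section present;
 * sparse_remove_section() below undoes it via section_deactivate().
 */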

void sparse_remove_section(struct mem_section *ms, unsigned long pfn,
		unsigned long nr_pages, unsigned long map_offset,
		struct vmem_altmap *altmap)
{
	section_deactivate(pfn, nr_pages, altmap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */