xref: /linux/mm/sparse.c (revision 2284f47fe9fe2ed2ef619e5474e155cfeeebd569)
// SPDX-License-Identifier: GPL-2.0
/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"
#include <asm/dma.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_maps for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section **mem_section;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page flags then we have
 * to look it up in section_to_node_table to find which node the page
 * belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(const struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);
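/*
 * Sizing sketch (illustrative, assuming an x86_64-like configuration
 * with MAX_PHYSMEM_BITS == 46 and 128MB sections): NR_MEM_SECTIONS is
 * 2^19, so the u8 flavour of section_to_node_table above costs 512KB
 * and the u16 flavour costs 1MB. That is the price of evicting the
 * node id from page->flags.
 */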

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static noinline struct mem_section __ref *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available()) {
		section = kzalloc_node(array_size, GFP_KERNEL, nid);
	} else {
		section = memblock_alloc_node(array_size, SMP_CACHE_BYTES,
					      nid);
		if (!section)
			panic("%s: Failed to allocate %lu bytes nid=%d\n",
			      __func__, array_size, nid);
	}

	return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;

	/*
	 * An existing section is possible in the sub-section hotplug
	 * case. First hot-add instantiates, follow-on hot-add reuses
	 * the existing section.
	 *
	 * The mem_hotplug_lock resolves the apparent race below.
	 */
	if (mem_section[root])
		return 0;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;

	mem_section[root] = section;

	return 0;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
unsigned long __section_nr(struct mem_section *ms)
{
	unsigned long root_nr;
	struct mem_section *root = NULL;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	VM_BUG_ON(!root);

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
#else
unsigned long __section_nr(struct mem_section *ms)
{
	return (unsigned long)(ms - mem_section[0]);
}
#endif

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node.  This keeps us from having to use another data structure.  The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}
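/*
 * Round-trip sketch (illustrative; the concrete value of
 * SECTION_NID_SHIFT is config/version dependent): if the shift were 3,
 * node 5 would be stored as 5 << 3 == 0x28 and recovered as
 * 0x28 >> 3 == 5. The bits below the shift stay free for the
 * SECTION_* flag bits until the real mem_map encoding replaces this.
 */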

/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	} else if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*end_pfn = max_sparsemem_pfn;
	}
}

/*
 * There are a number of times that we loop over NR_MEM_SECTIONS,
 * looking for section_present() on each.  But, when we have very
 * large physical address spaces, NR_MEM_SECTIONS can also be
 * very large which makes the loops quite long.
 *
 * Keeping track of the highest present section number gives us an
 * easy way to break out of those loops early.
 */
unsigned long __highest_present_section_nr;
static void section_mark_present(struct mem_section *ms)
{
	unsigned long section_nr = __section_nr(ms);

	if (section_nr > __highest_present_section_nr)
		__highest_present_section_nr = section_nr;

	ms->section_mem_map |= SECTION_MARKED_PRESENT;
}

#define for_each_present_section_nr(start, section_nr)		\
	for (section_nr = next_present_section_nr(start-1);	\
	     ((section_nr != -1) &&				\
	      (section_nr <= __highest_present_section_nr));	\
	     section_nr = next_present_section_nr(section_nr))

static inline unsigned long first_present_section_nr(void)
{
	return next_present_section_nr(-1);
}
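/*
 * Usage sketch (illustrative only): visiting every present section
 * without scanning all NR_MEM_SECTIONS entries:
 *
 *	unsigned long nr;
 *
 *	for_each_present_section_nr(0, nr)
 *		do_something(__nr_to_section(nr));
 *
 * The walk stops at __highest_present_section_nr instead of the
 * configuration-sized maximum.
 */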

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static void subsection_mask_set(unsigned long *map, unsigned long pfn,
		unsigned long nr_pages)
{
	int idx = subsection_map_index(pfn);
	int end = subsection_map_index(pfn + nr_pages - 1);

	bitmap_set(map, idx, end - idx + 1);
}
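/*
 * Worked example (illustrative, assuming 2MB subsections and 128MB
 * sections, i.e. SUBSECTIONS_PER_SECTION == 64): a range starting 4MB
 * into a section and spanning 6MB covers subsections 2, 3 and 4, so
 * idx == 2, end == 4 and bitmap_set() marks bits 2..4 of the map.
 */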

void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
{
	int end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
	unsigned long nr, start_sec = pfn_to_section_nr(pfn);

	if (!nr_pages)
		return;

	for (nr = start_sec; nr <= end_sec; nr++) {
		struct mem_section *ms;
		unsigned long pfns;

		pfns = min(nr_pages, PAGES_PER_SECTION
				- (pfn & ~PAGE_SECTION_MASK));
		ms = __nr_to_section(nr);
		subsection_mask_set(ms->usage->subsection_map, pfn, pfns);

		pr_debug("%s: sec: %lu pfns: %lu set(%d, %d)\n", __func__, nr,
				pfns, subsection_map_index(pfn),
				subsection_map_index(pfn + pfns - 1));

		pfn += pfns;
		nr_pages -= pfns;
	}
}
#else
void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
{
}
#endif

/* Record a memory area against a node. */
static void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

#ifdef CONFIG_SPARSEMEM_EXTREME
	if (unlikely(!mem_section)) {
		unsigned long size, align;

		size = sizeof(struct mem_section*) * NR_SECTION_ROOTS;
		align = 1 << (INTERNODE_CACHE_SHIFT);
		mem_section = memblock_alloc(size, align);
		if (!mem_section)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, size, align);
	}
#endif

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map) {
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_IS_ONLINE;
			section_mark_present(ms);
		}
	}
}

/*
 * Mark all memblocks as present using memory_present().
 * This is a convenience function that is useful to mark all of the
 * system's memory as present during initialization.
 */
static void __init memblocks_present(void)
{
	unsigned long start, end;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid)
		memory_present(nid, start, end);
}

/*
 * Subtle: we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	unsigned long coded_mem_map =
		(unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
	BUILD_BUG_ON(SECTION_MAP_LAST_BIT > (1UL<<PFN_SECTION_SHIFT));
	BUG_ON(coded_mem_map & ~SECTION_MAP_MASK);
	return coded_mem_map;
}
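/*
 * Worked example (illustrative numbers): if a section's first pfn is
 * 0x8000 and its mem_map starts at virtual address V, the encoded
 * value is V minus 0x8000 struct page entries. For any page in the
 * section, page - coded_mem_map then yields its pfn directly, which
 * is exactly the identity described above.
 */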

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

static void __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		struct mem_section_usage *usage, unsigned long flags)
{
	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum)
		| SECTION_HAS_MEM_MAP | flags;
	ms->usage = usage;
}

static unsigned long usemap_size(void)
{
	return BITS_TO_LONGS(SECTION_BLOCKFLAGS_BITS) * sizeof(unsigned long);
}

size_t mem_section_usage_size(void)
{
	return sizeof(struct mem_section_usage) + usemap_size();
}
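/*
 * Sizing sketch (illustrative, assuming 128MB sections, 2MB pageblocks
 * and NR_PAGEBLOCK_BITS == 4): 64 pageblocks * 4 bits == 256 bits, so
 * usemap_size() is four longs (32 bytes) on 64-bit, on top of the
 * other fields of struct mem_section_usage.
 */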

#ifdef CONFIG_MEMORY_HOTREMOVE
static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	struct mem_section_usage *usage;
	unsigned long goal, limit;
	int nid;
	/*
	 * A page may contain usemaps for other sections, preventing the
	 * page from being freed and making a section unremovable while
	 * other sections referencing the usemap remain active. Similarly,
	 * a pgdat can prevent a section from being removed. If section A
	 * contains a pgdat and section B contains the usemap, both
	 * sections become inter-dependent. This allocates usemaps
	 * from the same section as the pgdat where possible to avoid
	 * this problem.
	 */
	goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
	limit = goal + (1UL << PA_SECTION_SHIFT);
	nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
	usage = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, goal, limit, nid);
	if (!usage && limit) {
		limit = 0;
		goto again;
	}
	return usage;
}

static void __init check_usemap_section_nr(int nid,
		struct mem_section_usage *usage)
{
	unsigned long usemap_snr, pgdat_snr;
	static unsigned long old_usemap_snr;
	static unsigned long old_pgdat_snr;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int usemap_nid;

	/* First call */
	if (!old_usemap_snr) {
		old_usemap_snr = NR_MEM_SECTIONS;
		old_pgdat_snr = NR_MEM_SECTIONS;
	}

	usemap_snr = pfn_to_section_nr(__pa(usage) >> PAGE_SHIFT);
	pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	if (usemap_snr == pgdat_snr)
		return;

	if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
		/* skip redundant message */
		return;

	old_usemap_snr = usemap_snr;
	old_pgdat_snr = pgdat_snr;

	usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
	if (usemap_nid != nid) {
		pr_info("node %d must be removed before removing section %ld\n",
			nid, usemap_snr);
		return;
	}
	/*
	 * There is a circular dependency.
	 * Some platforms allow un-removable sections because they will just
	 * gather other removable sections for dynamic partitioning.
	 * Just report the un-removable section's number here.
	 */
	pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n",
		usemap_snr, pgdat_snr, nid);
}
#else
static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	return memblock_alloc_node(size, SMP_CACHE_BYTES, pgdat->node_id);
}

static void __init check_usemap_section_nr(int nid,
		struct mem_section_usage *usage)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static unsigned long __init section_map_size(void)
{
	return ALIGN(sizeof(struct page) * PAGES_PER_SECTION, PMD_SIZE);
}
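/*
 * Back-of-envelope (illustrative, assuming a 64-byte struct page and
 * 32768 pages per 128MB section): 32768 * 64 == 2MB, which is already
 * PMD_SIZE on x86_64, so one section's memmap maps to exactly one
 * huge page.
 */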

#else
static unsigned long __init section_map_size(void)
{
	return PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
}

struct page __init *__populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	unsigned long size = section_map_size();
	struct page *map = sparse_buffer_alloc(size);
	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);

	if (map)
		return map;

	map = memblock_alloc_try_nid_raw(size, size, addr,
					  MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	if (!map)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa\n",
		      __func__, size, PAGE_SIZE, nid, &addr);

	return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

static void *sparsemap_buf __meminitdata;
static void *sparsemap_buf_end __meminitdata;

static inline void __meminit sparse_buffer_free(unsigned long size)
{
	WARN_ON(!sparsemap_buf || size == 0);
	memblock_free_early(__pa(sparsemap_buf), size);
}

static void __init sparse_buffer_init(unsigned long size, int nid)
{
	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);
	WARN_ON(sparsemap_buf);	/* forgot to call sparse_buffer_fini()? */
	/*
	 * The pre-allocated buffer is mainly used by __populate_section_memmap
	 * and we want it to be properly aligned to the section size - this is
	 * especially the case for VMEMMAP, which maps memmap to PMDs.
	 */
	sparsemap_buf = memblock_alloc_exact_nid_raw(size, section_map_size(),
					addr, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	sparsemap_buf_end = sparsemap_buf + size;
}

static void __init sparse_buffer_fini(void)
{
	unsigned long size = sparsemap_buf_end - sparsemap_buf;

	if (sparsemap_buf && size > 0)
		sparse_buffer_free(size);
	sparsemap_buf = NULL;
}

void * __meminit sparse_buffer_alloc(unsigned long size)
{
	void *ptr = NULL;

	if (sparsemap_buf) {
		ptr = (void *) roundup((unsigned long)sparsemap_buf, size);
		if (ptr + size > sparsemap_buf_end)
			ptr = NULL;
		else {
			/* Free the space skipped over for alignment */
			if ((unsigned long)(ptr - sparsemap_buf) > 0)
				sparse_buffer_free((unsigned long)(ptr - sparsemap_buf));
			sparsemap_buf = ptr + size;
		}
	}
	return ptr;
}
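/*
 * Lifecycle sketch (illustrative; this is the pattern sparse_init_nid()
 * below follows):
 *
 *	sparse_buffer_init(map_count * section_map_size(), nid);
 *	for each present section on the node:
 *		map = sparse_buffer_alloc(section_map_size());
 *	sparse_buffer_fini();
 *
 * Allocations that fit are carved from one big per-node chunk; leftover
 * space, including holes skipped for alignment, goes back to memblock.
 */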

void __weak __meminit vmemmap_populate_print_last(void)
{
}

/*
 * Initialize sparse on a specific node. The node spans [pnum_begin, pnum_end),
 * and the number of present sections in this node is map_count.
 */
static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
				   unsigned long pnum_end,
				   unsigned long map_count)
{
	struct mem_section_usage *usage;
	unsigned long pnum;
	struct page *map;

	usage = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nid),
			mem_section_usage_size() * map_count);
	if (!usage) {
		pr_err("%s: node[%d] usemap allocation failed", __func__, nid);
		goto failed;
	}
	sparse_buffer_init(map_count * section_map_size(), nid);
	for_each_present_section_nr(pnum_begin, pnum) {
		unsigned long pfn = section_nr_to_pfn(pnum);

		if (pnum >= pnum_end)
			break;

		map = __populate_section_memmap(pfn, PAGES_PER_SECTION,
				nid, NULL);
		if (!map) {
			pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.",
			       __func__, nid);
			pnum_begin = pnum;
			sparse_buffer_fini();
			goto failed;
		}
		check_usemap_section_nr(nid, usage);
		sparse_init_one_section(__nr_to_section(pnum), pnum, map, usage,
				SECTION_IS_EARLY);
		usage = (void *) usage + mem_section_usage_size();
	}
	sparse_buffer_fini();
	return;
failed:
	/* We failed to allocate; mark all the following pnums as not present */
	for_each_present_section_nr(pnum_begin, pnum) {
		struct mem_section *ms;

		if (pnum >= pnum_end)
			break;
		ms = __nr_to_section(pnum);
		ms->section_mem_map = 0;
	}
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum_end, pnum_begin, map_count = 1;
	int nid_begin;

	memblocks_present();

	pnum_begin = first_present_section_nr();
	nid_begin = sparse_early_nid(__nr_to_section(pnum_begin));

	/* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
	set_pageblock_order();

	for_each_present_section_nr(pnum_begin + 1, pnum_end) {
		int nid = sparse_early_nid(__nr_to_section(pnum_end));

		if (nid == nid_begin) {
			map_count++;
			continue;
		}
		/* Init node with sections in range [pnum_begin, pnum_end) */
		sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
		nid_begin = nid;
		pnum_begin = pnum_end;
		map_count = 1;
	}
	/* cover the last node */
	sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
	vmemmap_populate_print_last();
}
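/*
 * Walk-through (illustrative numbers): with sections 0..9 present on
 * node 0 and sections 10..15 on node 1, the loop above batches the
 * first node into sparse_init_nid(0, 0, 10, 10); the trailing call
 * then covers node 1's remaining six sections.
 */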

#ifdef CONFIG_MEMORY_HOTPLUG

/* Mark all memory sections within the pfn range as online */
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/* onlining code should never touch invalid ranges */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map |= SECTION_IS_ONLINE;
	}
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/* Mark all memory sections within the pfn range as offline */
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/*
		 * TODO: this needs some double-checking. The offlining code
		 * makes sure to check pfn_valid, but those checks might just
		 * be bogus.
		 */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map &= ~SECTION_IS_ONLINE;
	}
}
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static struct page * __meminit populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	return __populate_section_memmap(pfn, nr_pages, nid, altmap);
}

static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	unsigned long start = (unsigned long) pfn_to_page(pfn);
	unsigned long end = start + nr_pages * sizeof(struct page);

	vmemmap_free(start, end, altmap);
}

static void free_map_bootmem(struct page *memmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end, NULL);
}

static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
	DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };
	struct mem_section *ms = __pfn_to_section(pfn);
	unsigned long *subsection_map = ms->usage
		? &ms->usage->subsection_map[0] : NULL;

	subsection_mask_set(map, pfn, nr_pages);
	if (subsection_map)
		bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION);

	if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),
				"section already deactivated (%#lx + %ld)\n",
				pfn, nr_pages))
		return -EINVAL;

	bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
	return 0;
}

static bool is_subsection_map_empty(struct mem_section *ms)
{
	return bitmap_empty(&ms->usage->subsection_map[0],
			    SUBSECTIONS_PER_SECTION);
}

static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
	unsigned long *subsection_map;
	int rc = 0;

	subsection_mask_set(map, pfn, nr_pages);

	subsection_map = &ms->usage->subsection_map[0];

	if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
		rc = -EINVAL;
	else if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION))
		rc = -EEXIST;
	else
		bitmap_or(subsection_map, map, subsection_map,
				SUBSECTIONS_PER_SECTION);

	return rc;
}
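/*
 * Example (illustrative): hot-adding two disjoint subsection ranges of
 * the same section succeeds twice, each fill_subsection_map() call ORs
 * fresh bits in; re-adding an already populated range trips the
 * bitmap_intersects() check and returns -EEXIST. Removal XORs the same
 * mask back out in clear_subsection_map() above.
 */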
#else
struct page * __meminit populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	return kvmalloc_node(array_size(sizeof(struct page),
					PAGES_PER_SECTION), GFP_KERNEL, nid);
}

static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	kvfree(pfn_to_page(pfn));
}

static void free_map_bootmem(struct page *memmap)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	unsigned long magic, nr_pages;
	struct page *page = virt_to_page(memmap);

	nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
		>> PAGE_SHIFT;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = (unsigned long) page->freelist;

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page_private(page);

		/*
		 * When this function is called, the section being removed is
		 * in a logically offlined state. This means all of its pages
		 * are isolated from the page allocator. If the memmap of the
		 * section being removed is placed in the same section, it
		 * must not be freed: if it were freed, the page allocator
		 * could hand it out even though the memory will soon be
		 * removed physically.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}

static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	return 0;
}

static bool is_subsection_map_empty(struct mem_section *ms)
{
	return true;
}

static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	return 0;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * To deactivate a memory region, there are 3 cases to handle across
 * two configurations (SPARSEMEM_VMEMMAP={y,n}):
 *
 * 1. deactivation of a partial hot-added section (only possible in
 *    the SPARSEMEM_VMEMMAP=y case).
 *      a) section was present at memory init.
 *      b) section was hot-added post memory init.
 * 2. deactivation of a complete hot-added section.
 * 3. deactivation of a complete section from memory init.
 *
 * For 1, when the subsection_map is not empty we will not be freeing
 * the usage map, but still need to free the vmemmap range.
 *
 * For 2 and 3, the SPARSEMEM_VMEMMAP={y,n} cases are unified.
 */
static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	bool section_is_early = early_section(ms);
	struct page *memmap = NULL;
	bool empty;

	if (clear_subsection_map(pfn, nr_pages))
		return;

	empty = is_subsection_map_empty(ms);
	if (empty) {
		unsigned long section_nr = pfn_to_section_nr(pfn);

		/*
		 * When removing an early section, the usage map is kept (as the
		 * usage maps of other sections fall into the same page). It
		 * will be re-used when re-adding the section - which is then no
		 * longer an early section. If the usage map is PageReserved, it
		 * was allocated during boot.
		 */
		if (!PageReserved(virt_to_page(ms->usage))) {
			kfree(ms->usage);
			ms->usage = NULL;
		}
		memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
		/*
		 * Mark the section invalid so that valid_section()
		 * returns false. This prevents code from dereferencing
		 * the ms->usage array.
		 */
		ms->section_mem_map &= ~SECTION_HAS_MEM_MAP;
	}

	/*
	 * The memmap of early sections is always fully populated. See
	 * section_activate() and pfn_valid().
	 */
	if (!section_is_early)
		depopulate_section_memmap(pfn, nr_pages, altmap);
	else if (memmap)
		free_map_bootmem(memmap);

	if (empty)
		ms->section_mem_map = (unsigned long)NULL;
}
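/*
 * Illustration of case 1 above (sketch): removing one populated
 * subsection from an otherwise populated section clears only that
 * range's bits, is_subsection_map_empty() stays false, the usage map
 * survives, and only the vmemmap backing of the removed pfns is torn
 * down.
 */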

static struct page * __meminit section_activate(int nid, unsigned long pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	struct mem_section_usage *usage = NULL;
	struct page *memmap;
	int rc = 0;

	if (!ms->usage) {
		usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
		if (!usage)
			return ERR_PTR(-ENOMEM);
		ms->usage = usage;
	}

	rc = fill_subsection_map(pfn, nr_pages);
	if (rc) {
		if (usage)
			ms->usage = NULL;
		kfree(usage);
		return ERR_PTR(rc);
	}

	/*
	 * The early init code does not consider partially populated
	 * initial sections; it simply assumes that memory will never be
	 * referenced.  If we hot-add memory into such a section then we
	 * do not need to populate the memmap and can simply reuse what
	 * is already there.
	 */
	if (nr_pages < PAGES_PER_SECTION && early_section(ms))
		return pfn_to_page(pfn);

	memmap = populate_section_memmap(pfn, nr_pages, nid, altmap);
	if (!memmap) {
		section_deactivate(pfn, nr_pages, altmap);
		return ERR_PTR(-ENOMEM);
	}

	return memmap;
}

/**
 * sparse_add_section - add a memory section, or populate an existing one
 * @nid: The node to add section on
 * @start_pfn: start pfn of the memory range
 * @nr_pages: number of pfns to add in the section
 * @altmap: device page map
 *
 * This is only intended for hotplug.
 *
 * Note that only VMEMMAP supports sub-section aligned hotplug;
 * the proper alignment and size are gated by check_pfn_span().
 *
 * Return:
 * * 0		- On success.
 * * -EEXIST	- Section is already present.
 * * -ENOMEM	- Out of memory.
 */
int __meminit sparse_add_section(int nid, unsigned long start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct mem_section *ms;
	struct page *memmap;
	int ret;

	ret = sparse_index_init(section_nr, nid);
	if (ret < 0)
		return ret;

	memmap = section_activate(nid, start_pfn, nr_pages, altmap);
	if (IS_ERR(memmap))
		return PTR_ERR(memmap);

	/*
	 * Poison uninitialized struct pages in order to catch invalid flags
	 * combinations.
	 */
	page_init_poison(memmap, sizeof(struct page) * nr_pages);

	ms = __nr_to_section(section_nr);
	set_section_nid(section_nr, nid);
	section_mark_present(ms);

	/* Align memmap to section boundary in the subsection case */
	if (section_nr_to_pfn(section_nr) != start_pfn)
		memmap = pfn_to_page(section_nr_to_pfn(section_nr));
	sparse_init_one_section(ms, section_nr, memmap, ms->usage, 0);

	return 0;
}

#ifdef CONFIG_MEMORY_FAILURE
static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
	int i;

	/*
	 * A further optimization is to have per-section refcounted
	 * num_poisoned_pages.  But that would need more space per memmap, so
	 * for now just do a quick global check to speed up this routine in the
	 * absence of bad pages.
	 */
	if (atomic_long_read(&num_poisoned_pages) == 0)
		return;

	for (i = 0; i < nr_pages; i++) {
		if (PageHWPoison(&memmap[i])) {
			num_poisoned_pages_dec();
			ClearPageHWPoison(&memmap[i]);
		}
	}
}
#else
static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
}
#endif

void sparse_remove_section(struct mem_section *ms, unsigned long pfn,
		unsigned long nr_pages, unsigned long map_offset,
		struct vmem_altmap *altmap)
{
	clear_hwpoisoned_pages(pfn_to_page(pfn) + map_offset,
			nr_pages - map_offset);
	section_deactivate(pfn, nr_pages, altmap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */