1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0 2d41dee36SAndy Whitcroft /* 3d41dee36SAndy Whitcroft * sparse memory mappings. 4d41dee36SAndy Whitcroft */ 5d41dee36SAndy Whitcroft #include <linux/mm.h> 65a0e3ad6STejun Heo #include <linux/slab.h> 7d41dee36SAndy Whitcroft #include <linux/mmzone.h> 897ad1087SMike Rapoport #include <linux/memblock.h> 93b32123dSGideon Israel Dsouza #include <linux/compiler.h> 100b0acbecSDave Hansen #include <linux/highmem.h> 11b95f1b31SPaul Gortmaker #include <linux/export.h> 1228ae55c9SDave Hansen #include <linux/spinlock.h> 130b0acbecSDave Hansen #include <linux/vmalloc.h> 149f82883cSAlastair D'Silva #include <linux/swap.h> 159f82883cSAlastair D'Silva #include <linux/swapops.h> 16426e5c42SMuchun Song #include <linux/bootmem_info.h> 173b32123dSGideon Israel Dsouza 180c0a4a51SYasunori Goto #include "internal.h" 19d41dee36SAndy Whitcroft #include <asm/dma.h> 20d41dee36SAndy Whitcroft 21d41dee36SAndy Whitcroft /* 22d41dee36SAndy Whitcroft * Permanent SPARSEMEM data: 23d41dee36SAndy Whitcroft * 24d41dee36SAndy Whitcroft * 1) mem_section - memory sections, mem_map's for valid memory 25d41dee36SAndy Whitcroft */ 263e347261SBob Picco #ifdef CONFIG_SPARSEMEM_EXTREME 2783e3c487SKirill A. Shutemov struct mem_section **mem_section; 283e347261SBob Picco #else 293e347261SBob Picco struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT] 3022fc6eccSRavikiran G Thirumalai ____cacheline_internodealigned_in_smp; 313e347261SBob Picco #endif 323e347261SBob Picco EXPORT_SYMBOL(mem_section); 333e347261SBob Picco 3489689ae7SChristoph Lameter #ifdef NODE_NOT_IN_PAGE_FLAGS 3589689ae7SChristoph Lameter /* 3689689ae7SChristoph Lameter * If we did not store the node number in the page then we have to 3789689ae7SChristoph Lameter * do a lookup in the section_to_node_table in order to find which 3889689ae7SChristoph Lameter * node the page belongs to. 3989689ae7SChristoph Lameter */ 4089689ae7SChristoph Lameter #if MAX_NUMNODES <= 256 4189689ae7SChristoph Lameter static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned; 4289689ae7SChristoph Lameter #else 4389689ae7SChristoph Lameter static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned; 4489689ae7SChristoph Lameter #endif 4589689ae7SChristoph Lameter 4633dd4e0eSIan Campbell int page_to_nid(const struct page *page) 4789689ae7SChristoph Lameter { 4889689ae7SChristoph Lameter return section_to_node_table[page_to_section(page)]; 4989689ae7SChristoph Lameter } 5089689ae7SChristoph Lameter EXPORT_SYMBOL(page_to_nid); 5185770ffeSAndy Whitcroft 5285770ffeSAndy Whitcroft static void set_section_nid(unsigned long section_nr, int nid) 5385770ffeSAndy Whitcroft { 5485770ffeSAndy Whitcroft section_to_node_table[section_nr] = nid; 5585770ffeSAndy Whitcroft } 5685770ffeSAndy Whitcroft #else /* !NODE_NOT_IN_PAGE_FLAGS */ 5785770ffeSAndy Whitcroft static inline void set_section_nid(unsigned long section_nr, int nid) 5885770ffeSAndy Whitcroft { 5985770ffeSAndy Whitcroft } 6089689ae7SChristoph Lameter #endif 6189689ae7SChristoph Lameter 623e347261SBob Picco #ifdef CONFIG_SPARSEMEM_EXTREME 63bd721ea7SFabian Frederick static noinline struct mem_section __ref *sparse_index_alloc(int nid) 64802f192eSBob Picco { 6528ae55c9SDave Hansen struct mem_section *section = NULL; 6628ae55c9SDave Hansen unsigned long array_size = SECTIONS_PER_ROOT * 6728ae55c9SDave Hansen sizeof(struct mem_section); 68802f192eSBob Picco 698a7f97b9SMike Rapoport if (slab_is_available()) { 705b760e64SGavin Shan section = kzalloc_node(array_size, GFP_KERNEL, nid); 718a7f97b9SMike Rapoport } else { 727e1c4e27SMike Rapoport section = memblock_alloc_node(array_size, SMP_CACHE_BYTES, 737e1c4e27SMike Rapoport nid); 748a7f97b9SMike Rapoport if (!section) 758a7f97b9SMike Rapoport panic("%s: Failed to allocate %lu bytes nid=%d\n", 768a7f97b9SMike Rapoport __func__, array_size, nid); 778a7f97b9SMike Rapoport } 783e347261SBob Picco 7928ae55c9SDave Hansen return section; 80802f192eSBob Picco } 8128ae55c9SDave Hansen 82a3142c8eSYasunori Goto static int __meminit sparse_index_init(unsigned long section_nr, int nid) 8328ae55c9SDave Hansen { 8428ae55c9SDave Hansen unsigned long root = SECTION_NR_TO_ROOT(section_nr); 8528ae55c9SDave Hansen struct mem_section *section; 8628ae55c9SDave Hansen 87ba72b4c8SDan Williams /* 88ba72b4c8SDan Williams * An existing section is possible in the sub-section hotplug 89ba72b4c8SDan Williams * case. First hot-add instantiates, follow-on hot-add reuses 90ba72b4c8SDan Williams * the existing section. 91ba72b4c8SDan Williams * 92ba72b4c8SDan Williams * The mem_hotplug_lock resolves the apparent race below. 93ba72b4c8SDan Williams */ 9428ae55c9SDave Hansen if (mem_section[root]) 95ba72b4c8SDan Williams return 0; 9628ae55c9SDave Hansen 9728ae55c9SDave Hansen section = sparse_index_alloc(nid); 98af0cd5a7SWANG Cong if (!section) 99af0cd5a7SWANG Cong return -ENOMEM; 10028ae55c9SDave Hansen 10128ae55c9SDave Hansen mem_section[root] = section; 102c1c95183SGavin Shan 1039d1936cfSZhang Yanfei return 0; 10428ae55c9SDave Hansen } 10528ae55c9SDave Hansen #else /* !SPARSEMEM_EXTREME */ 10628ae55c9SDave Hansen static inline int sparse_index_init(unsigned long section_nr, int nid) 10728ae55c9SDave Hansen { 10828ae55c9SDave Hansen return 0; 10928ae55c9SDave Hansen } 11028ae55c9SDave Hansen #endif 11128ae55c9SDave Hansen 11230c253e6SAndy Whitcroft /* 11330c253e6SAndy Whitcroft * During early boot, before section_mem_map is used for an actual 11430c253e6SAndy Whitcroft * mem_map, we use section_mem_map to store the section's NUMA 11530c253e6SAndy Whitcroft * node. This keeps us from having to use another data structure. The 11630c253e6SAndy Whitcroft * node information is cleared just before we store the real mem_map. 11730c253e6SAndy Whitcroft */ 11830c253e6SAndy Whitcroft static inline unsigned long sparse_encode_early_nid(int nid) 11930c253e6SAndy Whitcroft { 120e0dbb2bcSMatthew Wilcox return ((unsigned long)nid << SECTION_NID_SHIFT); 12130c253e6SAndy Whitcroft } 12230c253e6SAndy Whitcroft 12330c253e6SAndy Whitcroft static inline int sparse_early_nid(struct mem_section *section) 12430c253e6SAndy Whitcroft { 12530c253e6SAndy Whitcroft return (section->section_mem_map >> SECTION_NID_SHIFT); 12630c253e6SAndy Whitcroft } 12730c253e6SAndy Whitcroft 1282dbb51c4SMel Gorman /* Validate the physical addressing limitations of the model */ 129c7878534SMiaohe Lin static void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn, 1302dbb51c4SMel Gorman unsigned long *end_pfn) 131d41dee36SAndy Whitcroft { 1322dbb51c4SMel Gorman unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT); 133d41dee36SAndy Whitcroft 134bead9a3aSIngo Molnar /* 135bead9a3aSIngo Molnar * Sanity checks - do not allow an architecture to pass 136bead9a3aSIngo Molnar * in larger pfns than the maximum scope of sparsemem: 137bead9a3aSIngo Molnar */ 1382dbb51c4SMel Gorman if (*start_pfn > max_sparsemem_pfn) { 1392dbb51c4SMel Gorman mminit_dprintk(MMINIT_WARNING, "pfnvalidation", 1402dbb51c4SMel Gorman "Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n", 1412dbb51c4SMel Gorman *start_pfn, *end_pfn, max_sparsemem_pfn); 1422dbb51c4SMel Gorman WARN_ON_ONCE(1); 1432dbb51c4SMel Gorman *start_pfn = max_sparsemem_pfn; 1442dbb51c4SMel Gorman *end_pfn = max_sparsemem_pfn; 145ef161a98SCyrill Gorcunov } else if (*end_pfn > max_sparsemem_pfn) { 1462dbb51c4SMel Gorman mminit_dprintk(MMINIT_WARNING, "pfnvalidation", 1472dbb51c4SMel Gorman "End of range %lu -> %lu exceeds SPARSEMEM max %lu\n", 1482dbb51c4SMel Gorman *start_pfn, *end_pfn, max_sparsemem_pfn); 1492dbb51c4SMel Gorman WARN_ON_ONCE(1); 1502dbb51c4SMel Gorman *end_pfn = max_sparsemem_pfn; 1512dbb51c4SMel Gorman } 1522dbb51c4SMel Gorman } 1532dbb51c4SMel Gorman 154c4e1be9eSDave Hansen /* 155c4e1be9eSDave Hansen * There are a number of times that we loop over NR_MEM_SECTIONS, 156c4e1be9eSDave Hansen * looking for section_present() on each. But, when we have very 157c4e1be9eSDave Hansen * large physical address spaces, NR_MEM_SECTIONS can also be 158c4e1be9eSDave Hansen * very large which makes the loops quite long. 159c4e1be9eSDave Hansen * 160c4e1be9eSDave Hansen * Keeping track of this gives us an easy way to break out of 161c4e1be9eSDave Hansen * those loops early. 162c4e1be9eSDave Hansen */ 1632491f0a2SDavid Hildenbrand unsigned long __highest_present_section_nr; 164a1bc561bSOhhoon Kwon static void __section_mark_present(struct mem_section *ms, 165a1bc561bSOhhoon Kwon unsigned long section_nr) 166c4e1be9eSDave Hansen { 167c4e1be9eSDave Hansen if (section_nr > __highest_present_section_nr) 168c4e1be9eSDave Hansen __highest_present_section_nr = section_nr; 169c4e1be9eSDave Hansen 170c4e1be9eSDave Hansen ms->section_mem_map |= SECTION_MARKED_PRESENT; 171c4e1be9eSDave Hansen } 172c4e1be9eSDave Hansen 173c4e1be9eSDave Hansen #define for_each_present_section_nr(start, section_nr) \ 174c4e1be9eSDave Hansen for (section_nr = next_present_section_nr(start-1); \ 175c200a711Sliuq section_nr != -1; \ 176c4e1be9eSDave Hansen section_nr = next_present_section_nr(section_nr)) 177c4e1be9eSDave Hansen 17885c77f79SPavel Tatashin static inline unsigned long first_present_section_nr(void) 17985c77f79SPavel Tatashin { 18085c77f79SPavel Tatashin return next_present_section_nr(-1); 18185c77f79SPavel Tatashin } 18285c77f79SPavel Tatashin 1830a9f9f62SBaoquan He #ifdef CONFIG_SPARSEMEM_VMEMMAP 184758b8db4SYi Wang static void subsection_mask_set(unsigned long *map, unsigned long pfn, 185f46edbd1SDan Williams unsigned long nr_pages) 186f46edbd1SDan Williams { 187f46edbd1SDan Williams int idx = subsection_map_index(pfn); 188f46edbd1SDan Williams int end = subsection_map_index(pfn + nr_pages - 1); 189f46edbd1SDan Williams 190f46edbd1SDan Williams bitmap_set(map, idx, end - idx + 1); 191f46edbd1SDan Williams } 192f46edbd1SDan Williams 193f46edbd1SDan Williams void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages) 194f46edbd1SDan Williams { 195f46edbd1SDan Williams int end_sec = pfn_to_section_nr(pfn + nr_pages - 1); 1969a845030SDan Williams unsigned long nr, start_sec = pfn_to_section_nr(pfn); 197f46edbd1SDan Williams 198f46edbd1SDan Williams if (!nr_pages) 199f46edbd1SDan Williams return; 200f46edbd1SDan Williams 2019a845030SDan Williams for (nr = start_sec; nr <= end_sec; nr++) { 202f46edbd1SDan Williams struct mem_section *ms; 203f46edbd1SDan Williams unsigned long pfns; 204f46edbd1SDan Williams 205f46edbd1SDan Williams pfns = min(nr_pages, PAGES_PER_SECTION 206f46edbd1SDan Williams - (pfn & ~PAGE_SECTION_MASK)); 2079a845030SDan Williams ms = __nr_to_section(nr); 208f46edbd1SDan Williams subsection_mask_set(ms->usage->subsection_map, pfn, pfns); 209f46edbd1SDan Williams 2109a845030SDan Williams pr_debug("%s: sec: %lu pfns: %lu set(%d, %d)\n", __func__, nr, 211f46edbd1SDan Williams pfns, subsection_map_index(pfn), 212f46edbd1SDan Williams subsection_map_index(pfn + pfns - 1)); 213f46edbd1SDan Williams 214f46edbd1SDan Williams pfn += pfns; 215f46edbd1SDan Williams nr_pages -= pfns; 216f46edbd1SDan Williams } 217f46edbd1SDan Williams } 2180a9f9f62SBaoquan He #else 2190a9f9f62SBaoquan He void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages) 2200a9f9f62SBaoquan He { 2210a9f9f62SBaoquan He } 2220a9f9f62SBaoquan He #endif 223f46edbd1SDan Williams 2242dbb51c4SMel Gorman /* Record a memory area against a node. */ 225c89ab04fSMike Rapoport static void __init memory_present(int nid, unsigned long start, unsigned long end) 2262dbb51c4SMel Gorman { 2272dbb51c4SMel Gorman unsigned long pfn; 228bead9a3aSIngo Molnar 229629a359bSKirill A. Shutemov #ifdef CONFIG_SPARSEMEM_EXTREME 230629a359bSKirill A. Shutemov if (unlikely(!mem_section)) { 231629a359bSKirill A. Shutemov unsigned long size, align; 232629a359bSKirill A. Shutemov 233d09cfbbfSBaoquan He size = sizeof(struct mem_section *) * NR_SECTION_ROOTS; 234629a359bSKirill A. Shutemov align = 1 << (INTERNODE_CACHE_SHIFT); 235eb31d559SMike Rapoport mem_section = memblock_alloc(size, align); 2368a7f97b9SMike Rapoport if (!mem_section) 2378a7f97b9SMike Rapoport panic("%s: Failed to allocate %lu bytes align=0x%lx\n", 2388a7f97b9SMike Rapoport __func__, size, align); 239629a359bSKirill A. Shutemov } 240629a359bSKirill A. Shutemov #endif 241629a359bSKirill A. Shutemov 242d41dee36SAndy Whitcroft start &= PAGE_SECTION_MASK; 2432dbb51c4SMel Gorman mminit_validate_memmodel_limits(&start, &end); 244d41dee36SAndy Whitcroft for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) { 245d41dee36SAndy Whitcroft unsigned long section = pfn_to_section_nr(pfn); 246802f192eSBob Picco struct mem_section *ms; 247802f192eSBob Picco 248802f192eSBob Picco sparse_index_init(section, nid); 24985770ffeSAndy Whitcroft set_section_nid(section, nid); 250802f192eSBob Picco 251802f192eSBob Picco ms = __nr_to_section(section); 252c4e1be9eSDave Hansen if (!ms->section_mem_map) { 2532d070eabSMichal Hocko ms->section_mem_map = sparse_encode_early_nid(nid) | 2542d070eabSMichal Hocko SECTION_IS_ONLINE; 255a1bc561bSOhhoon Kwon __section_mark_present(ms, section); 256c4e1be9eSDave Hansen } 257d41dee36SAndy Whitcroft } 258d41dee36SAndy Whitcroft } 259d41dee36SAndy Whitcroft 260d41dee36SAndy Whitcroft /* 261c89ab04fSMike Rapoport * Mark all memblocks as present using memory_present(). 262c89ab04fSMike Rapoport * This is a convenience function that is useful to mark all of the systems 263c89ab04fSMike Rapoport * memory as present during initialization. 2649def36e0SLogan Gunthorpe */ 265c89ab04fSMike Rapoport static void __init memblocks_present(void) 2669def36e0SLogan Gunthorpe { 267c9118e6cSMike Rapoport unsigned long start, end; 268c9118e6cSMike Rapoport int i, nid; 2699def36e0SLogan Gunthorpe 270c9118e6cSMike Rapoport for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) 271c9118e6cSMike Rapoport memory_present(nid, start, end); 2729def36e0SLogan Gunthorpe } 2739def36e0SLogan Gunthorpe 2749def36e0SLogan Gunthorpe /* 27529751f69SAndy Whitcroft * Subtle, we encode the real pfn into the mem_map such that 27629751f69SAndy Whitcroft * the identity pfn - section_mem_map will return the actual 27729751f69SAndy Whitcroft * physical page frame number. 27829751f69SAndy Whitcroft */ 27929751f69SAndy Whitcroft static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum) 28029751f69SAndy Whitcroft { 281def9b71eSPetr Tesarik unsigned long coded_mem_map = 282def9b71eSPetr Tesarik (unsigned long)(mem_map - (section_nr_to_pfn(pnum))); 283ed7802ddSMuchun Song BUILD_BUG_ON(SECTION_MAP_LAST_BIT > PFN_SECTION_SHIFT); 284def9b71eSPetr Tesarik BUG_ON(coded_mem_map & ~SECTION_MAP_MASK); 285def9b71eSPetr Tesarik return coded_mem_map; 28629751f69SAndy Whitcroft } 28729751f69SAndy Whitcroft 2883a0aaefeSDavid Hildenbrand #ifdef CONFIG_MEMORY_HOTPLUG 28929751f69SAndy Whitcroft /* 290ea01ea93SBadari Pulavarty * Decode mem_map from the coded memmap 29129751f69SAndy Whitcroft */ 29229751f69SAndy Whitcroft struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum) 29329751f69SAndy Whitcroft { 294ea01ea93SBadari Pulavarty /* mask off the extra low bits of information */ 295ea01ea93SBadari Pulavarty coded_mem_map &= SECTION_MAP_MASK; 29629751f69SAndy Whitcroft return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum); 29729751f69SAndy Whitcroft } 2983a0aaefeSDavid Hildenbrand #endif /* CONFIG_MEMORY_HOTPLUG */ 29929751f69SAndy Whitcroft 3004e40987fSOscar Salvador static void __meminit sparse_init_one_section(struct mem_section *ms, 3015c0e3066SMel Gorman unsigned long pnum, struct page *mem_map, 302326e1b8fSDan Williams struct mem_section_usage *usage, unsigned long flags) 30329751f69SAndy Whitcroft { 30430c253e6SAndy Whitcroft ms->section_mem_map &= ~SECTION_MAP_MASK; 305326e1b8fSDan Williams ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) 306326e1b8fSDan Williams | SECTION_HAS_MEM_MAP | flags; 307f1eca35aSDan Williams ms->usage = usage; 30829751f69SAndy Whitcroft } 30929751f69SAndy Whitcroft 310f1eca35aSDan Williams static unsigned long usemap_size(void) 3115c0e3066SMel Gorman { 31260a7a88dSWei Yang return BITS_TO_LONGS(SECTION_BLOCKFLAGS_BITS) * sizeof(unsigned long); 3135c0e3066SMel Gorman } 3145c0e3066SMel Gorman 315f1eca35aSDan Williams size_t mem_section_usage_size(void) 3165c0e3066SMel Gorman { 317f1eca35aSDan Williams return sizeof(struct mem_section_usage) + usemap_size(); 3185c0e3066SMel Gorman } 3195c0e3066SMel Gorman 3202e126aa2SMike Rapoport #ifdef CONFIG_MEMORY_HOTREMOVE 321ccbd6283SMiles Chen static inline phys_addr_t pgdat_to_phys(struct pglist_data *pgdat) 322ccbd6283SMiles Chen { 323a9ee6cf5SMike Rapoport #ifndef CONFIG_NUMA 324bdbda735SMiles Chen VM_BUG_ON(pgdat != &contig_page_data); 325bdbda735SMiles Chen return __pa_symbol(&contig_page_data); 326ccbd6283SMiles Chen #else 327ccbd6283SMiles Chen return __pa(pgdat); 328ccbd6283SMiles Chen #endif 329ccbd6283SMiles Chen } 330ccbd6283SMiles Chen 331f1eca35aSDan Williams static struct mem_section_usage * __init 332a4322e1bSYinghai Lu sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat, 333238305bbSJohannes Weiner unsigned long size) 33448c90682SYasunori Goto { 335f1eca35aSDan Williams struct mem_section_usage *usage; 33699ab7b19SYinghai Lu unsigned long goal, limit; 33799ab7b19SYinghai Lu int nid; 33848c90682SYasunori Goto /* 33948c90682SYasunori Goto * A page may contain usemaps for other sections preventing the 34048c90682SYasunori Goto * page being freed and making a section unremovable while 341c800bcd5SLi Zhong * other sections referencing the usemap remain active. Similarly, 34248c90682SYasunori Goto * a pgdat can prevent a section being removed. If section A 34348c90682SYasunori Goto * contains a pgdat and section B contains the usemap, both 34448c90682SYasunori Goto * sections become inter-dependent. This allocates usemaps 34548c90682SYasunori Goto * from the same section as the pgdat where possible to avoid 34648c90682SYasunori Goto * this problem. 34748c90682SYasunori Goto */ 348ccbd6283SMiles Chen goal = pgdat_to_phys(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT); 34999ab7b19SYinghai Lu limit = goal + (1UL << PA_SECTION_SHIFT); 35099ab7b19SYinghai Lu nid = early_pfn_to_nid(goal >> PAGE_SHIFT); 35199ab7b19SYinghai Lu again: 352f1eca35aSDan Williams usage = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, goal, limit, nid); 353f1eca35aSDan Williams if (!usage && limit) { 35499ab7b19SYinghai Lu limit = 0; 35599ab7b19SYinghai Lu goto again; 35699ab7b19SYinghai Lu } 357f1eca35aSDan Williams return usage; 35848c90682SYasunori Goto } 35948c90682SYasunori Goto 360f1eca35aSDan Williams static void __init check_usemap_section_nr(int nid, 361f1eca35aSDan Williams struct mem_section_usage *usage) 36248c90682SYasunori Goto { 36348c90682SYasunori Goto unsigned long usemap_snr, pgdat_snr; 36483e3c487SKirill A. Shutemov static unsigned long old_usemap_snr; 36583e3c487SKirill A. Shutemov static unsigned long old_pgdat_snr; 36648c90682SYasunori Goto struct pglist_data *pgdat = NODE_DATA(nid); 36748c90682SYasunori Goto int usemap_nid; 36848c90682SYasunori Goto 36983e3c487SKirill A. Shutemov /* First call */ 37083e3c487SKirill A. Shutemov if (!old_usemap_snr) { 37183e3c487SKirill A. Shutemov old_usemap_snr = NR_MEM_SECTIONS; 37283e3c487SKirill A. Shutemov old_pgdat_snr = NR_MEM_SECTIONS; 37383e3c487SKirill A. Shutemov } 37483e3c487SKirill A. Shutemov 375f1eca35aSDan Williams usemap_snr = pfn_to_section_nr(__pa(usage) >> PAGE_SHIFT); 376ccbd6283SMiles Chen pgdat_snr = pfn_to_section_nr(pgdat_to_phys(pgdat) >> PAGE_SHIFT); 37748c90682SYasunori Goto if (usemap_snr == pgdat_snr) 37848c90682SYasunori Goto return; 37948c90682SYasunori Goto 38048c90682SYasunori Goto if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr) 38148c90682SYasunori Goto /* skip redundant message */ 38248c90682SYasunori Goto return; 38348c90682SYasunori Goto 38448c90682SYasunori Goto old_usemap_snr = usemap_snr; 38548c90682SYasunori Goto old_pgdat_snr = pgdat_snr; 38648c90682SYasunori Goto 38748c90682SYasunori Goto usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr)); 38848c90682SYasunori Goto if (usemap_nid != nid) { 3891170532bSJoe Perches pr_info("node %d must be removed before remove section %ld\n", 39048c90682SYasunori Goto nid, usemap_snr); 39148c90682SYasunori Goto return; 39248c90682SYasunori Goto } 39348c90682SYasunori Goto /* 39448c90682SYasunori Goto * There is a circular dependency. 39548c90682SYasunori Goto * Some platforms allow un-removable section because they will just 39648c90682SYasunori Goto * gather other removable sections for dynamic partitioning. 39748c90682SYasunori Goto * Just notify un-removable section's number here. 39848c90682SYasunori Goto */ 3991170532bSJoe Perches pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n", 4001170532bSJoe Perches usemap_snr, pgdat_snr, nid); 40148c90682SYasunori Goto } 40248c90682SYasunori Goto #else 403f1eca35aSDan Williams static struct mem_section_usage * __init 404a4322e1bSYinghai Lu sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat, 405238305bbSJohannes Weiner unsigned long size) 40648c90682SYasunori Goto { 40726fb3daeSMike Rapoport return memblock_alloc_node(size, SMP_CACHE_BYTES, pgdat->node_id); 40848c90682SYasunori Goto } 40948c90682SYasunori Goto 410f1eca35aSDan Williams static void __init check_usemap_section_nr(int nid, 411f1eca35aSDan Williams struct mem_section_usage *usage) 41248c90682SYasunori Goto { 41348c90682SYasunori Goto } 41448c90682SYasunori Goto #endif /* CONFIG_MEMORY_HOTREMOVE */ 41548c90682SYasunori Goto 41635fd1eb1SPavel Tatashin #ifdef CONFIG_SPARSEMEM_VMEMMAP 417afda57bcSPavel Tatashin static unsigned long __init section_map_size(void) 41835fd1eb1SPavel Tatashin { 41935fd1eb1SPavel Tatashin return ALIGN(sizeof(struct page) * PAGES_PER_SECTION, PMD_SIZE); 42035fd1eb1SPavel Tatashin } 42135fd1eb1SPavel Tatashin 42235fd1eb1SPavel Tatashin #else 423afda57bcSPavel Tatashin static unsigned long __init section_map_size(void) 424e131c06bSPavel Tatashin { 425e131c06bSPavel Tatashin return PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION); 426e131c06bSPavel Tatashin } 427e131c06bSPavel Tatashin 428e9c0a3f0SDan Williams struct page __init *__populate_section_memmap(unsigned long pfn, 429e3246d8fSJoao Martins unsigned long nr_pages, int nid, struct vmem_altmap *altmap, 430e3246d8fSJoao Martins struct dev_pagemap *pgmap) 43129751f69SAndy Whitcroft { 432e131c06bSPavel Tatashin unsigned long size = section_map_size(); 433e131c06bSPavel Tatashin struct page *map = sparse_buffer_alloc(size); 4348a7f97b9SMike Rapoport phys_addr_t addr = __pa(MAX_DMA_ADDRESS); 43529751f69SAndy Whitcroft 436e131c06bSPavel Tatashin if (map) 437e131c06bSPavel Tatashin return map; 438e131c06bSPavel Tatashin 439c803b3c8SMike Rapoport map = memmap_alloc(size, size, addr, nid, false); 4408a7f97b9SMike Rapoport if (!map) 4418a7f97b9SMike Rapoport panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa\n", 4428a7f97b9SMike Rapoport __func__, size, PAGE_SIZE, nid, &addr); 4438a7f97b9SMike Rapoport 4448f6aac41SChristoph Lameter return map; 4458f6aac41SChristoph Lameter } 4468f6aac41SChristoph Lameter #endif /* !CONFIG_SPARSEMEM_VMEMMAP */ 4478f6aac41SChristoph Lameter 44835fd1eb1SPavel Tatashin static void *sparsemap_buf __meminitdata; 44935fd1eb1SPavel Tatashin static void *sparsemap_buf_end __meminitdata; 45035fd1eb1SPavel Tatashin 451ae831894SLecopzer Chen static inline void __meminit sparse_buffer_free(unsigned long size) 452ae831894SLecopzer Chen { 453ae831894SLecopzer Chen WARN_ON(!sparsemap_buf || size == 0); 4544421cca0SMike Rapoport memblock_free(sparsemap_buf, size); 455ae831894SLecopzer Chen } 456ae831894SLecopzer Chen 457afda57bcSPavel Tatashin static void __init sparse_buffer_init(unsigned long size, int nid) 45835fd1eb1SPavel Tatashin { 4598a7f97b9SMike Rapoport phys_addr_t addr = __pa(MAX_DMA_ADDRESS); 46035fd1eb1SPavel Tatashin WARN_ON(sparsemap_buf); /* forgot to call sparse_buffer_fini()? */ 46109dbcf42SMichal Hocko /* 46209dbcf42SMichal Hocko * Pre-allocated buffer is mainly used by __populate_section_memmap 46309dbcf42SMichal Hocko * and we want it to be properly aligned to the section size - this is 46409dbcf42SMichal Hocko * especially the case for VMEMMAP which maps memmap to PMDs 46509dbcf42SMichal Hocko */ 466c803b3c8SMike Rapoport sparsemap_buf = memmap_alloc(size, section_map_size(), addr, nid, true); 46735fd1eb1SPavel Tatashin sparsemap_buf_end = sparsemap_buf + size; 46835fd1eb1SPavel Tatashin } 46935fd1eb1SPavel Tatashin 470afda57bcSPavel Tatashin static void __init sparse_buffer_fini(void) 47135fd1eb1SPavel Tatashin { 47235fd1eb1SPavel Tatashin unsigned long size = sparsemap_buf_end - sparsemap_buf; 47335fd1eb1SPavel Tatashin 47435fd1eb1SPavel Tatashin if (sparsemap_buf && size > 0) 475ae831894SLecopzer Chen sparse_buffer_free(size); 47635fd1eb1SPavel Tatashin sparsemap_buf = NULL; 47735fd1eb1SPavel Tatashin } 47835fd1eb1SPavel Tatashin 47935fd1eb1SPavel Tatashin void * __meminit sparse_buffer_alloc(unsigned long size) 48035fd1eb1SPavel Tatashin { 48135fd1eb1SPavel Tatashin void *ptr = NULL; 48235fd1eb1SPavel Tatashin 48335fd1eb1SPavel Tatashin if (sparsemap_buf) { 484db57e98dSLecopzer Chen ptr = (void *) roundup((unsigned long)sparsemap_buf, size); 48535fd1eb1SPavel Tatashin if (ptr + size > sparsemap_buf_end) 48635fd1eb1SPavel Tatashin ptr = NULL; 487ae831894SLecopzer Chen else { 488ae831894SLecopzer Chen /* Free redundant aligned space */ 489ae831894SLecopzer Chen if ((unsigned long)(ptr - sparsemap_buf) > 0) 490ae831894SLecopzer Chen sparse_buffer_free((unsigned long)(ptr - sparsemap_buf)); 49135fd1eb1SPavel Tatashin sparsemap_buf = ptr + size; 49235fd1eb1SPavel Tatashin } 493ae831894SLecopzer Chen } 49435fd1eb1SPavel Tatashin return ptr; 49535fd1eb1SPavel Tatashin } 49635fd1eb1SPavel Tatashin 4973b32123dSGideon Israel Dsouza void __weak __meminit vmemmap_populate_print_last(void) 498c2b91e2eSYinghai Lu { 499c2b91e2eSYinghai Lu } 500a4322e1bSYinghai Lu 50185c77f79SPavel Tatashin /* 50285c77f79SPavel Tatashin * Initialize sparse on a specific node. The node spans [pnum_begin, pnum_end) 50385c77f79SPavel Tatashin * And number of present sections in this node is map_count. 50485c77f79SPavel Tatashin */ 50585c77f79SPavel Tatashin static void __init sparse_init_nid(int nid, unsigned long pnum_begin, 50685c77f79SPavel Tatashin unsigned long pnum_end, 50785c77f79SPavel Tatashin unsigned long map_count) 50885c77f79SPavel Tatashin { 509f1eca35aSDan Williams struct mem_section_usage *usage; 510f1eca35aSDan Williams unsigned long pnum; 51185c77f79SPavel Tatashin struct page *map; 51285c77f79SPavel Tatashin 513f1eca35aSDan Williams usage = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nid), 514f1eca35aSDan Williams mem_section_usage_size() * map_count); 515f1eca35aSDan Williams if (!usage) { 51685c77f79SPavel Tatashin pr_err("%s: node[%d] usemap allocation failed", __func__, nid); 51785c77f79SPavel Tatashin goto failed; 51885c77f79SPavel Tatashin } 51985c77f79SPavel Tatashin sparse_buffer_init(map_count * section_map_size(), nid); 52085c77f79SPavel Tatashin for_each_present_section_nr(pnum_begin, pnum) { 521e9c0a3f0SDan Williams unsigned long pfn = section_nr_to_pfn(pnum); 522e9c0a3f0SDan Williams 52385c77f79SPavel Tatashin if (pnum >= pnum_end) 52485c77f79SPavel Tatashin break; 52585c77f79SPavel Tatashin 526e9c0a3f0SDan Williams map = __populate_section_memmap(pfn, PAGES_PER_SECTION, 527e3246d8fSJoao Martins nid, NULL, NULL); 52885c77f79SPavel Tatashin if (!map) { 52985c77f79SPavel Tatashin pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.", 53085c77f79SPavel Tatashin __func__, nid); 53185c77f79SPavel Tatashin pnum_begin = pnum; 5322284f47fSWang Wensheng sparse_buffer_fini(); 53385c77f79SPavel Tatashin goto failed; 53485c77f79SPavel Tatashin } 535f1eca35aSDan Williams check_usemap_section_nr(nid, usage); 536326e1b8fSDan Williams sparse_init_one_section(__nr_to_section(pnum), pnum, map, usage, 537326e1b8fSDan Williams SECTION_IS_EARLY); 538f1eca35aSDan Williams usage = (void *) usage + mem_section_usage_size(); 53985c77f79SPavel Tatashin } 54085c77f79SPavel Tatashin sparse_buffer_fini(); 54185c77f79SPavel Tatashin return; 54285c77f79SPavel Tatashin failed: 54385c77f79SPavel Tatashin /* We failed to allocate, mark all the following pnums as not present */ 54485c77f79SPavel Tatashin for_each_present_section_nr(pnum_begin, pnum) { 54585c77f79SPavel Tatashin struct mem_section *ms; 54685c77f79SPavel Tatashin 54785c77f79SPavel Tatashin if (pnum >= pnum_end) 54885c77f79SPavel Tatashin break; 54985c77f79SPavel Tatashin ms = __nr_to_section(pnum); 55085c77f79SPavel Tatashin ms->section_mem_map = 0; 55185c77f79SPavel Tatashin } 55285c77f79SPavel Tatashin } 55385c77f79SPavel Tatashin 55485c77f79SPavel Tatashin /* 55585c77f79SPavel Tatashin * Allocate the accumulated non-linear sections, allocate a mem_map 55685c77f79SPavel Tatashin * for each and record the physical to section mapping. 55785c77f79SPavel Tatashin */ 5582a3cb8baSPavel Tatashin void __init sparse_init(void) 55985c77f79SPavel Tatashin { 560c89ab04fSMike Rapoport unsigned long pnum_end, pnum_begin, map_count = 1; 561c89ab04fSMike Rapoport int nid_begin; 562c89ab04fSMike Rapoport 563c89ab04fSMike Rapoport memblocks_present(); 564c89ab04fSMike Rapoport 565c89ab04fSMike Rapoport pnum_begin = first_present_section_nr(); 566c89ab04fSMike Rapoport nid_begin = sparse_early_nid(__nr_to_section(pnum_begin)); 56785c77f79SPavel Tatashin 56885c77f79SPavel Tatashin /* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */ 56985c77f79SPavel Tatashin set_pageblock_order(); 57085c77f79SPavel Tatashin 57185c77f79SPavel Tatashin for_each_present_section_nr(pnum_begin + 1, pnum_end) { 57285c77f79SPavel Tatashin int nid = sparse_early_nid(__nr_to_section(pnum_end)); 57385c77f79SPavel Tatashin 57485c77f79SPavel Tatashin if (nid == nid_begin) { 57585c77f79SPavel Tatashin map_count++; 57685c77f79SPavel Tatashin continue; 57785c77f79SPavel Tatashin } 57885c77f79SPavel Tatashin /* Init node with sections in range [pnum_begin, pnum_end) */ 57985c77f79SPavel Tatashin sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count); 58085c77f79SPavel Tatashin nid_begin = nid; 58185c77f79SPavel Tatashin pnum_begin = pnum_end; 58285c77f79SPavel Tatashin map_count = 1; 58385c77f79SPavel Tatashin } 58485c77f79SPavel Tatashin /* cover the last node */ 58585c77f79SPavel Tatashin sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count); 58685c77f79SPavel Tatashin vmemmap_populate_print_last(); 58785c77f79SPavel Tatashin } 58885c77f79SPavel Tatashin 589193faea9SStephen Rothwell #ifdef CONFIG_MEMORY_HOTPLUG 5902d070eabSMichal Hocko 5912d070eabSMichal Hocko /* Mark all memory sections within the pfn range as online */ 5922d070eabSMichal Hocko void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn) 5932d070eabSMichal Hocko { 5942d070eabSMichal Hocko unsigned long pfn; 5952d070eabSMichal Hocko 5962d070eabSMichal Hocko for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { 597b4ccec41SMichal Hocko unsigned long section_nr = pfn_to_section_nr(pfn); 5982d070eabSMichal Hocko struct mem_section *ms; 5992d070eabSMichal Hocko 6002d070eabSMichal Hocko /* onlining code should never touch invalid ranges */ 6012d070eabSMichal Hocko if (WARN_ON(!valid_section_nr(section_nr))) 6022d070eabSMichal Hocko continue; 6032d070eabSMichal Hocko 6042d070eabSMichal Hocko ms = __nr_to_section(section_nr); 6052d070eabSMichal Hocko ms->section_mem_map |= SECTION_IS_ONLINE; 6062d070eabSMichal Hocko } 6072d070eabSMichal Hocko } 6082d070eabSMichal Hocko 6099b7ea46aSQian Cai /* Mark all memory sections within the pfn range as offline */ 6102d070eabSMichal Hocko void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn) 6112d070eabSMichal Hocko { 6122d070eabSMichal Hocko unsigned long pfn; 6132d070eabSMichal Hocko 6142d070eabSMichal Hocko for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { 61527227c73SPavel Tatashin unsigned long section_nr = pfn_to_section_nr(pfn); 6162d070eabSMichal Hocko struct mem_section *ms; 6172d070eabSMichal Hocko 6182d070eabSMichal Hocko /* 6192d070eabSMichal Hocko * TODO this needs some double checking. Offlining code makes 6202d070eabSMichal Hocko * sure to check pfn_valid but those checks might be just bogus 6212d070eabSMichal Hocko */ 6222d070eabSMichal Hocko if (WARN_ON(!valid_section_nr(section_nr))) 6232d070eabSMichal Hocko continue; 6242d070eabSMichal Hocko 6252d070eabSMichal Hocko ms = __nr_to_section(section_nr); 6262d070eabSMichal Hocko ms->section_mem_map &= ~SECTION_IS_ONLINE; 6272d070eabSMichal Hocko } 6282d070eabSMichal Hocko } 6292d070eabSMichal Hocko 63098f3cfc1SYasunori Goto #ifdef CONFIG_SPARSEMEM_VMEMMAP 631030eab4fSIlya Leoshkevich static struct page * __meminit populate_section_memmap(unsigned long pfn, 632e3246d8fSJoao Martins unsigned long nr_pages, int nid, struct vmem_altmap *altmap, 633e3246d8fSJoao Martins struct dev_pagemap *pgmap) 63498f3cfc1SYasunori Goto { 635e3246d8fSJoao Martins return __populate_section_memmap(pfn, nr_pages, nid, altmap, pgmap); 63698f3cfc1SYasunori Goto } 637e9c0a3f0SDan Williams 638e9c0a3f0SDan Williams static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages, 63924b6d416SChristoph Hellwig struct vmem_altmap *altmap) 64098f3cfc1SYasunori Goto { 641e9c0a3f0SDan Williams unsigned long start = (unsigned long) pfn_to_page(pfn); 642e9c0a3f0SDan Williams unsigned long end = start + nr_pages * sizeof(struct page); 6430aad818bSJohannes Weiner 64424b6d416SChristoph Hellwig vmemmap_free(start, end, altmap); 64598f3cfc1SYasunori Goto } 64681556b02SZhang Yanfei static void free_map_bootmem(struct page *memmap) 6470c0a4a51SYasunori Goto { 6480aad818bSJohannes Weiner unsigned long start = (unsigned long)memmap; 64981556b02SZhang Yanfei unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION); 6500aad818bSJohannes Weiner 65124b6d416SChristoph Hellwig vmemmap_free(start, end, NULL); 6520c0a4a51SYasunori Goto } 6536ecb0fc6SBaoquan He 6546ecb0fc6SBaoquan He static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages) 6556ecb0fc6SBaoquan He { 6566ecb0fc6SBaoquan He DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 }; 6576ecb0fc6SBaoquan He DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 }; 6586ecb0fc6SBaoquan He struct mem_section *ms = __pfn_to_section(pfn); 6596ecb0fc6SBaoquan He unsigned long *subsection_map = ms->usage 6606ecb0fc6SBaoquan He ? &ms->usage->subsection_map[0] : NULL; 6616ecb0fc6SBaoquan He 6626ecb0fc6SBaoquan He subsection_mask_set(map, pfn, nr_pages); 6636ecb0fc6SBaoquan He if (subsection_map) 6646ecb0fc6SBaoquan He bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION); 6656ecb0fc6SBaoquan He 6666ecb0fc6SBaoquan He if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION), 6676ecb0fc6SBaoquan He "section already deactivated (%#lx + %ld)\n", 6686ecb0fc6SBaoquan He pfn, nr_pages)) 6696ecb0fc6SBaoquan He return -EINVAL; 6706ecb0fc6SBaoquan He 6716ecb0fc6SBaoquan He bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION); 6726ecb0fc6SBaoquan He return 0; 6736ecb0fc6SBaoquan He } 6746ecb0fc6SBaoquan He 6756ecb0fc6SBaoquan He static bool is_subsection_map_empty(struct mem_section *ms) 6766ecb0fc6SBaoquan He { 6776ecb0fc6SBaoquan He return bitmap_empty(&ms->usage->subsection_map[0], 6786ecb0fc6SBaoquan He SUBSECTIONS_PER_SECTION); 6796ecb0fc6SBaoquan He } 6806ecb0fc6SBaoquan He 6816ecb0fc6SBaoquan He static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages) 6826ecb0fc6SBaoquan He { 6836ecb0fc6SBaoquan He struct mem_section *ms = __pfn_to_section(pfn); 6846ecb0fc6SBaoquan He DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 }; 6856ecb0fc6SBaoquan He unsigned long *subsection_map; 6866ecb0fc6SBaoquan He int rc = 0; 6876ecb0fc6SBaoquan He 6886ecb0fc6SBaoquan He subsection_mask_set(map, pfn, nr_pages); 6896ecb0fc6SBaoquan He 6906ecb0fc6SBaoquan He subsection_map = &ms->usage->subsection_map[0]; 6916ecb0fc6SBaoquan He 6926ecb0fc6SBaoquan He if (bitmap_empty(map, SUBSECTIONS_PER_SECTION)) 6936ecb0fc6SBaoquan He rc = -EINVAL; 6946ecb0fc6SBaoquan He else if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION)) 6956ecb0fc6SBaoquan He rc = -EEXIST; 6966ecb0fc6SBaoquan He else 6976ecb0fc6SBaoquan He bitmap_or(subsection_map, map, subsection_map, 6986ecb0fc6SBaoquan He SUBSECTIONS_PER_SECTION); 6996ecb0fc6SBaoquan He 7006ecb0fc6SBaoquan He return rc; 7016ecb0fc6SBaoquan He } 70298f3cfc1SYasunori Goto #else 70352bb85d6SArnd Bergmann static struct page * __meminit populate_section_memmap(unsigned long pfn, 704e3246d8fSJoao Martins unsigned long nr_pages, int nid, struct vmem_altmap *altmap, 705e3246d8fSJoao Martins struct dev_pagemap *pgmap) 7060b0acbecSDave Hansen { 7074027149aSBaoquan He return kvmalloc_node(array_size(sizeof(struct page), 7084027149aSBaoquan He PAGES_PER_SECTION), GFP_KERNEL, nid); 7090b0acbecSDave Hansen } 7100b0acbecSDave Hansen 711e9c0a3f0SDan Williams static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages, 7127b73d978SChristoph Hellwig struct vmem_altmap *altmap) 71398f3cfc1SYasunori Goto { 7143af776f6SBaoquan He kvfree(pfn_to_page(pfn)); 7150b0acbecSDave Hansen } 7160c0a4a51SYasunori Goto 71781556b02SZhang Yanfei static void free_map_bootmem(struct page *memmap) 7180c0a4a51SYasunori Goto { 7190c0a4a51SYasunori Goto unsigned long maps_section_nr, removing_section_nr, i; 72081556b02SZhang Yanfei unsigned long magic, nr_pages; 721ae64ffcaSJianguo Wu struct page *page = virt_to_page(memmap); 7220c0a4a51SYasunori Goto 72381556b02SZhang Yanfei nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page)) 72481556b02SZhang Yanfei >> PAGE_SHIFT; 72581556b02SZhang Yanfei 7260c0a4a51SYasunori Goto for (i = 0; i < nr_pages; i++, page++) { 727c5e97ed1SMatthew Wilcox (Oracle) magic = page->index; 7280c0a4a51SYasunori Goto 7290c0a4a51SYasunori Goto BUG_ON(magic == NODE_INFO); 7300c0a4a51SYasunori Goto 7310c0a4a51SYasunori Goto maps_section_nr = pfn_to_section_nr(page_to_pfn(page)); 732857e522aSYasuaki Ishimatsu removing_section_nr = page_private(page); 7330c0a4a51SYasunori Goto 7340c0a4a51SYasunori Goto /* 7350c0a4a51SYasunori Goto * When this function is called, the removing section is 7360c0a4a51SYasunori Goto * logical offlined state. This means all pages are isolated 7370c0a4a51SYasunori Goto * from page allocator. If removing section's memmap is placed 7380c0a4a51SYasunori Goto * on the same section, it must not be freed. 7390c0a4a51SYasunori Goto * If it is freed, page allocator may allocate it which will 7400c0a4a51SYasunori Goto * be removed physically soon. 7410c0a4a51SYasunori Goto */ 7420c0a4a51SYasunori Goto if (maps_section_nr != removing_section_nr) 7430c0a4a51SYasunori Goto put_page_bootmem(page); 7440c0a4a51SYasunori Goto } 7450c0a4a51SYasunori Goto } 7460b0acbecSDave Hansen 7470a9f9f62SBaoquan He static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages) 7480a9f9f62SBaoquan He { 7490a9f9f62SBaoquan He return 0; 7500a9f9f62SBaoquan He } 7510a9f9f62SBaoquan He 7520a9f9f62SBaoquan He static bool is_subsection_map_empty(struct mem_section *ms) 7530a9f9f62SBaoquan He { 7540a9f9f62SBaoquan He return true; 7550a9f9f62SBaoquan He } 7566ecb0fc6SBaoquan He 7576ecb0fc6SBaoquan He static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages) 7586ecb0fc6SBaoquan He { 7596ecb0fc6SBaoquan He return 0; 7606ecb0fc6SBaoquan He } 7616ecb0fc6SBaoquan He #endif /* CONFIG_SPARSEMEM_VMEMMAP */ 76237bc1502SBaoquan He 76395a5a34dSBaoquan He /* 76495a5a34dSBaoquan He * To deactivate a memory region, there are 3 cases to handle across 76595a5a34dSBaoquan He * two configurations (SPARSEMEM_VMEMMAP={y,n}): 76695a5a34dSBaoquan He * 76795a5a34dSBaoquan He * 1. deactivation of a partial hot-added section (only possible in 76895a5a34dSBaoquan He * the SPARSEMEM_VMEMMAP=y case). 76995a5a34dSBaoquan He * a) section was present at memory init. 77095a5a34dSBaoquan He * b) section was hot-added post memory init. 77195a5a34dSBaoquan He * 2. deactivation of a complete hot-added section. 77295a5a34dSBaoquan He * 3. deactivation of a complete section from memory init. 77395a5a34dSBaoquan He * 77495a5a34dSBaoquan He * For 1, when subsection_map does not empty we will not be freeing the 77595a5a34dSBaoquan He * usage map, but still need to free the vmemmap range. 77695a5a34dSBaoquan He * 77795a5a34dSBaoquan He * For 2 and 3, the SPARSEMEM_VMEMMAP={y,n} cases are unified 77895a5a34dSBaoquan He */ 77937bc1502SBaoquan He static void section_deactivate(unsigned long pfn, unsigned long nr_pages, 78037bc1502SBaoquan He struct vmem_altmap *altmap) 78137bc1502SBaoquan He { 78237bc1502SBaoquan He struct mem_section *ms = __pfn_to_section(pfn); 78337bc1502SBaoquan He bool section_is_early = early_section(ms); 78437bc1502SBaoquan He struct page *memmap = NULL; 78537bc1502SBaoquan He bool empty; 78637bc1502SBaoquan He 78737bc1502SBaoquan He if (clear_subsection_map(pfn, nr_pages)) 78837bc1502SBaoquan He return; 78995a5a34dSBaoquan He 79037bc1502SBaoquan He empty = is_subsection_map_empty(ms); 791d41e2f3bSBaoquan He if (empty) { 792ba72b4c8SDan Williams unsigned long section_nr = pfn_to_section_nr(pfn); 793ba72b4c8SDan Williams 7948068df3bSDavid Hildenbrand /* 7955ec8e8eaSCharan Teja Kalla * Mark the section invalid so that valid_section() 7965ec8e8eaSCharan Teja Kalla * return false. This prevents code from dereferencing 7975ec8e8eaSCharan Teja Kalla * ms->usage array. 7985ec8e8eaSCharan Teja Kalla */ 7995ec8e8eaSCharan Teja Kalla ms->section_mem_map &= ~SECTION_HAS_MEM_MAP; 8005ec8e8eaSCharan Teja Kalla 8015ec8e8eaSCharan Teja Kalla /* 8028068df3bSDavid Hildenbrand * When removing an early section, the usage map is kept (as the 8038068df3bSDavid Hildenbrand * usage maps of other sections fall into the same page). It 8048068df3bSDavid Hildenbrand * will be re-used when re-adding the section - which is then no 8058068df3bSDavid Hildenbrand * longer an early section. If the usage map is PageReserved, it 8068068df3bSDavid Hildenbrand * was allocated during boot. 8078068df3bSDavid Hildenbrand */ 8088068df3bSDavid Hildenbrand if (!PageReserved(virt_to_page(ms->usage))) { 8095ec8e8eaSCharan Teja Kalla kfree_rcu(ms->usage, rcu); 8105ec8e8eaSCharan Teja Kalla WRITE_ONCE(ms->usage, NULL); 811ba72b4c8SDan Williams } 812ba72b4c8SDan Williams memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr); 813ba72b4c8SDan Williams } 814ba72b4c8SDan Williams 815ef69bc9fSWei Yang /* 816ef69bc9fSWei Yang * The memmap of early sections is always fully populated. See 817ef69bc9fSWei Yang * section_activate() and pfn_valid() . 818ef69bc9fSWei Yang */ 819ef69bc9fSWei Yang if (!section_is_early) 820ba72b4c8SDan Williams depopulate_section_memmap(pfn, nr_pages, altmap); 821ef69bc9fSWei Yang else if (memmap) 822ef69bc9fSWei Yang free_map_bootmem(memmap); 823d41e2f3bSBaoquan He 824d41e2f3bSBaoquan He if (empty) 825d41e2f3bSBaoquan He ms->section_mem_map = (unsigned long)NULL; 826ba72b4c8SDan Williams } 827ba72b4c8SDan Williams 8285d87255cSBaoquan He static struct page * __meminit section_activate(int nid, unsigned long pfn, 829e3246d8fSJoao Martins unsigned long nr_pages, struct vmem_altmap *altmap, 830e3246d8fSJoao Martins struct dev_pagemap *pgmap) 8315d87255cSBaoquan He { 8325d87255cSBaoquan He struct mem_section *ms = __pfn_to_section(pfn); 8335d87255cSBaoquan He struct mem_section_usage *usage = NULL; 8345d87255cSBaoquan He struct page *memmap; 835f0ca8c25SLi zeming int rc; 8365d87255cSBaoquan He 8375d87255cSBaoquan He if (!ms->usage) { 8385d87255cSBaoquan He usage = kzalloc(mem_section_usage_size(), GFP_KERNEL); 8395d87255cSBaoquan He if (!usage) 8405d87255cSBaoquan He return ERR_PTR(-ENOMEM); 8415d87255cSBaoquan He ms->usage = usage; 8425d87255cSBaoquan He } 8435d87255cSBaoquan He 8445d87255cSBaoquan He rc = fill_subsection_map(pfn, nr_pages); 845ba72b4c8SDan Williams if (rc) { 846ba72b4c8SDan Williams if (usage) 847ba72b4c8SDan Williams ms->usage = NULL; 848ba72b4c8SDan Williams kfree(usage); 849ba72b4c8SDan Williams return ERR_PTR(rc); 850ba72b4c8SDan Williams } 851ba72b4c8SDan Williams 852ba72b4c8SDan Williams /* 853ba72b4c8SDan Williams * The early init code does not consider partially populated 854ba72b4c8SDan Williams * initial sections, it simply assumes that memory will never be 855ba72b4c8SDan Williams * referenced. If we hot-add memory into such a section then we 856ba72b4c8SDan Williams * do not need to populate the memmap and can simply reuse what 857ba72b4c8SDan Williams * is already there. 858ba72b4c8SDan Williams */ 859ba72b4c8SDan Williams if (nr_pages < PAGES_PER_SECTION && early_section(ms)) 860ba72b4c8SDan Williams return pfn_to_page(pfn); 861ba72b4c8SDan Williams 862e3246d8fSJoao Martins memmap = populate_section_memmap(pfn, nr_pages, nid, altmap, pgmap); 863ba72b4c8SDan Williams if (!memmap) { 864ba72b4c8SDan Williams section_deactivate(pfn, nr_pages, altmap); 865ba72b4c8SDan Williams return ERR_PTR(-ENOMEM); 866ba72b4c8SDan Williams } 867ba72b4c8SDan Williams 868ba72b4c8SDan Williams return memmap; 869ba72b4c8SDan Williams } 870ba72b4c8SDan Williams 8717567cfc5SBaoquan He /** 872ba72b4c8SDan Williams * sparse_add_section - add a memory section, or populate an existing one 8737567cfc5SBaoquan He * @nid: The node to add section on 8747567cfc5SBaoquan He * @start_pfn: start pfn of the memory range 875ba72b4c8SDan Williams * @nr_pages: number of pfns to add in the section 876e3246d8fSJoao Martins * @altmap: alternate pfns to allocate the memmap backing store 877e3246d8fSJoao Martins * @pgmap: alternate compound page geometry for devmap mappings 8787567cfc5SBaoquan He * 8797567cfc5SBaoquan He * This is only intended for hotplug. 8807567cfc5SBaoquan He * 88195a5a34dSBaoquan He * Note that only VMEMMAP supports sub-section aligned hotplug, 88295a5a34dSBaoquan He * the proper alignment and size are gated by check_pfn_span(). 88395a5a34dSBaoquan He * 88495a5a34dSBaoquan He * 8857567cfc5SBaoquan He * Return: 8867567cfc5SBaoquan He * * 0 - On success. 8877567cfc5SBaoquan He * * -EEXIST - Section has been present. 8887567cfc5SBaoquan He * * -ENOMEM - Out of memory. 889d41dee36SAndy Whitcroft */ 8907ea62160SDan Williams int __meminit sparse_add_section(int nid, unsigned long start_pfn, 891e3246d8fSJoao Martins unsigned long nr_pages, struct vmem_altmap *altmap, 892e3246d8fSJoao Martins struct dev_pagemap *pgmap) 89329751f69SAndy Whitcroft { 8940b0acbecSDave Hansen unsigned long section_nr = pfn_to_section_nr(start_pfn); 8950b0acbecSDave Hansen struct mem_section *ms; 8960b0acbecSDave Hansen struct page *memmap; 8970b0acbecSDave Hansen int ret; 89829751f69SAndy Whitcroft 8994e0d2e7eSWei Yang ret = sparse_index_init(section_nr, nid); 900ba72b4c8SDan Williams if (ret < 0) 901bbd06825SWANG Cong return ret; 90229751f69SAndy Whitcroft 903e3246d8fSJoao Martins memmap = section_activate(nid, start_pfn, nr_pages, altmap, pgmap); 904ba72b4c8SDan Williams if (IS_ERR(memmap)) 905ba72b4c8SDan Williams return PTR_ERR(memmap); 9065c0e3066SMel Gorman 907d0dc12e8SPavel Tatashin /* 908d0dc12e8SPavel Tatashin * Poison uninitialized struct pages in order to catch invalid flags 909d0dc12e8SPavel Tatashin * combinations. 910d0dc12e8SPavel Tatashin */ 911*c5f1e2d1SSumanth Korikkar if (!altmap || !altmap->inaccessible) 91218e19f19SWei Yang page_init_poison(memmap, sizeof(struct page) * nr_pages); 9133ac19f8eSWen Congyang 914c1cbc3eeSWei Yang ms = __nr_to_section(section_nr); 91526f26bedSWei Yang set_section_nid(section_nr, nid); 916a1bc561bSOhhoon Kwon __section_mark_present(ms, section_nr); 9170b0acbecSDave Hansen 918ba72b4c8SDan Williams /* Align memmap to section boundary in the subsection case */ 919ba72b4c8SDan Williams if (section_nr_to_pfn(section_nr) != start_pfn) 9204627d76dSWei Yang memmap = pfn_to_page(section_nr_to_pfn(section_nr)); 921ba72b4c8SDan Williams sparse_init_one_section(ms, section_nr, memmap, ms->usage, 0); 922ba72b4c8SDan Williams 923ba72b4c8SDan Williams return 0; 924d41dee36SAndy Whitcroft } 925ea01ea93SBadari Pulavarty 926bd5f79abSYajun Deng void sparse_remove_section(unsigned long pfn, unsigned long nr_pages, 927b9bf8d34SDavid Hildenbrand struct vmem_altmap *altmap) 928ea01ea93SBadari Pulavarty { 929bd5f79abSYajun Deng struct mem_section *ms = __pfn_to_section(pfn); 930bd5f79abSYajun Deng 931bd5f79abSYajun Deng if (WARN_ON_ONCE(!valid_section(ms))) 932bd5f79abSYajun Deng return; 933bd5f79abSYajun Deng 934ba72b4c8SDan Williams section_deactivate(pfn, nr_pages, altmap); 935ea01ea93SBadari Pulavarty } 9364edd7cefSDavid Rientjes #endif /* CONFIG_MEMORY_HOTPLUG */ 937