// SPDX-License-Identifier: GPL-2.0
/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section **mem_section;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
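/*
 * Use the narrowest type that can hold a node id so the table stays
 * small: for example, with MAX_PHYSMEM_BITS == 46 and
 * SECTION_SIZE_BITS == 27 there are 2^19 sections, so the u8 variant
 * of the table below costs 512KiB.
 */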
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(const struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static noinline struct mem_section __ref *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available()) {
		section = kzalloc_node(array_size, GFP_KERNEL, nid);
	} else {
		section = memblock_alloc_node(array_size, SMP_CACHE_BYTES,
					      nid);
		if (!section)
			panic("%s: Failed to allocate %lu bytes nid=%d\n",
			      __func__, array_size, nid);
	}

	return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;

	if (mem_section[root])
		return -EEXIST;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;

	mem_section[root] = section;

	return 0;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

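/*
 * __section_nr() is the inverse of __nr_to_section(): given a
 * mem_section pointer, recover its section number.  With
 * SPARSEMEM_EXTREME the roots are allocated individually, so the
 * containing root has to be found by scanning mem_section[]; in the
 * flat case it is plain pointer arithmetic.
 */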
#ifdef CONFIG_SPARSEMEM_EXTREME
int __section_nr(struct mem_section *ms)
{
	unsigned long root_nr;
	struct mem_section *root = NULL;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	VM_BUG_ON(!root);

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
#else
int __section_nr(struct mem_section *ms)
{
	return (int)(ms - mem_section[0]);
}
#endif

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node.  This keeps us from having to use another data structure.  The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}

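/*
 * For example, memory_present() called for a range on node 2 stores
 * sparse_encode_early_nid(2), i.e. 2 << SECTION_NID_SHIFT (together
 * with SECTION_IS_ONLINE), in section_mem_map until sparse_init()
 * replaces it with the encoded mem_map pointer.
 */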
/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	} else if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*end_pfn = max_sparsemem_pfn;
	}
}

/*
 * There are a number of times that we loop over NR_MEM_SECTIONS,
 * looking for section_present() on each.  But, when we have very
 * large physical address spaces, NR_MEM_SECTIONS can also be
 * very large which makes the loops quite long.
 *
 * Keeping track of this gives us an easy way to break out of
 * those loops early.
 */
int __highest_present_section_nr;
static void section_mark_present(struct mem_section *ms)
{
	int section_nr = __section_nr(ms);

	if (section_nr > __highest_present_section_nr)
		__highest_present_section_nr = section_nr;

	ms->section_mem_map |= SECTION_MARKED_PRESENT;
}

static inline int next_present_section_nr(int section_nr)
{
	do {
		section_nr++;
		if (present_section_nr(section_nr))
			return section_nr;
	} while ((section_nr <= __highest_present_section_nr));

	return -1;
}

#define for_each_present_section_nr(start, section_nr)		\
	for (section_nr = next_present_section_nr(start-1);	\
	     ((section_nr != -1) &&				\
	      (section_nr <= __highest_present_section_nr));	\
	     section_nr = next_present_section_nr(section_nr))

static inline unsigned long first_present_section_nr(void)
{
	return next_present_section_nr(-1);
}

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

#ifdef CONFIG_SPARSEMEM_EXTREME
	if (unlikely(!mem_section)) {
		unsigned long size, align;

		size = sizeof(struct mem_section *) * NR_SECTION_ROOTS;
		align = 1 << (INTERNODE_CACHE_SHIFT);
		mem_section = memblock_alloc(size, align);
		if (!mem_section)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, size, align);
	}
#endif

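	/*
	 * Round the start pfn down to a section boundary; the
	 * validation below clamps the range to what SPARSEMEM can
	 * address.
	 */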
	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map) {
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_IS_ONLINE;
			section_mark_present(ms);
		}
	}
}

/*
 * Mark all memblocks as present using memory_present().  This is a
 * convenience function that is useful for a number of arches to mark
 * all of the system's memory as present during initialization.
 */
void __init memblocks_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		memory_present(memblock_get_region_node(reg),
			       memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
	}
}

/*
 * Subtle: we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	unsigned long coded_mem_map =
		(unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
	BUILD_BUG_ON(SECTION_MAP_LAST_BIT > (1UL<<PFN_SECTION_SHIFT));
	BUG_ON(coded_mem_map & ~SECTION_MAP_MASK);
	return coded_mem_map;
}

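/*
 * Worked example of the identity above: for a page at pfn p inside
 * section pnum, coded_mem_map + p == &mem_map[p - section_start_pfn],
 * i.e. the struct page for p, so page_to_pfn() reduces to a plain
 * pointer subtraction.
 */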
/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}

static void __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		unsigned long *pageblock_bitmap)
{
	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
							SECTION_HAS_MEM_MAP;
	ms->pageblock_flags = pageblock_bitmap;
}

unsigned long usemap_size(void)
{
	return BITS_TO_LONGS(SECTION_BLOCKFLAGS_BITS) * sizeof(unsigned long);
}

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
	return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTREMOVE
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	unsigned long goal, limit;
	unsigned long *p;
	int nid;

	/*
	 * A page may contain usemaps for other sections preventing the
	 * page being freed and making a section unremovable while
	 * other sections referencing the usemap remain active.  Similarly,
	 * a pgdat can prevent a section being removed.  If section A
	 * contains a pgdat and section B contains the usemap, both
	 * sections become inter-dependent.  This allocates usemaps
	 * from the same section as the pgdat where possible to avoid
	 * this problem.
	 */
	goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
	limit = goal + (1UL << PA_SECTION_SHIFT);
	nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
	p = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, goal, limit, nid);
	if (!p && limit) {
		limit = 0;
		goto again;
	}
	return p;
}

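/*
 * Diagnostic for the strategy above: report, via pr_info(), the case
 * where a usemap still ends up in a different section than its node's
 * pgdat, since those two sections then become removable only together.
 */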
static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
	unsigned long usemap_snr, pgdat_snr;
	static unsigned long old_usemap_snr;
	static unsigned long old_pgdat_snr;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int usemap_nid;

	/* First call */
	if (!old_usemap_snr) {
		old_usemap_snr = NR_MEM_SECTIONS;
		old_pgdat_snr = NR_MEM_SECTIONS;
	}

	usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
	pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	if (usemap_snr == pgdat_snr)
		return;

	if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
		/* skip redundant message */
		return;

	old_usemap_snr = usemap_snr;
	old_pgdat_snr = pgdat_snr;

	usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
	if (usemap_nid != nid) {
		pr_info("node %d must be removed before removing section %ld\n",
			nid, usemap_snr);
		return;
	}
	/*
	 * There is a circular dependency.
	 * Some platforms allow un-removable sections because they will just
	 * gather other removable sections for dynamic partitioning.
	 * Just notify the un-removable section's number here.
	 */
	pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n",
		usemap_snr, pgdat_snr, nid);
}
#else
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	return memblock_alloc_node(size, SMP_CACHE_BYTES, pgdat->node_id);
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

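/*
 * Size of one section's mem_map backing: with SPARSEMEM_VMEMMAP it is
 * rounded up to PMD_SIZE so that the virtual memmap can be mapped with
 * huge pages where the architecture supports it; otherwise plain page
 * granularity is enough.
 */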
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static unsigned long __init section_map_size(void)
{
	return ALIGN(sizeof(struct page) * PAGES_PER_SECTION, PMD_SIZE);
}

#else
static unsigned long __init section_map_size(void)
{
	return PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
}

struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid,
		struct vmem_altmap *altmap)
{
	unsigned long size = section_map_size();
	struct page *map = sparse_buffer_alloc(size);
	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);

	if (map)
		return map;

	map = memblock_alloc_try_nid(size, PAGE_SIZE, addr,
				     MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	if (!map)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa\n",
		      __func__, size, PAGE_SIZE, nid, &addr);

	return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

static void *sparsemap_buf __meminitdata;
static void *sparsemap_buf_end __meminitdata;

static void __init sparse_buffer_init(unsigned long size, int nid)
{
	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);

	WARN_ON(sparsemap_buf);	/* forgot to call sparse_buffer_fini()? */
	sparsemap_buf =
		memblock_alloc_try_nid_raw(size, PAGE_SIZE, addr,
					   MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	sparsemap_buf_end = sparsemap_buf + size;
}

static void __init sparse_buffer_fini(void)
{
	unsigned long size = sparsemap_buf_end - sparsemap_buf;

	if (sparsemap_buf && size > 0)
		memblock_free_early(__pa(sparsemap_buf), size);
	sparsemap_buf = NULL;
}

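/*
 * Carve the next naturally-aligned chunk out of the per-node buffer
 * set up by sparse_buffer_init().  Returns NULL once the buffer is
 * exhausted, in which case callers fall back to allocating from
 * memblock directly.
 */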
void * __meminit sparse_buffer_alloc(unsigned long size)
{
	void *ptr = NULL;

	if (sparsemap_buf) {
		ptr = PTR_ALIGN(sparsemap_buf, size);
		if (ptr + size > sparsemap_buf_end)
			ptr = NULL;
		else
			sparsemap_buf = ptr + size;
	}
	return ptr;
}

void __weak __meminit vmemmap_populate_print_last(void)
{
}

/*
 * Initialize sparse on a specific node.  The node spans
 * [pnum_begin, pnum_end), and the number of present sections in this
 * node is map_count.
 */
static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
				   unsigned long pnum_end,
				   unsigned long map_count)
{
	unsigned long pnum, usemap_longs, *usemap;
	struct page *map;

	usemap_longs = BITS_TO_LONGS(SECTION_BLOCKFLAGS_BITS);
	usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nid),
							  usemap_size() *
							  map_count);
	if (!usemap) {
		pr_err("%s: node[%d] usemap allocation failed\n", __func__, nid);
		goto failed;
	}
	sparse_buffer_init(map_count * section_map_size(), nid);
	for_each_present_section_nr(pnum_begin, pnum) {
		if (pnum >= pnum_end)
			break;

		map = sparse_mem_map_populate(pnum, nid, NULL);
		if (!map) {
			pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.\n",
			       __func__, nid);
			pnum_begin = pnum;
			goto failed;
		}
		check_usemap_section_nr(nid, usemap);
		sparse_init_one_section(__nr_to_section(pnum), pnum, map, usemap);
		usemap += usemap_longs;
	}
	sparse_buffer_fini();
	return;
failed:
	/* We failed to allocate; mark all the following pnums as not present. */
	for_each_present_section_nr(pnum_begin, pnum) {
		struct mem_section *ms;

		if (pnum >= pnum_end)
			break;
		ms = __nr_to_section(pnum);
		ms->section_mem_map = 0;
	}
}

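/*
 * sparse_init() below walks the present sections in ascending order,
 * batches consecutive runs that share a node id, and hands each batch
 * to sparse_init_nid() as [pnum_begin, pnum_end).
 */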
/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum_begin = first_present_section_nr();
	int nid_begin = sparse_early_nid(__nr_to_section(pnum_begin));
	unsigned long pnum_end, map_count = 1;

	/* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
	set_pageblock_order();

	for_each_present_section_nr(pnum_begin + 1, pnum_end) {
		int nid = sparse_early_nid(__nr_to_section(pnum_end));

		if (nid == nid_begin) {
			map_count++;
			continue;
		}
		/* Init node with sections in range [pnum_begin, pnum_end) */
		sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
		nid_begin = nid;
		pnum_begin = pnum_end;
		map_count = 1;
	}
	/* cover the last node */
	sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
	vmemmap_populate_print_last();
}

#ifdef CONFIG_MEMORY_HOTPLUG

/* Mark all memory sections within the pfn range as online */
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/* onlining code should never touch invalid ranges */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map |= SECTION_IS_ONLINE;
	}
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/* Mark all memory sections within the pfn range as offline */
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/*
		 * TODO: this needs some double checking.  Offlining code makes
		 * sure to check pfn_valid but those checks might be just bogus.
		 */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map &= ~SECTION_IS_ONLINE;
	}
}
#endif

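/*
 * Hotplug memmap allocation: with SPARSEMEM_VMEMMAP the section's
 * memmap is created by populating its range of the virtual memmap;
 * without it, the memmap is an ordinary page allocation with a
 * vmalloc() fallback.
 */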
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
		struct vmem_altmap *altmap)
{
	/* This will make the necessary allocations eventually. */
	return sparse_mem_map_populate(pnum, nid, altmap);
}
static void __kfree_section_memmap(struct page *memmap,
		struct vmem_altmap *altmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end, altmap);
}
static void free_map_bootmem(struct page *memmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end, NULL);
}
#else
static struct page *__kmalloc_section_memmap(void)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;

	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:

	return ret;
}

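/*
 * The order of attempts above is deliberate: a physically contiguous
 * high-order allocation is preferred, and __GFP_NOWARN keeps the
 * expected failure quiet on fragmented systems before falling back to
 * vmalloc().
 */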
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
		struct vmem_altmap *altmap)
{
	return __kmalloc_section_memmap();
}

static void __kfree_section_memmap(struct page *memmap,
		struct vmem_altmap *altmap)
{
	if (is_vmalloc_addr(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * PAGES_PER_SECTION));
}

static void free_map_bootmem(struct page *memmap)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	unsigned long magic, nr_pages;
	struct page *page = virt_to_page(memmap);

	nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
		>> PAGE_SHIFT;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = (unsigned long) page->freelist;

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page_private(page);

		/*
		 * When this function is called, the section being removed is
		 * in a logically offlined state, which means all of its pages
		 * are isolated from the page allocator.  If the removed
		 * section's memmap is placed on the same section, it must not
		 * be freed: if it were, the page allocator could hand it out
		 * even though it will be removed physically soon.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/**
 * sparse_add_one_section - add a memory section
 * @nid: The node on which to add the section
 * @start_pfn: start pfn of the memory range
 * @altmap: device page map
 *
 * This is only intended for hotplug.
 *
 * Return:
 * * 0		- On success.
 * * -EEXIST	- Section is already present.
 * * -ENOMEM	- Out of memory.
 */
int __meminit sparse_add_one_section(int nid, unsigned long start_pfn,
				     struct vmem_altmap *altmap)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct mem_section *ms;
	struct page *memmap;
	unsigned long *usemap;
	int ret;

	/*
	 * No locking for this, because sparse_index_init() does its
	 * own locking, and it does a kmalloc.
	 */
	ret = sparse_index_init(section_nr, nid);
	if (ret < 0 && ret != -EEXIST)
		return ret;
	ret = 0;
	memmap = kmalloc_section_memmap(section_nr, nid, altmap);
	if (!memmap)
		return -ENOMEM;
	usemap = __kmalloc_section_usemap();
	if (!usemap) {
		__kfree_section_memmap(memmap, altmap);
		return -ENOMEM;
	}

	ms = __pfn_to_section(start_pfn);
	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
		ret = -EEXIST;
		goto out;
	}

	/*
	 * Poison uninitialized struct pages in order to catch invalid flags
	 * combinations.
	 */
	page_init_poison(memmap, sizeof(struct page) * PAGES_PER_SECTION);

	section_mark_present(ms);
	sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
	if (ret < 0) {
		kfree(usemap);
		__kfree_section_memmap(memmap, altmap);
	}
	return ret;
}

#ifdef CONFIG_MEMORY_FAILURE
static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
	int i;

	if (!memmap)
		return;

	/*
	 * A further optimization is to have per-section refcounted
	 * num_poisoned_pages.  But that would need more space per memmap, so
	 * for now just do a quick global check to speed up this routine in the
	 * absence of bad pages.
	 */
	if (atomic_long_read(&num_poisoned_pages) == 0)
		return;

	for (i = 0; i < nr_pages; i++) {
		if (PageHWPoison(&memmap[i])) {
			atomic_long_sub(1, &num_poisoned_pages);
			ClearPageHWPoison(&memmap[i]);
		}
	}
}
#else
static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
}
#endif

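/*
 * A hot-added usemap comes from kmalloc(), so its backing page is
 * either a slab page or, for larger sizes, a compound page; a
 * boot-time usemap comes straight from memblock and is neither.
 * free_section_usemap() below relies on that distinction.
 */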
static void free_section_usemap(struct page *memmap, unsigned long *usemap,
		struct vmem_altmap *altmap)
{
	struct page *usemap_page;

	if (!usemap)
		return;

	usemap_page = virt_to_page(usemap);
	/*
	 * Check to see if the allocation came from hot-plug-add
	 */
	if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
		kfree(usemap);
		if (memmap)
			__kfree_section_memmap(memmap, altmap);
		return;
	}

	/*
	 * The usemap came from bootmem.  This is packed with other usemaps
	 * on the section which has pgdat at boot time.  Just keep it as is now.
	 */

	if (memmap)
		free_map_bootmem(memmap);
}

void sparse_remove_one_section(struct mem_section *ms, unsigned long map_offset,
			       struct vmem_altmap *altmap)
{
	struct page *memmap = NULL;
	unsigned long *usemap = NULL;

	if (ms->section_mem_map) {
		usemap = ms->pageblock_flags;
		memmap = sparse_decode_mem_map(ms->section_mem_map,
						__section_nr(ms));
		ms->section_mem_map = 0;
		ms->pageblock_flags = NULL;
	}

	clear_hwpoisoned_pages(memmap + map_offset,
			       PAGES_PER_SECTION - map_offset);
	free_section_usemap(memmap, usemap, altmap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */