/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
	____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);
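
/*
 * Illustration (the numbers are config-dependent, not taken from this
 * file): with 4K pages and a 16-byte struct mem_section,
 * SECTIONS_PER_ROOT is PAGE_SIZE / sizeof(struct mem_section) == 256,
 * so in the EXTREME case section number 1000 lives at
 *
 *	mem_section[1000 / 256][1000 % 256]	(root 3, entry 232)
 *
 * and only roots that contain present sections ever have a page
 * allocated for them.
 */
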
#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(const struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available()) {
		if (node_state(nid, N_HIGH_MEMORY))
			section = kzalloc_node(array_size, GFP_KERNEL, nid);
		else
			section = kzalloc(array_size, GFP_KERNEL);
	} else {
		section = memblock_virt_alloc_node(array_size, nid);
	}

	return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;

	if (mem_section[root])
		return -EEXIST;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;

	mem_section[root] = section;

	return 0;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif
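
/*
 * Note for callers: an -EEXIST return from sparse_index_init() only
 * means the root was already set up (e.g. by an earlier section in the
 * same root) and is harmless; sparse_add_one_section() below treats it
 * that way and only fails on other errors.
 */
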
/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 */
int __section_nr(struct mem_section* ms)
{
	unsigned long root_nr;
	struct mem_section* root;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	VM_BUG_ON(root_nr == NR_SECTION_ROOTS);

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node.  This keeps us from having to use another data structure.  The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}

/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	} else if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*end_pfn = max_sparsemem_pfn;
	}
}
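
/*
 * For example, with the common x86-64 values MAX_PHYSMEM_BITS == 46 and
 * PAGE_SHIFT == 12, max_sparsemem_pfn above is 1UL << 34: any range
 * reaching past the 64TB mark is clamped and warned about.
 */
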
/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map)
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_MARKED_PRESENT;
	}
}

/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
						     unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long nr_pages = 0;

	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (nid != early_pfn_to_nid(pfn))
			continue;

		if (pfn_present(pfn))
			nr_pages += PAGES_PER_SECTION;
	}

	return nr_pages * sizeof(struct page);
}
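
/*
 * Typical (arch-dependent) usage sketch for memory_present(): early
 * boot code marks every physical memory range before sparse_init()
 * runs, along the lines of
 *
 *	for_each_memblock(memory, reg)
 *		memory_present(memblock_get_region_node(reg),
 *			       memblock_region_memory_base_pfn(reg),
 *			       memblock_region_memory_end_pfn(reg));
 */
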
/*
 * Subtle, we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
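
/*
 * The two helpers above are inverses (ignoring the flag bits packed
 * into the low bits of the encoded value):
 *
 *	sparse_decode_mem_map(sparse_encode_mem_map(map, pnum), pnum) == map
 *
 * and, with the pfn offset pre-subtracted, pfn_to_page() on the stored
 * value reduces to a single addition.
 */
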
static int __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		unsigned long *pageblock_bitmap)
{
	if (!present_section(ms))
		return -EINVAL;

	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
							SECTION_HAS_MEM_MAP;
	ms->pageblock_flags = pageblock_bitmap;

	return 1;
}

unsigned long usemap_size(void)
{
	unsigned long size_bytes;
	size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
	size_bytes = roundup(size_bytes, sizeof(unsigned long));
	return size_bytes;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
	return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTREMOVE
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	unsigned long goal, limit;
	unsigned long *p;
	int nid;
	/*
	 * A page may contain usemaps for other sections preventing the
	 * page being freed and making a section unremovable while
	 * other sections referencing the usemap remain active. Similarly,
	 * a pgdat can prevent a section being removed. If section A
	 * contains a pgdat and section B contains the usemap, both
	 * sections become inter-dependent. This allocates usemaps
	 * from the same section as the pgdat where possible to avoid
	 * this problem.
	 */
	goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
	limit = goal + (1UL << PA_SECTION_SHIFT);
	nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
	p = memblock_virt_alloc_try_nid_nopanic(size,
						SMP_CACHE_BYTES, goal, limit,
						nid);
	if (!p && limit) {
		limit = 0;
		goto again;
	}
	return p;
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
	unsigned long usemap_snr, pgdat_snr;
	static unsigned long old_usemap_snr = NR_MEM_SECTIONS;
	static unsigned long old_pgdat_snr = NR_MEM_SECTIONS;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int usemap_nid;

	usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
	pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	if (usemap_snr == pgdat_snr)
		return;

	if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
		/* skip redundant message */
		return;

	old_usemap_snr = usemap_snr;
	old_pgdat_snr = pgdat_snr;

	usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
	if (usemap_nid != nid) {
		pr_info("node %d must be removed before removing section %ld\n",
			nid, usemap_snr);
		return;
	}
	/*
	 * There is a circular dependency.
	 * Some platforms allow an un-removable section because they will
	 * just gather other removable sections for dynamic partitioning.
	 * Just report the un-removable section's number here.
	 */
	pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n",
		usemap_snr, pgdat_snr, nid);
}
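
/*
 * Concrete illustration of what the check above reports: if a node's
 * pgdat sits in section 100 while its usemaps could only be placed in
 * section 200, then section 200 cannot be offlined while the usemaps
 * are live and section 100 cannot be offlined while the pgdat is live,
 * so the two sections pin each other until the whole node goes away.
 */
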
#else
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	return memblock_virt_alloc_node_nopanic(size, pgdat->node_id);
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

static void __init sparse_early_usemaps_alloc_node(void *data,
				 unsigned long pnum_begin,
				 unsigned long pnum_end,
				 unsigned long usemap_count, int nodeid)
{
	void *usemap;
	unsigned long pnum;
	unsigned long **usemap_map = (unsigned long **)data;
	int size = usemap_size();

	usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
							  size * usemap_count);
	if (!usemap) {
		pr_warn("%s: allocation failed\n", __func__);
		return;
	}

	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		if (!present_section_nr(pnum))
			continue;
		usemap_map[pnum] = usemap;
		usemap += size;
		check_usemap_section_nr(nodeid, usemap_map[pnum]);
	}
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map;
	unsigned long size;

	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
	map = memblock_virt_alloc_try_nid(size,
					  PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
					  BOOTMEM_ALLOC_ACCESSIBLE, nid);
	return map;
}
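
/*
 * Size illustration (numbers are config-dependent): with a 64-byte
 * struct page and 32768 pages per 128MB section (x86-64 with 4K pages),
 * each section's mem_map is 2MB, which is why the batched allocator
 * below tries to lay a node's maps out back to back.
 */
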
void __init sparse_mem_maps_populate_node(struct page **map_map,
					  unsigned long pnum_begin,
					  unsigned long pnum_end,
					  unsigned long map_count, int nodeid)
{
	void *map;
	unsigned long pnum;
	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;

	map = alloc_remap(nodeid, size * map_count);
	if (map) {
		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
			if (!present_section_nr(pnum))
				continue;
			map_map[pnum] = map;
			map += size;
		}
		return;
	}

	size = PAGE_ALIGN(size);
	map = memblock_virt_alloc_try_nid(size * map_count,
					  PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
					  BOOTMEM_ALLOC_ACCESSIBLE, nodeid);
	if (map) {
		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
			if (!present_section_nr(pnum))
				continue;
			map_map[pnum] = map;
			map += size;
		}
		return;
	}

	/* fallback */
	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;
		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
		if (map_map[pnum])
			continue;
		ms = __nr_to_section(pnum);
		pr_err("%s: sparsemem memory map backing failed, some memory will not be available\n",
		       __func__);
		ms->section_mem_map = 0;
	}
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
static void __init sparse_early_mem_maps_alloc_node(void *data,
				 unsigned long pnum_begin,
				 unsigned long pnum_end,
				 unsigned long map_count, int nodeid)
{
	struct page **map_map = (struct page **)data;
	sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
					map_count, nodeid);
}
#else
static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	map = sparse_mem_map_populate(pnum, nid);
	if (map)
		return map;

	pr_err("%s: sparsemem memory map backing failed, some memory will not be available\n",
	       __func__);
	ms->section_mem_map = 0;
	return NULL;
}
#endif

void __weak __meminit vmemmap_populate_print_last(void)
{
}
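
/*
 * The empty weak definition above is a hook: architectures whose
 * vmemmap code batches its "populated range" messages (x86-64, for
 * instance) override it so the last pending message is flushed once
 * sparse_init() has populated every section.
 */
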
/**
 * alloc_usemap_and_memmap - memory allocation for pageblock flags and vmemmap
 * @alloc_func: function that allocates the usemaps or memmaps for a range
 *		of sections on one node
 * @data: usemap_map for pageblock flags or map_map for vmemmap
 */
static void __init alloc_usemap_and_memmap(void (*alloc_func)
				(void *, unsigned long, unsigned long,
				unsigned long, int), void *data)
{
	unsigned long pnum;
	unsigned long map_count;
	int nodeid_begin = 0;
	unsigned long pnum_begin = 0;

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;
		ms = __nr_to_section(pnum);
		nodeid_begin = sparse_early_nid(ms);
		pnum_begin = pnum;
		break;
	}
	map_count = 1;
	for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
		struct mem_section *ms;
		int nodeid;

		if (!present_section_nr(pnum))
			continue;
		ms = __nr_to_section(pnum);
		nodeid = sparse_early_nid(ms);
		if (nodeid == nodeid_begin) {
			map_count++;
			continue;
		}
		/* ok, we need to take care of from pnum_begin to pnum - 1 */
		alloc_func(data, pnum_begin, pnum,
						map_count, nodeid_begin);
		/* new start, update count etc. */
		nodeid_begin = nodeid;
		pnum_begin = pnum;
		map_count = 1;
	}
	/* ok, last chunk */
	alloc_func(data, pnum_begin, NR_MEM_SECTIONS,
						map_count, nodeid_begin);
}
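
/*
 * Worked example (hypothetical layout): if sections 0 and 1 are present
 * on node 0 and sections 5 and 6 on node 1, the loops above issue
 *
 *	alloc_func(data, 0, 5, 2, 0);
 *	alloc_func(data, 5, NR_MEM_SECTIONS, 2, 1);
 *
 * i.e. one call per node, with map_count counting only the present
 * sections within each [pnum_begin, pnum_end) range.
 */
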
/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum;
	struct page *map;
	unsigned long *usemap;
	unsigned long **usemap_map;
	int size;
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	int size2;
	struct page **map_map;
#endif

	/* see include/linux/mmzone.h 'struct mem_section' definition */
	BUILD_BUG_ON(!is_power_of_2(sizeof(struct mem_section)));

	/* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
	set_pageblock_order();

	/*
	 * Each mem_map is allocated with big pages (2M on 64-bit x86)
	 * while a usemap is much smaller than a page (24 bytes), so
	 * allocating them alternately (2M, 24 bytes, 2M, ...) pushes
	 * every subsequent 2M allocation to the next 2M boundary and, on
	 * a big system, riddles the memory with holes.  Instead, try to
	 * allocate the 2M pages contiguously.
	 *
	 * powerpc needs to call sparse_init_one_section right after each
	 * sparse_early_mem_map_alloc, so allocate usemap_map first.
	 */
	size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
	usemap_map = memblock_virt_alloc(size, 0);
	if (!usemap_map)
		panic("can not allocate usemap_map\n");
	alloc_usemap_and_memmap(sparse_early_usemaps_alloc_node,
							(void *)usemap_map);

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
	map_map = memblock_virt_alloc(size2, 0);
	if (!map_map)
		panic("can not allocate map_map\n");
	alloc_usemap_and_memmap(sparse_early_mem_maps_alloc_node,
							(void *)map_map);
#endif

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!present_section_nr(pnum))
			continue;

		usemap = usemap_map[pnum];
		if (!usemap)
			continue;

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
		map = map_map[pnum];
#else
		map = sparse_early_mem_map_alloc(pnum);
#endif
		if (!map)
			continue;

		sparse_init_one_section(__nr_to_section(pnum), pnum, map,
								usemap);
	}

	vmemmap_populate_print_last();

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	memblock_free_early(__pa(map_map), size2);
#endif
	memblock_free_early(__pa(usemap_map), size);
}
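
/*
 * Boot-order sketch (simplified, arch code varies): memory_present()
 * must have marked every section before sparse_init() runs, and only
 * afterwards do pfn_to_page()/page_to_pfn() work for present sections:
 *
 *	memory_present(nid, start_pfn, end_pfn);	// per memory range
 *	...
 *	sparse_init();
 *	...
 *	free_area_init_node(...);			// uses the mem_map
 */
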
#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
{
	/* This will make the necessary allocations eventually. */
	return sparse_mem_map_populate(pnum, nid);
}
static void __kfree_section_memmap(struct page *memmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#else
static struct page *__kmalloc_section_memmap(void)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;

	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:

	return ret;
}

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
{
	return __kmalloc_section_memmap();
}

static void __kfree_section_memmap(struct page *memmap)
{
	if (is_vmalloc_addr(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * PAGES_PER_SECTION));
}
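
/*
 * Note on the pairing above: __kmalloc_section_memmap() prefers
 * physically contiguous pages (__GFP_NOWARN, since a fallback exists)
 * and falls back to vmalloc() when high-order pages are unavailable;
 * is_vmalloc_addr() is what lets the free side pick the matching
 * release path.
 */
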
#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	unsigned long magic, nr_pages;
	struct page *page = virt_to_page(memmap);

	nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
		>> PAGE_SHIFT;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = (unsigned long) page->lru.next;

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page->private;

		/*
		 * When this function is called, the section being removed
		 * is in a logically offlined state, meaning all of its
		 * pages have been isolated from the page allocator.  If
		 * the section's memmap is placed within the section
		 * itself, it must not be freed: the page allocator could
		 * hand it out again just before the memory is physically
		 * removed.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * returns the number of sections whose mem_maps were properly
 * set.  If this is <= 0, the memmap and usemap allocated here are not
 * consumed and are freed again before returning.
 */
int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct mem_section *ms;
	struct page *memmap;
	unsigned long *usemap;
	unsigned long flags;
	int ret;

	/*
	 * No locking for this, because sparse_index_init() does its own;
	 * besides, it does a kmalloc and so may sleep.
	 */
	ret = sparse_index_init(section_nr, pgdat->node_id);
	if (ret < 0 && ret != -EEXIST)
		return ret;
	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id);
	if (!memmap)
		return -ENOMEM;
	usemap = __kmalloc_section_usemap();
	if (!usemap) {
		__kfree_section_memmap(memmap);
		return -ENOMEM;
	}

	pgdat_resize_lock(pgdat, &flags);

	ms = __pfn_to_section(start_pfn);
	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
		ret = -EEXIST;
		goto out;
	}

	memset(memmap, 0, sizeof(struct page) * PAGES_PER_SECTION);

	ms->section_mem_map |= SECTION_MARKED_PRESENT;

	ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
	pgdat_resize_unlock(pgdat, &flags);
	if (ret <= 0) {
		kfree(usemap);
		__kfree_section_memmap(memmap);
	}
	return ret;
}
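
/*
 * Hotplug usage sketch: the memory hotplug core calls the function
 * above once per section of the range being added, roughly
 *
 *	for (i = start_sec; i <= end_sec; i++)
 *		sparse_add_one_section(zone, section_nr_to_pfn(i));
 *
 * and treats -EEXIST as "already present" rather than as a failure.
 */
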
#ifdef CONFIG_MEMORY_HOTREMOVE
#ifdef CONFIG_MEMORY_FAILURE
static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
	int i;

	if (!memmap)
		return;

	for (i = 0; i < nr_pages; i++) {
		if (PageHWPoison(&memmap[i])) {
			atomic_long_sub(1, &num_poisoned_pages);
			ClearPageHWPoison(&memmap[i]);
		}
	}
}
#else
static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
}
#endif

static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
	struct page *usemap_page;

	if (!usemap)
		return;

	usemap_page = virt_to_page(usemap);
	/*
	 * Check to see if allocation came from hot-plug-add
	 */
	if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
		kfree(usemap);
		if (memmap)
			__kfree_section_memmap(memmap);
		return;
	}

	/*
	 * The usemap came from bootmem.  It is packed with other usemaps
	 * on the section that holds the pgdat, so just keep it as is.
	 */

	if (memmap)
		free_map_bootmem(memmap);
}

void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
		unsigned long map_offset)
{
	struct page *memmap = NULL;
	unsigned long *usemap = NULL, flags;
	struct pglist_data *pgdat = zone->zone_pgdat;

	pgdat_resize_lock(pgdat, &flags);
	if (ms->section_mem_map) {
		usemap = ms->pageblock_flags;
		memmap = sparse_decode_mem_map(ms->section_mem_map,
						__section_nr(ms));
		ms->section_mem_map = 0;
		ms->pageblock_flags = NULL;
	}
	pgdat_resize_unlock(pgdat, &flags);

	clear_hwpoisoned_pages(memmap + map_offset,
			       PAGES_PER_SECTION - map_offset);
	free_section_usemap(memmap, usemap);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_MEMORY_HOTPLUG */