xref: /linux/mm/memory_hotplug.c (revision 4932381ee2a77a21641009149722e1bb92bd99e2)
13947be19SDave Hansen /*
23947be19SDave Hansen  *  linux/mm/memory_hotplug.c
33947be19SDave Hansen  *
43947be19SDave Hansen  *  Copyright (C)
53947be19SDave Hansen  */
63947be19SDave Hansen 
73947be19SDave Hansen #include <linux/stddef.h>
83947be19SDave Hansen #include <linux/mm.h>
9174cd4b1SIngo Molnar #include <linux/sched/signal.h>
103947be19SDave Hansen #include <linux/swap.h>
113947be19SDave Hansen #include <linux/interrupt.h>
123947be19SDave Hansen #include <linux/pagemap.h>
133947be19SDave Hansen #include <linux/compiler.h>
14b95f1b31SPaul Gortmaker #include <linux/export.h>
153947be19SDave Hansen #include <linux/pagevec.h>
162d1d43f6SChandra Seetharaman #include <linux/writeback.h>
173947be19SDave Hansen #include <linux/slab.h>
183947be19SDave Hansen #include <linux/sysctl.h>
193947be19SDave Hansen #include <linux/cpu.h>
203947be19SDave Hansen #include <linux/memory.h>
214b94ffdcSDan Williams #include <linux/memremap.h>
223947be19SDave Hansen #include <linux/memory_hotplug.h>
233947be19SDave Hansen #include <linux/highmem.h>
243947be19SDave Hansen #include <linux/vmalloc.h>
250a547039SKAMEZAWA Hiroyuki #include <linux/ioport.h>
260c0e6195SKAMEZAWA Hiroyuki #include <linux/delay.h>
270c0e6195SKAMEZAWA Hiroyuki #include <linux/migrate.h>
280c0e6195SKAMEZAWA Hiroyuki #include <linux/page-isolation.h>
2971088785SBadari Pulavarty #include <linux/pfn.h>
306ad696d2SAndi Kleen #include <linux/suspend.h>
316d9c285aSKOSAKI Motohiro #include <linux/mm_inline.h>
32d96ae530Sakpm@linux-foundation.org #include <linux/firmware-map.h>
3360a5a19eSTang Chen #include <linux/stop_machine.h>
34c8721bbbSNaoya Horiguchi #include <linux/hugetlb.h>
35c5320926STang Chen #include <linux/memblock.h>
36f784a3f1STang Chen #include <linux/bootmem.h>
37698b1b30SVlastimil Babka #include <linux/compaction.h>
383947be19SDave Hansen 
393947be19SDave Hansen #include <asm/tlbflush.h>
403947be19SDave Hansen 
411e5ad9a3SAdrian Bunk #include "internal.h"
421e5ad9a3SAdrian Bunk 
439d0ad8caSDaniel Kiper /*
449d0ad8caSDaniel Kiper  * online_page_callback contains a pointer to the current page onlining
459d0ad8caSDaniel Kiper  * function. Initially it is generic_online_page(). If required, it can be
469d0ad8caSDaniel Kiper  * changed by calling set_online_page_callback() to register a callback and
479d0ad8caSDaniel Kiper  * restore_online_page_callback() to restore the generic callback.
489d0ad8caSDaniel Kiper  */
499d0ad8caSDaniel Kiper 
509d0ad8caSDaniel Kiper static void generic_online_page(struct page *page);
519d0ad8caSDaniel Kiper 
529d0ad8caSDaniel Kiper static online_page_callback_t online_page_callback = generic_online_page;
53bfc8c901SVladimir Davydov static DEFINE_MUTEX(online_page_callback_lock);
549d0ad8caSDaniel Kiper 
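/*
 * Illustrative sketch, not part of this file: a ballooning driver could use
 * the registration API above to divert onlined pages into its own pool
 * instead of handing them straight to the page allocator.  The names
 * my_online_page(), my_page_pool and my_pool_lock are hypothetical.
 *
 *	static void my_online_page(struct page *page)
 *	{
 *		__online_page_set_limits(page);
 *		spin_lock(&my_pool_lock);
 *		list_add(&page->lru, &my_page_pool);
 *		spin_unlock(&my_pool_lock);
 *	}
 *
 *	rc = set_online_page_callback(&my_online_page);	(driver init)
 *	...
 *	restore_online_page_callback(&my_online_page);	(driver teardown)
 */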
55bfc8c901SVladimir Davydov /* The same as the cpu_hotplug lock, but for memory hotplug. */
56bfc8c901SVladimir Davydov static struct {
57bfc8c901SVladimir Davydov 	struct task_struct *active_writer;
58bfc8c901SVladimir Davydov 	struct mutex lock; /* Synchronizes accesses to refcount, */
59bfc8c901SVladimir Davydov 	/*
60bfc8c901SVladimir Davydov 	 * Also blocks new readers during
61bfc8c901SVladimir Davydov 	 * an ongoing mem hotplug operation.
62bfc8c901SVladimir Davydov 	 */
63bfc8c901SVladimir Davydov 	int refcount;
6420d6c96bSKOSAKI Motohiro 
65bfc8c901SVladimir Davydov #ifdef CONFIG_DEBUG_LOCK_ALLOC
66bfc8c901SVladimir Davydov 	struct lockdep_map dep_map;
67bfc8c901SVladimir Davydov #endif
68bfc8c901SVladimir Davydov } mem_hotplug = {
69bfc8c901SVladimir Davydov 	.active_writer = NULL,
70bfc8c901SVladimir Davydov 	.lock = __MUTEX_INITIALIZER(mem_hotplug.lock),
71bfc8c901SVladimir Davydov 	.refcount = 0,
72bfc8c901SVladimir Davydov #ifdef CONFIG_DEBUG_LOCK_ALLOC
73bfc8c901SVladimir Davydov 	.dep_map = {.name = "mem_hotplug.lock" },
74bfc8c901SVladimir Davydov #endif
75bfc8c901SVladimir Davydov };
76bfc8c901SVladimir Davydov 
77bfc8c901SVladimir Davydov /* Lockdep annotations for get/put_online_mems() and mem_hotplug_begin/end() */
78bfc8c901SVladimir Davydov #define memhp_lock_acquire_read() lock_map_acquire_read(&mem_hotplug.dep_map)
79bfc8c901SVladimir Davydov #define memhp_lock_acquire()      lock_map_acquire(&mem_hotplug.dep_map)
80bfc8c901SVladimir Davydov #define memhp_lock_release()      lock_map_release(&mem_hotplug.dep_map)
81bfc8c901SVladimir Davydov 
82*4932381eSMichal Hocko bool movable_node_enabled = false;
83*4932381eSMichal Hocko 
848604d9e5SVitaly Kuznetsov #ifndef CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE
8531bc3858SVitaly Kuznetsov bool memhp_auto_online;
868604d9e5SVitaly Kuznetsov #else
878604d9e5SVitaly Kuznetsov bool memhp_auto_online = true;
888604d9e5SVitaly Kuznetsov #endif
8931bc3858SVitaly Kuznetsov EXPORT_SYMBOL_GPL(memhp_auto_online);
9031bc3858SVitaly Kuznetsov 
9186dd995dSVitaly Kuznetsov static int __init setup_memhp_default_state(char *str)
9286dd995dSVitaly Kuznetsov {
9386dd995dSVitaly Kuznetsov 	if (!strcmp(str, "online"))
9486dd995dSVitaly Kuznetsov 		memhp_auto_online = true;
9586dd995dSVitaly Kuznetsov 	else if (!strcmp(str, "offline"))
9686dd995dSVitaly Kuznetsov 		memhp_auto_online = false;
9786dd995dSVitaly Kuznetsov 
9886dd995dSVitaly Kuznetsov 	return 1;
9986dd995dSVitaly Kuznetsov }
10086dd995dSVitaly Kuznetsov __setup("memhp_default_state=", setup_memhp_default_state);
10186dd995dSVitaly Kuznetsov 
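/*
 * Illustrative note, not part of this file: the default onlining policy can
 * be chosen on the kernel command line, e.g.
 *
 *	memhp_default_state=online
 *
 * which makes hot-added memory blocks come up online automatically, the same
 * effect as building with CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y.  In this
 * kernel series the policy (memhp_auto_online) is also exposed as
 * /sys/devices/system/memory/auto_online_blocks.
 */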
102bfc8c901SVladimir Davydov void get_online_mems(void)
10320d6c96bSKOSAKI Motohiro {
104bfc8c901SVladimir Davydov 	might_sleep();
105bfc8c901SVladimir Davydov 	if (mem_hotplug.active_writer == current)
106bfc8c901SVladimir Davydov 		return;
107bfc8c901SVladimir Davydov 	memhp_lock_acquire_read();
108bfc8c901SVladimir Davydov 	mutex_lock(&mem_hotplug.lock);
109bfc8c901SVladimir Davydov 	mem_hotplug.refcount++;
110bfc8c901SVladimir Davydov 	mutex_unlock(&mem_hotplug.lock);
111bfc8c901SVladimir Davydov 
11220d6c96bSKOSAKI Motohiro }
11320d6c96bSKOSAKI Motohiro 
114bfc8c901SVladimir Davydov void put_online_mems(void)
11520d6c96bSKOSAKI Motohiro {
116bfc8c901SVladimir Davydov 	if (mem_hotplug.active_writer == current)
117bfc8c901SVladimir Davydov 		return;
118bfc8c901SVladimir Davydov 	mutex_lock(&mem_hotplug.lock);
119bfc8c901SVladimir Davydov 
120bfc8c901SVladimir Davydov 	if (WARN_ON(!mem_hotplug.refcount))
121bfc8c901SVladimir Davydov 		mem_hotplug.refcount++; /* try to fix things up */
122bfc8c901SVladimir Davydov 
123bfc8c901SVladimir Davydov 	if (!--mem_hotplug.refcount && unlikely(mem_hotplug.active_writer))
124bfc8c901SVladimir Davydov 		wake_up_process(mem_hotplug.active_writer);
125bfc8c901SVladimir Davydov 	mutex_unlock(&mem_hotplug.lock);
126bfc8c901SVladimir Davydov 	memhp_lock_release();
127bfc8c901SVladimir Davydov 
12820d6c96bSKOSAKI Motohiro }
12920d6c96bSKOSAKI Motohiro 
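/*
 * Illustrative sketch, not part of this file: a reader that walks struct
 * pages and must not race with memory hotplug brackets the walk with the
 * reference count above.  do_something_with() is a hypothetical stand-in
 * for the real work.
 *
 *	get_online_mems();
 *	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 *		if (!pfn_valid(pfn))
 *			continue;
 *		do_something_with(pfn_to_page(pfn));
 *	}
 *	put_online_mems();
 */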
13055adc1d0SHeiko Carstens /* Serializes write accesses to mem_hotplug.active_writer. */
13155adc1d0SHeiko Carstens static DEFINE_MUTEX(memory_add_remove_lock);
13255adc1d0SHeiko Carstens 
13330467e0bSDavid Rientjes void mem_hotplug_begin(void)
134bfc8c901SVladimir Davydov {
13555adc1d0SHeiko Carstens 	mutex_lock(&memory_add_remove_lock);
1363fc21924SDan Williams 
137bfc8c901SVladimir Davydov 	mem_hotplug.active_writer = current;
138bfc8c901SVladimir Davydov 
139bfc8c901SVladimir Davydov 	memhp_lock_acquire();
140bfc8c901SVladimir Davydov 	for (;;) {
141bfc8c901SVladimir Davydov 		mutex_lock(&mem_hotplug.lock);
142bfc8c901SVladimir Davydov 		if (likely(!mem_hotplug.refcount))
143bfc8c901SVladimir Davydov 			break;
144bfc8c901SVladimir Davydov 		__set_current_state(TASK_UNINTERRUPTIBLE);
145bfc8c901SVladimir Davydov 		mutex_unlock(&mem_hotplug.lock);
146bfc8c901SVladimir Davydov 		schedule();
147bfc8c901SVladimir Davydov 	}
148bfc8c901SVladimir Davydov }
149bfc8c901SVladimir Davydov 
15030467e0bSDavid Rientjes void mem_hotplug_done(void)
151bfc8c901SVladimir Davydov {
152bfc8c901SVladimir Davydov 	mem_hotplug.active_writer = NULL;
153bfc8c901SVladimir Davydov 	mutex_unlock(&mem_hotplug.lock);
154bfc8c901SVladimir Davydov 	memhp_lock_release();
15555adc1d0SHeiko Carstens 	mutex_unlock(&memory_add_remove_lock);
156bfc8c901SVladimir Davydov }
15720d6c96bSKOSAKI Motohiro 
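/*
 * Illustrative sketch, not part of this file: the hotplug paths take the
 * writer side around the actual add/remove work, roughly
 *
 *	mem_hotplug_begin();
 *	ret = arch_add_memory(nid, start, size, true);
 *	mem_hotplug_done();
 *
 * mem_hotplug_begin() only returns once every get_online_mems() reader has
 * dropped its reference.
 */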
15845e0b78bSKeith Mannthey /* add this memory to iomem resource */
15945e0b78bSKeith Mannthey static struct resource *register_memory_resource(u64 start, u64 size)
16045e0b78bSKeith Mannthey {
16145e0b78bSKeith Mannthey 	struct resource *res;
16245e0b78bSKeith Mannthey 	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
1636f754ba4SVitaly Kuznetsov 	if (!res)
1646f754ba4SVitaly Kuznetsov 		return ERR_PTR(-ENOMEM);
16545e0b78bSKeith Mannthey 
16645e0b78bSKeith Mannthey 	res->name = "System RAM";
16745e0b78bSKeith Mannthey 	res->start = start;
16845e0b78bSKeith Mannthey 	res->end = start + size - 1;
169782b8664SToshi Kani 	res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
17045e0b78bSKeith Mannthey 	if (request_resource(&iomem_resource, res) < 0) {
1714996eed8SToshi Kani 		pr_debug("System RAM resource %pR cannot be added\n", res);
17245e0b78bSKeith Mannthey 		kfree(res);
1736f754ba4SVitaly Kuznetsov 		return ERR_PTR(-EEXIST);
17445e0b78bSKeith Mannthey 	}
17545e0b78bSKeith Mannthey 	return res;
17645e0b78bSKeith Mannthey }
17745e0b78bSKeith Mannthey 
17845e0b78bSKeith Mannthey static void release_memory_resource(struct resource *res)
17945e0b78bSKeith Mannthey {
18045e0b78bSKeith Mannthey 	if (!res)
18145e0b78bSKeith Mannthey 		return;
18245e0b78bSKeith Mannthey 	release_resource(res);
18345e0b78bSKeith Mannthey 	kfree(res);
18445e0b78bSKeith Mannthey 	return;
18545e0b78bSKeith Mannthey }
18645e0b78bSKeith Mannthey 
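/*
 * Illustrative sketch, not part of this file: callers such as add_memory()
 * follow the ERR_PTR() convention used above, e.g.
 *
 *	res = register_memory_resource(start, size);
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 */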
18753947027SKeith Mannthey #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
18846723bfaSYasuaki Ishimatsu void get_page_bootmem(unsigned long info,  struct page *page,
1895f24ce5fSAndrea Arcangeli 		      unsigned long type)
19004753278SYasunori Goto {
191ddffe98dSYasuaki Ishimatsu 	page->freelist = (void *)type;
19204753278SYasunori Goto 	SetPagePrivate(page);
19304753278SYasunori Goto 	set_page_private(page, info);
194fe896d18SJoonsoo Kim 	page_ref_inc(page);
19504753278SYasunori Goto }
19604753278SYasunori Goto 
197170a5a7eSJiang Liu void put_page_bootmem(struct page *page)
19804753278SYasunori Goto {
1995f24ce5fSAndrea Arcangeli 	unsigned long type;
20004753278SYasunori Goto 
201ddffe98dSYasuaki Ishimatsu 	type = (unsigned long) page->freelist;
2025f24ce5fSAndrea Arcangeli 	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
2035f24ce5fSAndrea Arcangeli 	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);
20404753278SYasunori Goto 
205fe896d18SJoonsoo Kim 	if (page_ref_dec_return(page) == 1) {
206ddffe98dSYasuaki Ishimatsu 		page->freelist = NULL;
20704753278SYasunori Goto 		ClearPagePrivate(page);
20804753278SYasunori Goto 		set_page_private(page, 0);
2095f24ce5fSAndrea Arcangeli 		INIT_LIST_HEAD(&page->lru);
210170a5a7eSJiang Liu 		free_reserved_page(page);
21104753278SYasunori Goto 	}
21204753278SYasunori Goto }
21304753278SYasunori Goto 
21446723bfaSYasuaki Ishimatsu #ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
21546723bfaSYasuaki Ishimatsu #ifndef CONFIG_SPARSEMEM_VMEMMAP
216d92bc318SAdrian Bunk static void register_page_bootmem_info_section(unsigned long start_pfn)
21704753278SYasunori Goto {
21804753278SYasunori Goto 	unsigned long *usemap, mapsize, section_nr, i;
21904753278SYasunori Goto 	struct mem_section *ms;
22004753278SYasunori Goto 	struct page *page, *memmap;
22104753278SYasunori Goto 
22204753278SYasunori Goto 	section_nr = pfn_to_section_nr(start_pfn);
22304753278SYasunori Goto 	ms = __nr_to_section(section_nr);
22404753278SYasunori Goto 
22504753278SYasunori Goto 	/* Get section's memmap address */
22604753278SYasunori Goto 	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
22704753278SYasunori Goto 
22804753278SYasunori Goto 	/*
22904753278SYasunori Goto 	 * Get page for the memmap's phys address
23004753278SYasunori Goto 	 * XXX: need more consideration for sparse_vmemmap...
23104753278SYasunori Goto 	 */
23204753278SYasunori Goto 	page = virt_to_page(memmap);
23304753278SYasunori Goto 	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
23404753278SYasunori Goto 	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;
23504753278SYasunori Goto 
23604753278SYasunori Goto 	/* remember memmap's page */
23704753278SYasunori Goto 	for (i = 0; i < mapsize; i++, page++)
23804753278SYasunori Goto 		get_page_bootmem(section_nr, page, SECTION_INFO);
23904753278SYasunori Goto 
24004753278SYasunori Goto 	usemap = __nr_to_section(section_nr)->pageblock_flags;
24104753278SYasunori Goto 	page = virt_to_page(usemap);
24204753278SYasunori Goto 
24304753278SYasunori Goto 	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;
24404753278SYasunori Goto 
24504753278SYasunori Goto 	for (i = 0; i < mapsize; i++, page++)
246af370fb8SYasunori Goto 		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
24704753278SYasunori Goto 
24804753278SYasunori Goto }
24946723bfaSYasuaki Ishimatsu #else /* CONFIG_SPARSEMEM_VMEMMAP */
25046723bfaSYasuaki Ishimatsu static void register_page_bootmem_info_section(unsigned long start_pfn)
25146723bfaSYasuaki Ishimatsu {
25246723bfaSYasuaki Ishimatsu 	unsigned long *usemap, mapsize, section_nr, i;
25346723bfaSYasuaki Ishimatsu 	struct mem_section *ms;
25446723bfaSYasuaki Ishimatsu 	struct page *page, *memmap;
25546723bfaSYasuaki Ishimatsu 
25646723bfaSYasuaki Ishimatsu 	if (!pfn_valid(start_pfn))
25746723bfaSYasuaki Ishimatsu 		return;
25846723bfaSYasuaki Ishimatsu 
25946723bfaSYasuaki Ishimatsu 	section_nr = pfn_to_section_nr(start_pfn);
26046723bfaSYasuaki Ishimatsu 	ms = __nr_to_section(section_nr);
26146723bfaSYasuaki Ishimatsu 
26246723bfaSYasuaki Ishimatsu 	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
26346723bfaSYasuaki Ishimatsu 
26446723bfaSYasuaki Ishimatsu 	register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);
26546723bfaSYasuaki Ishimatsu 
26646723bfaSYasuaki Ishimatsu 	usemap = __nr_to_section(section_nr)->pageblock_flags;
26746723bfaSYasuaki Ishimatsu 	page = virt_to_page(usemap);
26846723bfaSYasuaki Ishimatsu 
26946723bfaSYasuaki Ishimatsu 	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;
27046723bfaSYasuaki Ishimatsu 
27146723bfaSYasuaki Ishimatsu 	for (i = 0; i < mapsize; i++, page++)
27246723bfaSYasuaki Ishimatsu 		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
27346723bfaSYasuaki Ishimatsu }
27446723bfaSYasuaki Ishimatsu #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
27504753278SYasunori Goto 
2767ded384aSLinus Torvalds void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
27704753278SYasunori Goto {
27804753278SYasunori Goto 	unsigned long i, pfn, end_pfn, nr_pages;
27904753278SYasunori Goto 	int node = pgdat->node_id;
28004753278SYasunori Goto 	struct page *page;
28104753278SYasunori Goto 
28204753278SYasunori Goto 	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
28304753278SYasunori Goto 	page = virt_to_page(pgdat);
28404753278SYasunori Goto 
28504753278SYasunori Goto 	for (i = 0; i < nr_pages; i++, page++)
28604753278SYasunori Goto 		get_page_bootmem(node, page, NODE_INFO);
28704753278SYasunori Goto 
28804753278SYasunori Goto 	pfn = pgdat->node_start_pfn;
289c1f19495SCody P Schafer 	end_pfn = pgdat_end_pfn(pgdat);
29004753278SYasunori Goto 
2917e9f5eb0STang Chen 	/* register section info */
292f14851afSqiuxishi 	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
293f14851afSqiuxishi 		/*
294f14851afSqiuxishi 		 * Some platforms can assign the same pfn to multiple nodes - on
295f14851afSqiuxishi 		 * node0 as well as nodeN.  To avoid registering a pfn against
296f14851afSqiuxishi 		 * multiple nodes, we check that this pfn does not already
2977e9f5eb0STang Chen 		 * reside in some other node.
298f14851afSqiuxishi 		 */
299f65e91dfSYang Shi 		if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
30004753278SYasunori Goto 			register_page_bootmem_info_section(pfn);
301f14851afSqiuxishi 	}
30204753278SYasunori Goto }
30346723bfaSYasuaki Ishimatsu #endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */
30404753278SYasunori Goto 
305f1dd2cd1SMichal Hocko static int __meminit __add_section(int nid, unsigned long phys_start_pfn,
306f1dd2cd1SMichal Hocko 		bool want_memblock)
3073947be19SDave Hansen {
3083947be19SDave Hansen 	int ret;
309f1dd2cd1SMichal Hocko 	int i;
3103947be19SDave Hansen 
311ebd15302SKAMEZAWA Hiroyuki 	if (pfn_valid(phys_start_pfn))
312ebd15302SKAMEZAWA Hiroyuki 		return -EEXIST;
313ebd15302SKAMEZAWA Hiroyuki 
314f1dd2cd1SMichal Hocko 	ret = sparse_add_one_section(NODE_DATA(nid), phys_start_pfn);
3153947be19SDave Hansen 	if (ret < 0)
3163947be19SDave Hansen 		return ret;
3173947be19SDave Hansen 
318f1dd2cd1SMichal Hocko 	/*
319f1dd2cd1SMichal Hocko 	 * Make all the pages reserved so that nobody will stumble over a
320f1dd2cd1SMichal Hocko 	 * half-initialized state.
321f1dd2cd1SMichal Hocko 	 * FIXME: We also have to associate it with a node because pfn_to_node
322f1dd2cd1SMichal Hocko 	 * relies on the page having the proper node.
323f1dd2cd1SMichal Hocko 	 */
324f1dd2cd1SMichal Hocko 	for (i = 0; i < PAGES_PER_SECTION; i++) {
325f1dd2cd1SMichal Hocko 		unsigned long pfn = phys_start_pfn + i;
326f1dd2cd1SMichal Hocko 		struct page *page;
327f1dd2cd1SMichal Hocko 		if (!pfn_valid(pfn))
328f1dd2cd1SMichal Hocko 			continue;
329718127ccSYasunori Goto 
330f1dd2cd1SMichal Hocko 		page = pfn_to_page(pfn);
331f1dd2cd1SMichal Hocko 		set_page_node(page, nid);
332f1dd2cd1SMichal Hocko 		SetPageReserved(page);
333f1dd2cd1SMichal Hocko 	}
334718127ccSYasunori Goto 
3351b862aecSMichal Hocko 	if (!want_memblock)
3361b862aecSMichal Hocko 		return 0;
3371b862aecSMichal Hocko 
338c04fc586SGary Hade 	return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
3393947be19SDave Hansen }
3403947be19SDave Hansen 
3414edd7cefSDavid Rientjes /*
3424edd7cefSDavid Rientjes  * Reasonably generic function for adding memory.  It is
3434edd7cefSDavid Rientjes  * expected that archs that support memory hotplug will
3444edd7cefSDavid Rientjes  * call this function after deciding the zone to which to
3454edd7cefSDavid Rientjes  * add the new pages.
3464edd7cefSDavid Rientjes  */
347f1dd2cd1SMichal Hocko int __ref __add_pages(int nid, unsigned long phys_start_pfn,
3481b862aecSMichal Hocko 			unsigned long nr_pages, bool want_memblock)
3494edd7cefSDavid Rientjes {
3504edd7cefSDavid Rientjes 	unsigned long i;
3514edd7cefSDavid Rientjes 	int err = 0;
3524edd7cefSDavid Rientjes 	int start_sec, end_sec;
3534b94ffdcSDan Williams 	struct vmem_altmap *altmap;
3544b94ffdcSDan Williams 
3554edd7cefSDavid Rientjes 	/* while initializing mem_map, align the hot-added range to sections */
3564edd7cefSDavid Rientjes 	start_sec = pfn_to_section_nr(phys_start_pfn);
3574edd7cefSDavid Rientjes 	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);
3584edd7cefSDavid Rientjes 
3594b94ffdcSDan Williams 	altmap = to_vmem_altmap((unsigned long) pfn_to_page(phys_start_pfn));
3604b94ffdcSDan Williams 	if (altmap) {
3614b94ffdcSDan Williams 		/*
3624b94ffdcSDan Williams 		 * Validate altmap is within bounds of the total request
3634b94ffdcSDan Williams 		 */
3644b94ffdcSDan Williams 		if (altmap->base_pfn != phys_start_pfn
3654b94ffdcSDan Williams 				|| vmem_altmap_offset(altmap) > nr_pages) {
3664b94ffdcSDan Williams 			pr_warn_once("memory add fail, invalid altmap\n");
3677cf91a98SJoonsoo Kim 			err = -EINVAL;
3687cf91a98SJoonsoo Kim 			goto out;
3694b94ffdcSDan Williams 		}
3704b94ffdcSDan Williams 		altmap->alloc = 0;
3714b94ffdcSDan Williams 	}
3724b94ffdcSDan Williams 
3734edd7cefSDavid Rientjes 	for (i = start_sec; i <= end_sec; i++) {
374f1dd2cd1SMichal Hocko 		err = __add_section(nid, section_nr_to_pfn(i), want_memblock);
3754edd7cefSDavid Rientjes 
3764edd7cefSDavid Rientjes 		/*
3774edd7cefSDavid Rientjes 		 * EEXIST is finally dealt with by the ioresource collision
3784edd7cefSDavid Rientjes 		 * check; see add_memory() => register_memory_resource().
3794edd7cefSDavid Rientjes 		 * A warning will be printed if there is a collision.
3804edd7cefSDavid Rientjes 		 */
3814edd7cefSDavid Rientjes 		if (err && (err != -EEXIST))
3824edd7cefSDavid Rientjes 			break;
3834edd7cefSDavid Rientjes 		err = 0;
3844edd7cefSDavid Rientjes 	}
385c435a390SZhu Guihua 	vmemmap_populate_print_last();
3867cf91a98SJoonsoo Kim out:
3874edd7cefSDavid Rientjes 	return err;
3884edd7cefSDavid Rientjes }
3894edd7cefSDavid Rientjes EXPORT_SYMBOL_GPL(__add_pages);
3904edd7cefSDavid Rientjes 
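/*
 * Illustrative sketch, not part of this file: an architecture's
 * arch_add_memory() typically converts the physical range to pfns and hands
 * it to __add_pages(), along the lines of
 *
 *	int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
 *	{
 *		unsigned long start_pfn = start >> PAGE_SHIFT;
 *		unsigned long nr_pages = size >> PAGE_SHIFT;
 *
 *		return __add_pages(nid, start_pfn, nr_pages, want_memblock);
 *	}
 *
 * Real implementations also extend the direct mapping and similar
 * arch-specific state before calling in here.
 */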
3914edd7cefSDavid Rientjes #ifdef CONFIG_MEMORY_HOTREMOVE
392815121d2SYasuaki Ishimatsu /* find the smallest valid pfn in the range [start_pfn, end_pfn) */
393815121d2SYasuaki Ishimatsu static int find_smallest_section_pfn(int nid, struct zone *zone,
394815121d2SYasuaki Ishimatsu 				     unsigned long start_pfn,
395815121d2SYasuaki Ishimatsu 				     unsigned long end_pfn)
396815121d2SYasuaki Ishimatsu {
397815121d2SYasuaki Ishimatsu 	struct mem_section *ms;
398815121d2SYasuaki Ishimatsu 
399815121d2SYasuaki Ishimatsu 	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION) {
400815121d2SYasuaki Ishimatsu 		ms = __pfn_to_section(start_pfn);
401815121d2SYasuaki Ishimatsu 
402815121d2SYasuaki Ishimatsu 		if (unlikely(!valid_section(ms)))
403815121d2SYasuaki Ishimatsu 			continue;
404815121d2SYasuaki Ishimatsu 
405815121d2SYasuaki Ishimatsu 		if (unlikely(pfn_to_nid(start_pfn) != nid))
406815121d2SYasuaki Ishimatsu 			continue;
407815121d2SYasuaki Ishimatsu 
408815121d2SYasuaki Ishimatsu 		if (zone && zone != page_zone(pfn_to_page(start_pfn)))
409815121d2SYasuaki Ishimatsu 			continue;
410815121d2SYasuaki Ishimatsu 
411815121d2SYasuaki Ishimatsu 		return start_pfn;
412815121d2SYasuaki Ishimatsu 	}
413815121d2SYasuaki Ishimatsu 
414815121d2SYasuaki Ishimatsu 	return 0;
415815121d2SYasuaki Ishimatsu }
416815121d2SYasuaki Ishimatsu 
417815121d2SYasuaki Ishimatsu /* find the biggest valid pfn in the range [start_pfn, end_pfn). */
418815121d2SYasuaki Ishimatsu static int find_biggest_section_pfn(int nid, struct zone *zone,
419815121d2SYasuaki Ishimatsu 				    unsigned long start_pfn,
420815121d2SYasuaki Ishimatsu 				    unsigned long end_pfn)
421815121d2SYasuaki Ishimatsu {
422815121d2SYasuaki Ishimatsu 	struct mem_section *ms;
423815121d2SYasuaki Ishimatsu 	unsigned long pfn;
424815121d2SYasuaki Ishimatsu 
425815121d2SYasuaki Ishimatsu 	/* pfn starts at the last pfn of the range (end_pfn is exclusive). */
426815121d2SYasuaki Ishimatsu 	pfn = end_pfn - 1;
427815121d2SYasuaki Ishimatsu 	for (; pfn >= start_pfn; pfn -= PAGES_PER_SECTION) {
428815121d2SYasuaki Ishimatsu 		ms = __pfn_to_section(pfn);
429815121d2SYasuaki Ishimatsu 
430815121d2SYasuaki Ishimatsu 		if (unlikely(!valid_section(ms)))
431815121d2SYasuaki Ishimatsu 			continue;
432815121d2SYasuaki Ishimatsu 
433815121d2SYasuaki Ishimatsu 		if (unlikely(pfn_to_nid(pfn) != nid))
434815121d2SYasuaki Ishimatsu 			continue;
435815121d2SYasuaki Ishimatsu 
436815121d2SYasuaki Ishimatsu 		if (zone && zone != page_zone(pfn_to_page(pfn)))
437815121d2SYasuaki Ishimatsu 			continue;
438815121d2SYasuaki Ishimatsu 
439815121d2SYasuaki Ishimatsu 		return pfn;
440815121d2SYasuaki Ishimatsu 	}
441815121d2SYasuaki Ishimatsu 
442815121d2SYasuaki Ishimatsu 	return 0;
443815121d2SYasuaki Ishimatsu }
444815121d2SYasuaki Ishimatsu 
445815121d2SYasuaki Ishimatsu static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
446815121d2SYasuaki Ishimatsu 			     unsigned long end_pfn)
447815121d2SYasuaki Ishimatsu {
448815121d2SYasuaki Ishimatsu 	unsigned long zone_start_pfn = zone->zone_start_pfn;
449c33bc315SXishi Qiu 	unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */
450c33bc315SXishi Qiu 	unsigned long zone_end_pfn = z;
451815121d2SYasuaki Ishimatsu 	unsigned long pfn;
452815121d2SYasuaki Ishimatsu 	struct mem_section *ms;
453815121d2SYasuaki Ishimatsu 	int nid = zone_to_nid(zone);
454815121d2SYasuaki Ishimatsu 
455815121d2SYasuaki Ishimatsu 	zone_span_writelock(zone);
456815121d2SYasuaki Ishimatsu 	if (zone_start_pfn == start_pfn) {
457815121d2SYasuaki Ishimatsu 		/*
458815121d2SYasuaki Ishimatsu 		 * If the section is the smallest section in the zone, we need to
459815121d2SYasuaki Ishimatsu 		 * shrink zone->zone_start_pfn and zone->spanned_pages.
460815121d2SYasuaki Ishimatsu 		 * In this case, we find the second smallest valid mem_section
461815121d2SYasuaki Ishimatsu 		 * and shrink the zone to start there.
462815121d2SYasuaki Ishimatsu 		 */
463815121d2SYasuaki Ishimatsu 		pfn = find_smallest_section_pfn(nid, zone, end_pfn,
464815121d2SYasuaki Ishimatsu 						zone_end_pfn);
465815121d2SYasuaki Ishimatsu 		if (pfn) {
466815121d2SYasuaki Ishimatsu 			zone->zone_start_pfn = pfn;
467815121d2SYasuaki Ishimatsu 			zone->spanned_pages = zone_end_pfn - pfn;
468815121d2SYasuaki Ishimatsu 		}
469815121d2SYasuaki Ishimatsu 	} else if (zone_end_pfn == end_pfn) {
470815121d2SYasuaki Ishimatsu 		/*
471815121d2SYasuaki Ishimatsu 		 * If the section is the biggest section in the zone, we need to
472815121d2SYasuaki Ishimatsu 		 * shrink zone->spanned_pages.
473815121d2SYasuaki Ishimatsu 		 * In this case, we find the second biggest valid mem_section
474815121d2SYasuaki Ishimatsu 		 * and shrink the zone to end there.
475815121d2SYasuaki Ishimatsu 		 */
476815121d2SYasuaki Ishimatsu 		pfn = find_biggest_section_pfn(nid, zone, zone_start_pfn,
477815121d2SYasuaki Ishimatsu 					       start_pfn);
478815121d2SYasuaki Ishimatsu 		if (pfn)
479815121d2SYasuaki Ishimatsu 			zone->spanned_pages = pfn - zone_start_pfn + 1;
480815121d2SYasuaki Ishimatsu 	}
481815121d2SYasuaki Ishimatsu 
482815121d2SYasuaki Ishimatsu 	/*
483815121d2SYasuaki Ishimatsu 	 * The section is neither the biggest nor the smallest mem_section in
484815121d2SYasuaki Ishimatsu 	 * the zone; it only creates a hole in the zone. So in this case we need
485815121d2SYasuaki Ishimatsu 	 * not change the zone span. But the zone may now consist only of holes,
486815121d2SYasuaki Ishimatsu 	 * so check whether any valid section is left in it.
487815121d2SYasuaki Ishimatsu 	 */
488815121d2SYasuaki Ishimatsu 	pfn = zone_start_pfn;
489815121d2SYasuaki Ishimatsu 	for (; pfn < zone_end_pfn; pfn += PAGES_PER_SECTION) {
490815121d2SYasuaki Ishimatsu 		ms = __pfn_to_section(pfn);
491815121d2SYasuaki Ishimatsu 
492815121d2SYasuaki Ishimatsu 		if (unlikely(!valid_section(ms)))
493815121d2SYasuaki Ishimatsu 			continue;
494815121d2SYasuaki Ishimatsu 
495815121d2SYasuaki Ishimatsu 		if (page_zone(pfn_to_page(pfn)) != zone)
496815121d2SYasuaki Ishimatsu 			continue;
497815121d2SYasuaki Ishimatsu 
498815121d2SYasuaki Ishimatsu 		 /* If this is the section being removed, continue the loop */
499815121d2SYasuaki Ishimatsu 		if (start_pfn == pfn)
500815121d2SYasuaki Ishimatsu 			continue;
501815121d2SYasuaki Ishimatsu 
502815121d2SYasuaki Ishimatsu 		/* If we find a valid section, we have nothing to do */
503815121d2SYasuaki Ishimatsu 		zone_span_writeunlock(zone);
504815121d2SYasuaki Ishimatsu 		return;
505815121d2SYasuaki Ishimatsu 	}
506815121d2SYasuaki Ishimatsu 
507815121d2SYasuaki Ishimatsu 	/* The zone has no valid section */
508815121d2SYasuaki Ishimatsu 	zone->zone_start_pfn = 0;
509815121d2SYasuaki Ishimatsu 	zone->spanned_pages = 0;
510815121d2SYasuaki Ishimatsu 	zone_span_writeunlock(zone);
511815121d2SYasuaki Ishimatsu }
512815121d2SYasuaki Ishimatsu 
513815121d2SYasuaki Ishimatsu static void shrink_pgdat_span(struct pglist_data *pgdat,
514815121d2SYasuaki Ishimatsu 			      unsigned long start_pfn, unsigned long end_pfn)
515815121d2SYasuaki Ishimatsu {
516815121d2SYasuaki Ishimatsu 	unsigned long pgdat_start_pfn = pgdat->node_start_pfn;
51783285c72SXishi Qiu 	unsigned long p = pgdat_end_pfn(pgdat); /* pgdat_end_pfn namespace clash */
51883285c72SXishi Qiu 	unsigned long pgdat_end_pfn = p;
519815121d2SYasuaki Ishimatsu 	unsigned long pfn;
520815121d2SYasuaki Ishimatsu 	struct mem_section *ms;
521815121d2SYasuaki Ishimatsu 	int nid = pgdat->node_id;
522815121d2SYasuaki Ishimatsu 
523815121d2SYasuaki Ishimatsu 	if (pgdat_start_pfn == start_pfn) {
524815121d2SYasuaki Ishimatsu 		/*
525815121d2SYasuaki Ishimatsu 		 * If the section is the smallest section in the pgdat, we need to
526815121d2SYasuaki Ishimatsu 		 * shrink pgdat->node_start_pfn and pgdat->node_spanned_pages.
527815121d2SYasuaki Ishimatsu 		 * In this case, we find the second smallest valid mem_section
528815121d2SYasuaki Ishimatsu 		 * and shrink the pgdat to start there.
529815121d2SYasuaki Ishimatsu 		 */
530815121d2SYasuaki Ishimatsu 		pfn = find_smallest_section_pfn(nid, NULL, end_pfn,
531815121d2SYasuaki Ishimatsu 						pgdat_end_pfn);
532815121d2SYasuaki Ishimatsu 		if (pfn) {
533815121d2SYasuaki Ishimatsu 			pgdat->node_start_pfn = pfn;
534815121d2SYasuaki Ishimatsu 			pgdat->node_spanned_pages = pgdat_end_pfn - pfn;
535815121d2SYasuaki Ishimatsu 		}
536815121d2SYasuaki Ishimatsu 	} else if (pgdat_end_pfn == end_pfn) {
537815121d2SYasuaki Ishimatsu 		/*
538815121d2SYasuaki Ishimatsu 		 * If the section is the biggest section in the pgdat, we need to
539815121d2SYasuaki Ishimatsu 		 * shrink pgdat->node_spanned_pages.
540815121d2SYasuaki Ishimatsu 		 * In this case, we find the second biggest valid mem_section
541815121d2SYasuaki Ishimatsu 		 * and shrink the pgdat to end there.
542815121d2SYasuaki Ishimatsu 		 */
543815121d2SYasuaki Ishimatsu 		pfn = find_biggest_section_pfn(nid, NULL, pgdat_start_pfn,
544815121d2SYasuaki Ishimatsu 					       start_pfn);
545815121d2SYasuaki Ishimatsu 		if (pfn)
546815121d2SYasuaki Ishimatsu 			pgdat->node_spanned_pages = pfn - pgdat_start_pfn + 1;
547815121d2SYasuaki Ishimatsu 	}
548815121d2SYasuaki Ishimatsu 
549815121d2SYasuaki Ishimatsu 	/*
550815121d2SYasuaki Ishimatsu 	 * If the section is neither the biggest nor the smallest mem_section
551815121d2SYasuaki Ishimatsu 	 * in the pgdat, it only creates a hole in the pgdat. So in this case
552815121d2SYasuaki Ishimatsu 	 * we need not change the pgdat span.
553815121d2SYasuaki Ishimatsu 	 * But the pgdat may now consist only of holes, so check whether any
554815121d2SYasuaki Ishimatsu 	 * valid section is left in it.
555815121d2SYasuaki Ishimatsu 	 */
556815121d2SYasuaki Ishimatsu 	pfn = pgdat_start_pfn;
557815121d2SYasuaki Ishimatsu 	for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SECTION) {
558815121d2SYasuaki Ishimatsu 		ms = __pfn_to_section(pfn);
559815121d2SYasuaki Ishimatsu 
560815121d2SYasuaki Ishimatsu 		if (unlikely(!valid_section(ms)))
561815121d2SYasuaki Ishimatsu 			continue;
562815121d2SYasuaki Ishimatsu 
563815121d2SYasuaki Ishimatsu 		if (pfn_to_nid(pfn) != nid)
564815121d2SYasuaki Ishimatsu 			continue;
565815121d2SYasuaki Ishimatsu 
566815121d2SYasuaki Ishimatsu 		 /* If this is the section being removed, continue the loop */
567815121d2SYasuaki Ishimatsu 		if (start_pfn == pfn)
568815121d2SYasuaki Ishimatsu 			continue;
569815121d2SYasuaki Ishimatsu 
570815121d2SYasuaki Ishimatsu 		/* If we find a valid section, we have nothing to do */
571815121d2SYasuaki Ishimatsu 		return;
572815121d2SYasuaki Ishimatsu 	}
573815121d2SYasuaki Ishimatsu 
574815121d2SYasuaki Ishimatsu 	/* The pgdat has no valid section */
575815121d2SYasuaki Ishimatsu 	pgdat->node_start_pfn = 0;
576815121d2SYasuaki Ishimatsu 	pgdat->node_spanned_pages = 0;
577815121d2SYasuaki Ishimatsu }
578815121d2SYasuaki Ishimatsu 
579815121d2SYasuaki Ishimatsu static void __remove_zone(struct zone *zone, unsigned long start_pfn)
580815121d2SYasuaki Ishimatsu {
581815121d2SYasuaki Ishimatsu 	struct pglist_data *pgdat = zone->zone_pgdat;
582815121d2SYasuaki Ishimatsu 	int nr_pages = PAGES_PER_SECTION;
583815121d2SYasuaki Ishimatsu 	int zone_type;
584815121d2SYasuaki Ishimatsu 	unsigned long flags;
585815121d2SYasuaki Ishimatsu 
586815121d2SYasuaki Ishimatsu 	zone_type = zone - pgdat->node_zones;
587815121d2SYasuaki Ishimatsu 
588815121d2SYasuaki Ishimatsu 	pgdat_resize_lock(zone->zone_pgdat, &flags);
589815121d2SYasuaki Ishimatsu 	shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
590815121d2SYasuaki Ishimatsu 	shrink_pgdat_span(pgdat, start_pfn, start_pfn + nr_pages);
591815121d2SYasuaki Ishimatsu 	pgdat_resize_unlock(zone->zone_pgdat, &flags);
592815121d2SYasuaki Ishimatsu }
593815121d2SYasuaki Ishimatsu 
5944b94ffdcSDan Williams static int __remove_section(struct zone *zone, struct mem_section *ms,
5954b94ffdcSDan Williams 		unsigned long map_offset)
596ea01ea93SBadari Pulavarty {
597815121d2SYasuaki Ishimatsu 	unsigned long start_pfn;
598815121d2SYasuaki Ishimatsu 	int scn_nr;
599ea01ea93SBadari Pulavarty 	int ret = -EINVAL;
600ea01ea93SBadari Pulavarty 
601ea01ea93SBadari Pulavarty 	if (!valid_section(ms))
602ea01ea93SBadari Pulavarty 		return ret;
603ea01ea93SBadari Pulavarty 
604ea01ea93SBadari Pulavarty 	ret = unregister_memory_section(ms);
605ea01ea93SBadari Pulavarty 	if (ret)
606ea01ea93SBadari Pulavarty 		return ret;
607ea01ea93SBadari Pulavarty 
608815121d2SYasuaki Ishimatsu 	scn_nr = __section_nr(ms);
609815121d2SYasuaki Ishimatsu 	start_pfn = section_nr_to_pfn(scn_nr);
610815121d2SYasuaki Ishimatsu 	__remove_zone(zone, start_pfn);
611815121d2SYasuaki Ishimatsu 
6124b94ffdcSDan Williams 	sparse_remove_one_section(zone, ms, map_offset);
613ea01ea93SBadari Pulavarty 	return 0;
614ea01ea93SBadari Pulavarty }
615ea01ea93SBadari Pulavarty 
616ea01ea93SBadari Pulavarty /**
617ea01ea93SBadari Pulavarty  * __remove_pages() - remove sections of pages from a zone
618ea01ea93SBadari Pulavarty  * @zone: zone from which pages need to be removed
619ea01ea93SBadari Pulavarty  * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
620ea01ea93SBadari Pulavarty  * @nr_pages: number of pages to remove (must be multiple of section size)
621ea01ea93SBadari Pulavarty  *
622ea01ea93SBadari Pulavarty  * Generic helper function to remove section mappings and sysfs entries
623ea01ea93SBadari Pulavarty  * for the section of the memory we are removing. Caller needs to make
624ea01ea93SBadari Pulavarty  * sure that pages are marked reserved and zones are adjusted properly by
625ea01ea93SBadari Pulavarty  * calling offline_pages().
626ea01ea93SBadari Pulavarty  */
627ea01ea93SBadari Pulavarty int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
628ea01ea93SBadari Pulavarty 		 unsigned long nr_pages)
629ea01ea93SBadari Pulavarty {
630fe74ebb1SToshi Kani 	unsigned long i;
6314b94ffdcSDan Williams 	unsigned long map_offset = 0;
6324b94ffdcSDan Williams 	int sections_to_remove, ret = 0;
6334b94ffdcSDan Williams 
6344b94ffdcSDan Williams 	/* In the ZONE_DEVICE case the device driver owns the memory region */
6354b94ffdcSDan Williams 	if (is_dev_zone(zone)) {
6364b94ffdcSDan Williams 		struct page *page = pfn_to_page(phys_start_pfn);
6374b94ffdcSDan Williams 		struct vmem_altmap *altmap;
6384b94ffdcSDan Williams 
6394b94ffdcSDan Williams 		altmap = to_vmem_altmap((unsigned long) page);
6404b94ffdcSDan Williams 		if (altmap)
6414b94ffdcSDan Williams 			map_offset = vmem_altmap_offset(altmap);
6424b94ffdcSDan Williams 	} else {
643fe74ebb1SToshi Kani 		resource_size_t start, size;
6444b94ffdcSDan Williams 
6454b94ffdcSDan Williams 		start = phys_start_pfn << PAGE_SHIFT;
6464b94ffdcSDan Williams 		size = nr_pages * PAGE_SIZE;
6474b94ffdcSDan Williams 
6484b94ffdcSDan Williams 		ret = release_mem_region_adjustable(&iomem_resource, start,
6494b94ffdcSDan Williams 					size);
6504b94ffdcSDan Williams 		if (ret) {
6514b94ffdcSDan Williams 			resource_size_t endres = start + size - 1;
6524b94ffdcSDan Williams 
6534b94ffdcSDan Williams 			pr_warn("Unable to release resource <%pa-%pa> (%d)\n",
6544b94ffdcSDan Williams 					&start, &endres, ret);
6554b94ffdcSDan Williams 		}
6564b94ffdcSDan Williams 	}
657ea01ea93SBadari Pulavarty 
6587cf91a98SJoonsoo Kim 	clear_zone_contiguous(zone);
6597cf91a98SJoonsoo Kim 
660ea01ea93SBadari Pulavarty 	/*
661ea01ea93SBadari Pulavarty 	 * We can only remove entire sections
662ea01ea93SBadari Pulavarty 	 */
663ea01ea93SBadari Pulavarty 	BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
664ea01ea93SBadari Pulavarty 	BUG_ON(nr_pages % PAGES_PER_SECTION);
665ea01ea93SBadari Pulavarty 
666ea01ea93SBadari Pulavarty 	sections_to_remove = nr_pages / PAGES_PER_SECTION;
667ea01ea93SBadari Pulavarty 	for (i = 0; i < sections_to_remove; i++) {
668ea01ea93SBadari Pulavarty 		unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
6694b94ffdcSDan Williams 
6704b94ffdcSDan Williams 		ret = __remove_section(zone, __pfn_to_section(pfn), map_offset);
6714b94ffdcSDan Williams 		map_offset = 0;
672ea01ea93SBadari Pulavarty 		if (ret)
673ea01ea93SBadari Pulavarty 			break;
674ea01ea93SBadari Pulavarty 	}
6757cf91a98SJoonsoo Kim 
6767cf91a98SJoonsoo Kim 	set_zone_contiguous(zone);
6777cf91a98SJoonsoo Kim 
678ea01ea93SBadari Pulavarty 	return ret;
679ea01ea93SBadari Pulavarty }
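
/*
 * Illustrative example, not part of this file: the BUG_ON()s above mean
 * callers must remove whole, section-aligned ranges.  With 128MB sections
 * and 4KB pages (a common x86_64 configuration), removing 256MB starting at
 * the 4GB boundary would be
 *
 *	phys_start_pfn = 0x100000000UL >> PAGE_SHIFT;	(= 0x100000)
 *	nr_pages = (256UL << 20) >> PAGE_SHIFT;		(= 0x10000)
 *	ret = __remove_pages(zone, phys_start_pfn, nr_pages);
 *
 * i.e. two sections of PAGES_PER_SECTION (32768) pages each.
 */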
6804edd7cefSDavid Rientjes #endif /* CONFIG_MEMORY_HOTREMOVE */
681ea01ea93SBadari Pulavarty 
6829d0ad8caSDaniel Kiper int set_online_page_callback(online_page_callback_t callback)
6839d0ad8caSDaniel Kiper {
6849d0ad8caSDaniel Kiper 	int rc = -EINVAL;
6859d0ad8caSDaniel Kiper 
686bfc8c901SVladimir Davydov 	get_online_mems();
687bfc8c901SVladimir Davydov 	mutex_lock(&online_page_callback_lock);
6889d0ad8caSDaniel Kiper 
6899d0ad8caSDaniel Kiper 	if (online_page_callback == generic_online_page) {
6909d0ad8caSDaniel Kiper 		online_page_callback = callback;
6919d0ad8caSDaniel Kiper 		rc = 0;
6929d0ad8caSDaniel Kiper 	}
6939d0ad8caSDaniel Kiper 
694bfc8c901SVladimir Davydov 	mutex_unlock(&online_page_callback_lock);
695bfc8c901SVladimir Davydov 	put_online_mems();
6969d0ad8caSDaniel Kiper 
6979d0ad8caSDaniel Kiper 	return rc;
6989d0ad8caSDaniel Kiper }
6999d0ad8caSDaniel Kiper EXPORT_SYMBOL_GPL(set_online_page_callback);
7009d0ad8caSDaniel Kiper 
7019d0ad8caSDaniel Kiper int restore_online_page_callback(online_page_callback_t callback)
7029d0ad8caSDaniel Kiper {
7039d0ad8caSDaniel Kiper 	int rc = -EINVAL;
7049d0ad8caSDaniel Kiper 
705bfc8c901SVladimir Davydov 	get_online_mems();
706bfc8c901SVladimir Davydov 	mutex_lock(&online_page_callback_lock);
7079d0ad8caSDaniel Kiper 
7089d0ad8caSDaniel Kiper 	if (online_page_callback == callback) {
7099d0ad8caSDaniel Kiper 		online_page_callback = generic_online_page;
7109d0ad8caSDaniel Kiper 		rc = 0;
7119d0ad8caSDaniel Kiper 	}
7129d0ad8caSDaniel Kiper 
713bfc8c901SVladimir Davydov 	mutex_unlock(&online_page_callback_lock);
714bfc8c901SVladimir Davydov 	put_online_mems();
7159d0ad8caSDaniel Kiper 
7169d0ad8caSDaniel Kiper 	return rc;
7179d0ad8caSDaniel Kiper }
7189d0ad8caSDaniel Kiper EXPORT_SYMBOL_GPL(restore_online_page_callback);
7199d0ad8caSDaniel Kiper 
7209d0ad8caSDaniel Kiper void __online_page_set_limits(struct page *page)
721180c06efSJeremy Fitzhardinge {
7229d0ad8caSDaniel Kiper }
7239d0ad8caSDaniel Kiper EXPORT_SYMBOL_GPL(__online_page_set_limits);
7249d0ad8caSDaniel Kiper 
7259d0ad8caSDaniel Kiper void __online_page_increment_counters(struct page *page)
7269d0ad8caSDaniel Kiper {
7273dcc0571SJiang Liu 	adjust_managed_page_count(page, 1);
7289d0ad8caSDaniel Kiper }
7299d0ad8caSDaniel Kiper EXPORT_SYMBOL_GPL(__online_page_increment_counters);
730180c06efSJeremy Fitzhardinge 
7319d0ad8caSDaniel Kiper void __online_page_free(struct page *page)
7329d0ad8caSDaniel Kiper {
7333dcc0571SJiang Liu 	__free_reserved_page(page);
734180c06efSJeremy Fitzhardinge }
7359d0ad8caSDaniel Kiper EXPORT_SYMBOL_GPL(__online_page_free);
7369d0ad8caSDaniel Kiper 
7379d0ad8caSDaniel Kiper static void generic_online_page(struct page *page)
7389d0ad8caSDaniel Kiper {
7399d0ad8caSDaniel Kiper 	__online_page_set_limits(page);
7409d0ad8caSDaniel Kiper 	__online_page_increment_counters(page);
7419d0ad8caSDaniel Kiper 	__online_page_free(page);
7429d0ad8caSDaniel Kiper }
743180c06efSJeremy Fitzhardinge 
74475884fb1SKAMEZAWA Hiroyuki static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
74575884fb1SKAMEZAWA Hiroyuki 			void *arg)
7463947be19SDave Hansen {
7473947be19SDave Hansen 	unsigned long i;
74875884fb1SKAMEZAWA Hiroyuki 	unsigned long onlined_pages = *(unsigned long *)arg;
74975884fb1SKAMEZAWA Hiroyuki 	struct page *page;
7502d070eabSMichal Hocko 
75175884fb1SKAMEZAWA Hiroyuki 	if (PageReserved(pfn_to_page(start_pfn)))
75275884fb1SKAMEZAWA Hiroyuki 		for (i = 0; i < nr_pages; i++) {
75375884fb1SKAMEZAWA Hiroyuki 			page = pfn_to_page(start_pfn + i);
7549d0ad8caSDaniel Kiper 			(*online_page_callback)(page);
75575884fb1SKAMEZAWA Hiroyuki 			onlined_pages++;
75675884fb1SKAMEZAWA Hiroyuki 		}
7572d070eabSMichal Hocko 
7582d070eabSMichal Hocko 	online_mem_sections(start_pfn, start_pfn + nr_pages);
7592d070eabSMichal Hocko 
76075884fb1SKAMEZAWA Hiroyuki 	*(unsigned long *)arg = onlined_pages;
76175884fb1SKAMEZAWA Hiroyuki 	return 0;
76275884fb1SKAMEZAWA Hiroyuki }
76375884fb1SKAMEZAWA Hiroyuki 
764d9713679SLai Jiangshan /* check which state of node_states will be changed when onlining memory */
765d9713679SLai Jiangshan static void node_states_check_changes_online(unsigned long nr_pages,
766d9713679SLai Jiangshan 	struct zone *zone, struct memory_notify *arg)
767d9713679SLai Jiangshan {
768d9713679SLai Jiangshan 	int nid = zone_to_nid(zone);
769d9713679SLai Jiangshan 	enum zone_type zone_last = ZONE_NORMAL;
770d9713679SLai Jiangshan 
771d9713679SLai Jiangshan 	/*
7726715ddf9SLai Jiangshan 	 * If we have HIGHMEM or a movable node, node_states[N_NORMAL_MEMORY]
7736715ddf9SLai Jiangshan 	 * contains nodes which have zones of 0...ZONE_NORMAL,
7746715ddf9SLai Jiangshan 	 * so set zone_last to ZONE_NORMAL.
775d9713679SLai Jiangshan 	 *
7766715ddf9SLai Jiangshan 	 * If we have neither HIGHMEM nor a movable node,
7776715ddf9SLai Jiangshan 	 * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
7786715ddf9SLai Jiangshan 	 * 0...ZONE_MOVABLE, so set zone_last to ZONE_MOVABLE.
779d9713679SLai Jiangshan 	 */
7806715ddf9SLai Jiangshan 	if (N_MEMORY == N_NORMAL_MEMORY)
781d9713679SLai Jiangshan 		zone_last = ZONE_MOVABLE;
782d9713679SLai Jiangshan 
783d9713679SLai Jiangshan 	/*
784d9713679SLai Jiangshan 	 * If the memory to be onlined is in a zone of 0...zone_last, and
785d9713679SLai Jiangshan 	 * the zones of 0...zone_last have no memory before onlining, we will
786d9713679SLai Jiangshan 	 * need to set the node in node_states[N_NORMAL_MEMORY] after
787d9713679SLai Jiangshan 	 * the memory is onlined.
788d9713679SLai Jiangshan 	 */
789d9713679SLai Jiangshan 	if (zone_idx(zone) <= zone_last && !node_state(nid, N_NORMAL_MEMORY))
790d9713679SLai Jiangshan 		arg->status_change_nid_normal = nid;
791d9713679SLai Jiangshan 	else
792d9713679SLai Jiangshan 		arg->status_change_nid_normal = -1;
793d9713679SLai Jiangshan 
7946715ddf9SLai Jiangshan #ifdef CONFIG_HIGHMEM
7956715ddf9SLai Jiangshan 	/*
7966715ddf9SLai Jiangshan 	 * If we have a movable node, node_states[N_HIGH_MEMORY]
7976715ddf9SLai Jiangshan 	 * contains nodes which have zones of 0...ZONE_HIGHMEM,
7986715ddf9SLai Jiangshan 	 * set zone_last to ZONE_HIGHMEM.
7996715ddf9SLai Jiangshan 	 *
8006715ddf9SLai Jiangshan 	 * If we don't have a movable node, node_states[N_NORMAL_MEMORY]
8016715ddf9SLai Jiangshan 	 * contains nodes which have zones of 0...ZONE_MOVABLE,
8026715ddf9SLai Jiangshan 	 * set zone_last to ZONE_MOVABLE.
8036715ddf9SLai Jiangshan 	 */
8046715ddf9SLai Jiangshan 	zone_last = ZONE_HIGHMEM;
8056715ddf9SLai Jiangshan 	if (N_MEMORY == N_HIGH_MEMORY)
8066715ddf9SLai Jiangshan 		zone_last = ZONE_MOVABLE;
8076715ddf9SLai Jiangshan 
8086715ddf9SLai Jiangshan 	if (zone_idx(zone) <= zone_last && !node_state(nid, N_HIGH_MEMORY))
8096715ddf9SLai Jiangshan 		arg->status_change_nid_high = nid;
8106715ddf9SLai Jiangshan 	else
8116715ddf9SLai Jiangshan 		arg->status_change_nid_high = -1;
8126715ddf9SLai Jiangshan #else
8136715ddf9SLai Jiangshan 	arg->status_change_nid_high = arg->status_change_nid_normal;
8146715ddf9SLai Jiangshan #endif
8156715ddf9SLai Jiangshan 
816d9713679SLai Jiangshan 	/*
817d9713679SLai Jiangshan 	 * If the node doesn't have memory before onlining, we will need to
8186715ddf9SLai Jiangshan 	 * set the node in node_states[N_MEMORY] after the memory
819d9713679SLai Jiangshan 	 * is onlined.
820d9713679SLai Jiangshan 	 */
8216715ddf9SLai Jiangshan 	if (!node_state(nid, N_MEMORY))
822d9713679SLai Jiangshan 		arg->status_change_nid = nid;
823d9713679SLai Jiangshan 	else
824d9713679SLai Jiangshan 		arg->status_change_nid = -1;
825d9713679SLai Jiangshan }
826d9713679SLai Jiangshan 
827d9713679SLai Jiangshan static void node_states_set_node(int node, struct memory_notify *arg)
828d9713679SLai Jiangshan {
829d9713679SLai Jiangshan 	if (arg->status_change_nid_normal >= 0)
830d9713679SLai Jiangshan 		node_set_state(node, N_NORMAL_MEMORY);
831d9713679SLai Jiangshan 
8326715ddf9SLai Jiangshan 	if (arg->status_change_nid_high >= 0)
833d9713679SLai Jiangshan 		node_set_state(node, N_HIGH_MEMORY);
8346715ddf9SLai Jiangshan 
8356715ddf9SLai Jiangshan 	node_set_state(node, N_MEMORY);
836d9713679SLai Jiangshan }
837d9713679SLai Jiangshan 
838f1dd2cd1SMichal Hocko bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages, int online_type)
839df429ac0SReza Arbab {
840f1dd2cd1SMichal Hocko 	struct pglist_data *pgdat = NODE_DATA(nid);
841f1dd2cd1SMichal Hocko 	struct zone *movable_zone = &pgdat->node_zones[ZONE_MOVABLE];
842c246a213SMichal Hocko 	struct zone *default_zone = default_zone_for_pfn(nid, pfn, nr_pages);
843df429ac0SReza Arbab 
844f1dd2cd1SMichal Hocko 	/*
845f1dd2cd1SMichal Hocko 	 * TODO there shouldn't be any inherent reason to have ZONE_NORMAL
846f1dd2cd1SMichal Hocko 	 * physically before ZONE_MOVABLE. All we need is that they do not
847f1dd2cd1SMichal Hocko 	 * overlap. Historically we didn't allow ZONE_NORMAL after ZONE_MOVABLE,
848f1dd2cd1SMichal Hocko 	 * though, so let's stick with that for simplicity for now.
849f1dd2cd1SMichal Hocko 	 * TODO make sure we do not overlap with ZONE_DEVICE
850f1dd2cd1SMichal Hocko 	 */
851f1dd2cd1SMichal Hocko 	if (online_type == MMOP_ONLINE_KERNEL) {
852f1dd2cd1SMichal Hocko 		if (zone_is_empty(movable_zone))
8538a1f780eSYasuaki Ishimatsu 			return true;
854f1dd2cd1SMichal Hocko 		return movable_zone->zone_start_pfn >= pfn + nr_pages;
855f1dd2cd1SMichal Hocko 	} else if (online_type == MMOP_ONLINE_MOVABLE) {
856c246a213SMichal Hocko 		return zone_end_pfn(default_zone) <= pfn;
857f1dd2cd1SMichal Hocko 	}
858f1dd2cd1SMichal Hocko 
859f1dd2cd1SMichal Hocko 	/* MMOP_ONLINE_KEEP will always succeed and inherits the current zone */
860f1dd2cd1SMichal Hocko 	return online_type == MMOP_ONLINE_KEEP;
861f1dd2cd1SMichal Hocko }
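
/*
 * Illustrative example, not part of this file (the pfn values are made up):
 * with ZONE_MOVABLE starting at pfn 0x100000, onlining [0xf8000, 0x100000)
 * as MMOP_ONLINE_KERNEL is allowed because movable_zone->zone_start_pfn
 * (0x100000) >= pfn + nr_pages (0x100000), while onlining any range at or
 * above 0x100000 as kernel memory is rejected.
 */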
862f1dd2cd1SMichal Hocko 
863f1dd2cd1SMichal Hocko static void __meminit resize_zone_range(struct zone *zone, unsigned long start_pfn,
864f1dd2cd1SMichal Hocko 		unsigned long nr_pages)
865f1dd2cd1SMichal Hocko {
866f1dd2cd1SMichal Hocko 	unsigned long old_end_pfn = zone_end_pfn(zone);
867f1dd2cd1SMichal Hocko 
868f1dd2cd1SMichal Hocko 	if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
869f1dd2cd1SMichal Hocko 		zone->zone_start_pfn = start_pfn;
870f1dd2cd1SMichal Hocko 
871f1dd2cd1SMichal Hocko 	zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn;
872f1dd2cd1SMichal Hocko }
873f1dd2cd1SMichal Hocko 
874f1dd2cd1SMichal Hocko static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned long start_pfn,
875f1dd2cd1SMichal Hocko                                      unsigned long nr_pages)
876f1dd2cd1SMichal Hocko {
877f1dd2cd1SMichal Hocko 	unsigned long old_end_pfn = pgdat_end_pfn(pgdat);
878f1dd2cd1SMichal Hocko 
879f1dd2cd1SMichal Hocko 	if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
880f1dd2cd1SMichal Hocko 		pgdat->node_start_pfn = start_pfn;
881f1dd2cd1SMichal Hocko 
882f1dd2cd1SMichal Hocko 	pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn;
883f1dd2cd1SMichal Hocko }
884f1dd2cd1SMichal Hocko 
885cdf72f25SMichal Hocko void __ref move_pfn_range_to_zone(struct zone *zone,
886f1dd2cd1SMichal Hocko 		unsigned long start_pfn, unsigned long nr_pages)
887f1dd2cd1SMichal Hocko {
888f1dd2cd1SMichal Hocko 	struct pglist_data *pgdat = zone->zone_pgdat;
889f1dd2cd1SMichal Hocko 	int nid = pgdat->node_id;
890f1dd2cd1SMichal Hocko 	unsigned long flags;
891f1dd2cd1SMichal Hocko 
892f1dd2cd1SMichal Hocko 	if (zone_is_empty(zone))
893f1dd2cd1SMichal Hocko 		init_currently_empty_zone(zone, start_pfn, nr_pages);
894f1dd2cd1SMichal Hocko 
895f1dd2cd1SMichal Hocko 	clear_zone_contiguous(zone);
896f1dd2cd1SMichal Hocko 
897f1dd2cd1SMichal Hocko 	/* TODO Huh pgdat is irqsave while zone is not. It used to be like that before */
898f1dd2cd1SMichal Hocko 	pgdat_resize_lock(pgdat, &flags);
899f1dd2cd1SMichal Hocko 	zone_span_writelock(zone);
900f1dd2cd1SMichal Hocko 	resize_zone_range(zone, start_pfn, nr_pages);
901f1dd2cd1SMichal Hocko 	zone_span_writeunlock(zone);
902f1dd2cd1SMichal Hocko 	resize_pgdat_range(pgdat, start_pfn, nr_pages);
903f1dd2cd1SMichal Hocko 	pgdat_resize_unlock(pgdat, &flags);
904f1dd2cd1SMichal Hocko 
905f1dd2cd1SMichal Hocko 	/*
906f1dd2cd1SMichal Hocko 	 * TODO now we have a visible range of pages which are not associated
907f1dd2cd1SMichal Hocko 	 * with their zone properly. Not nice but set_pfnblock_flags_mask
908f1dd2cd1SMichal Hocko 	 * expects the zone to span the pfn range. All the pages in the range
909f1dd2cd1SMichal Hocko 	 * are reserved, so nobody should be touching them and we should be safe.
910f1dd2cd1SMichal Hocko 	 */
911f1dd2cd1SMichal Hocko 	memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn, MEMMAP_HOTPLUG);
912f1dd2cd1SMichal Hocko 
913f1dd2cd1SMichal Hocko 	set_zone_contiguous(zone);
914f1dd2cd1SMichal Hocko }
915f1dd2cd1SMichal Hocko 
916f1dd2cd1SMichal Hocko /*
917c246a213SMichal Hocko  * Returns a default kernel memory zone for the given pfn range.
918c246a213SMichal Hocko  * If no kernel zone covers this pfn range it will automatically fall
919c246a213SMichal Hocko  * back to ZONE_NORMAL.
920c246a213SMichal Hocko  */
921c246a213SMichal Hocko struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn,
922c246a213SMichal Hocko 		unsigned long nr_pages)
923c246a213SMichal Hocko {
924c246a213SMichal Hocko 	struct pglist_data *pgdat = NODE_DATA(nid);
925c246a213SMichal Hocko 	int zid;
926c246a213SMichal Hocko 
927c246a213SMichal Hocko 	for (zid = 0; zid <= ZONE_NORMAL; zid++) {
928c246a213SMichal Hocko 		struct zone *zone = &pgdat->node_zones[zid];
929c246a213SMichal Hocko 
930c246a213SMichal Hocko 		if (zone_intersects(zone, start_pfn, nr_pages))
931c246a213SMichal Hocko 			return zone;
932c246a213SMichal Hocko 	}
933c246a213SMichal Hocko 
934c246a213SMichal Hocko 	return &pgdat->node_zones[ZONE_NORMAL];
935c246a213SMichal Hocko }
936c246a213SMichal Hocko 
937c246a213SMichal Hocko /*
938f1dd2cd1SMichal Hocko  * Associates the given pfn range with the given node and the zone appropriate
939f1dd2cd1SMichal Hocko  * for the given online type.
940f1dd2cd1SMichal Hocko  */
941f1dd2cd1SMichal Hocko static struct zone * __meminit move_pfn_range(int online_type, int nid,
942f1dd2cd1SMichal Hocko 		unsigned long start_pfn, unsigned long nr_pages)
943f1dd2cd1SMichal Hocko {
944f1dd2cd1SMichal Hocko 	struct pglist_data *pgdat = NODE_DATA(nid);
945c246a213SMichal Hocko 	struct zone *zone = default_zone_for_pfn(nid, start_pfn, nr_pages);
946f1dd2cd1SMichal Hocko 
947f1dd2cd1SMichal Hocko 	if (online_type == MMOP_ONLINE_KEEP) {
948f1dd2cd1SMichal Hocko 		struct zone *movable_zone = &pgdat->node_zones[ZONE_MOVABLE];
949f1dd2cd1SMichal Hocko 		/*
950a69578a1SMichal Hocko 		 * MMOP_ONLINE_KEEP defaults to MMOP_ONLINE_KERNEL but uses the
951a69578a1SMichal Hocko 		 * movable zone if that is not possible (e.g. we are within
952a69578a1SMichal Hocko 		 * or past the existing movable zone).
953f1dd2cd1SMichal Hocko 		 */
954a69578a1SMichal Hocko 		if (!allow_online_pfn_range(nid, start_pfn, nr_pages,
955a69578a1SMichal Hocko 					MMOP_ONLINE_KERNEL))
956f1dd2cd1SMichal Hocko 			zone = movable_zone;
957f1dd2cd1SMichal Hocko 	} else if (online_type == MMOP_ONLINE_MOVABLE) {
958f1dd2cd1SMichal Hocko 		zone = &pgdat->node_zones[ZONE_MOVABLE];
959f1dd2cd1SMichal Hocko 	}
960f1dd2cd1SMichal Hocko 
961f1dd2cd1SMichal Hocko 	move_pfn_range_to_zone(zone, start_pfn, nr_pages);
962f1dd2cd1SMichal Hocko 	return zone;
963df429ac0SReza Arbab }
96475884fb1SKAMEZAWA Hiroyuki 
96530467e0bSDavid Rientjes /* Must be protected by mem_hotplug_begin() */
966511c2abaSLai Jiangshan int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
96775884fb1SKAMEZAWA Hiroyuki {
968aa47228aSCody P Schafer 	unsigned long flags;
9693947be19SDave Hansen 	unsigned long onlined_pages = 0;
9703947be19SDave Hansen 	struct zone *zone;
9716811378eSYasunori Goto 	int need_zonelists_rebuild = 0;
9727b78d335SYasunori Goto 	int nid;
9737b78d335SYasunori Goto 	int ret;
9747b78d335SYasunori Goto 	struct memory_notify arg;
9753947be19SDave Hansen 
976f1dd2cd1SMichal Hocko 	nid = pfn_to_nid(pfn);
977f1dd2cd1SMichal Hocko 	if (!allow_online_pfn_range(nid, pfn, nr_pages, online_type))
97830467e0bSDavid Rientjes 		return -EINVAL;
97974d42d8fSLai Jiangshan 
980f1dd2cd1SMichal Hocko 	/* associate pfn range with the zone */
981f1dd2cd1SMichal Hocko 	zone = move_pfn_range(online_type, nid, pfn, nr_pages);
982511c2abaSLai Jiangshan 
9837b78d335SYasunori Goto 	arg.start_pfn = pfn;
9847b78d335SYasunori Goto 	arg.nr_pages = nr_pages;
985d9713679SLai Jiangshan 	node_states_check_changes_online(nr_pages, zone, &arg);
9867b78d335SYasunori Goto 
9877b78d335SYasunori Goto 	ret = memory_notify(MEM_GOING_ONLINE, &arg);
9887b78d335SYasunori Goto 	ret = notifier_to_errno(ret);
989e33e33b4SChen Yucong 	if (ret)
990e33e33b4SChen Yucong 		goto failed_addition;
991e33e33b4SChen Yucong 
9923947be19SDave Hansen 	/*
9936811378eSYasunori Goto 	 * If this zone is not populated, then it is not in the zonelist.
9946811378eSYasunori Goto 	 * This means the page allocator ignores this zone.
9956811378eSYasunori Goto 	 * So the zonelist must be updated after onlining.
9966811378eSYasunori Goto 	 */
9974eaf3f64SHaicheng Li 	mutex_lock(&zonelists_mutex);
9986dcd73d7SWen Congyang 	if (!populated_zone(zone)) {
9996811378eSYasunori Goto 		need_zonelists_rebuild = 1;
10006dcd73d7SWen Congyang 		build_all_zonelists(NULL, zone);
10016dcd73d7SWen Congyang 	}
10026811378eSYasunori Goto 
1003908eedc6SKAMEZAWA Hiroyuki 	ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
100475884fb1SKAMEZAWA Hiroyuki 		online_pages_range);
1005fd8a4221SGeoff Levand 	if (ret) {
10066dcd73d7SWen Congyang 		if (need_zonelists_rebuild)
10076dcd73d7SWen Congyang 			zone_pcp_reset(zone);
10084eaf3f64SHaicheng Li 		mutex_unlock(&zonelists_mutex);
1009e33e33b4SChen Yucong 		goto failed_addition;
1010fd8a4221SGeoff Levand 	}
1011fd8a4221SGeoff Levand 
10123947be19SDave Hansen 	zone->present_pages += onlined_pages;
1013aa47228aSCody P Schafer 
1014aa47228aSCody P Schafer 	pgdat_resize_lock(zone->zone_pgdat, &flags);
1015f2937be5SYasunori Goto 	zone->zone_pgdat->node_present_pages += onlined_pages;
1016aa47228aSCody P Schafer 	pgdat_resize_unlock(zone->zone_pgdat, &flags);
1017aa47228aSCody P Schafer 
101808dff7b7SJiang Liu 	if (onlined_pages) {
1019e888ca35SVlastimil Babka 		node_states_set_node(nid, &arg);
10201f522509SHaicheng Li 		if (need_zonelists_rebuild)
10216dcd73d7SWen Congyang 			build_all_zonelists(NULL, NULL);
10221f522509SHaicheng Li 		else
1023112067f0SShaohua Li 			zone_pcp_update(zone);
102408dff7b7SJiang Liu 	}
10251f522509SHaicheng Li 
10264eaf3f64SHaicheng Li 	mutex_unlock(&zonelists_mutex);
10271b79acc9SKOSAKI Motohiro 
10281b79acc9SKOSAKI Motohiro 	init_per_zone_wmark_min();
10291b79acc9SKOSAKI Motohiro 
1030698b1b30SVlastimil Babka 	if (onlined_pages) {
1031e888ca35SVlastimil Babka 		kswapd_run(nid);
1032698b1b30SVlastimil Babka 		kcompactd_run(nid);
1033698b1b30SVlastimil Babka 	}
103461b13993SDave Hansen 
10355a4d4361SKAMEZAWA Hiroyuki 	vm_total_pages = nr_free_pagecache_pages();
10362f7f24ecSKent Liu 
10372d1d43f6SChandra Seetharaman 	writeback_set_ratelimit();
10387b78d335SYasunori Goto 
10397b78d335SYasunori Goto 	if (onlined_pages)
10407b78d335SYasunori Goto 		memory_notify(MEM_ONLINE, &arg);
104130467e0bSDavid Rientjes 	return 0;
1042e33e33b4SChen Yucong 
1043e33e33b4SChen Yucong failed_addition:
1044e33e33b4SChen Yucong 	pr_debug("online_pages [mem %#010llx-%#010llx] failed\n",
1045e33e33b4SChen Yucong 		 (unsigned long long) pfn << PAGE_SHIFT,
1046e33e33b4SChen Yucong 		 (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
1047e33e33b4SChen Yucong 	memory_notify(MEM_CANCEL_ONLINE, &arg);
1048e33e33b4SChen Yucong 	return ret;
10493947be19SDave Hansen }
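
/*
 * Illustrative note, not part of this file: online_pages() is normally
 * reached from the memory block sysfs interface, e.g.
 *
 *	echo online_movable > /sys/devices/system/memory/memoryN/state
 *
 * The driver in drivers/base/memory.c maps "online", "online_kernel" and
 * "online_movable" to MMOP_ONLINE_KEEP, MMOP_ONLINE_KERNEL and
 * MMOP_ONLINE_MOVABLE and, under the protection required by the comment
 * above online_pages(), calls it for the block's pfn range.
 */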
105053947027SKeith Mannthey #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
1051bc02af93SYasunori Goto 
10520bd85420STang Chen static void reset_node_present_pages(pg_data_t *pgdat)
10530bd85420STang Chen {
10540bd85420STang Chen 	struct zone *z;
10550bd85420STang Chen 
10560bd85420STang Chen 	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
10570bd85420STang Chen 		z->present_pages = 0;
10580bd85420STang Chen 
10590bd85420STang Chen 	pgdat->node_present_pages = 0;
10600bd85420STang Chen }
10610bd85420STang Chen 
1062e1319331SHidetoshi Seto /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
1063e1319331SHidetoshi Seto static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
10649af3c2deSYasunori Goto {
10659af3c2deSYasunori Goto 	struct pglist_data *pgdat;
10669af3c2deSYasunori Goto 	unsigned long zones_size[MAX_NR_ZONES] = {0};
10679af3c2deSYasunori Goto 	unsigned long zholes_size[MAX_NR_ZONES] = {0};
1068c8e861a5SFabian Frederick 	unsigned long start_pfn = PFN_DOWN(start);
10699af3c2deSYasunori Goto 
1070a1e565aaSTang Chen 	pgdat = NODE_DATA(nid);
1071a1e565aaSTang Chen 	if (!pgdat) {
10729af3c2deSYasunori Goto 		pgdat = arch_alloc_nodedata(nid);
10739af3c2deSYasunori Goto 		if (!pgdat)
10749af3c2deSYasunori Goto 			return NULL;
10759af3c2deSYasunori Goto 
10769af3c2deSYasunori Goto 		arch_refresh_nodedata(nid, pgdat);
1077b0dc3a34SGu Zheng 	} else {
1078e716f2ebSMel Gorman 		/*
1079e716f2ebSMel Gorman 		 * Reset the nr_zones, order and classzone_idx before reuse.
1080e716f2ebSMel Gorman 		 * Note that kswapd will init kswapd_classzone_idx properly
1081e716f2ebSMel Gorman 		 * when it starts in the near future.
1082e716f2ebSMel Gorman 		 */
1083b0dc3a34SGu Zheng 		pgdat->nr_zones = 0;
108438087d9bSMel Gorman 		pgdat->kswapd_order = 0;
108538087d9bSMel Gorman 		pgdat->kswapd_classzone_idx = 0;
1086a1e565aaSTang Chen 	}
10879af3c2deSYasunori Goto 
10889af3c2deSYasunori Goto 	/* we can use NODE_DATA(nid) from here */
10899af3c2deSYasunori Goto 
10909af3c2deSYasunori Goto 	/* init node's zones as empty zones, we don't have any present pages. */
10919109fb7bSJohannes Weiner 	free_area_init_node(nid, zones_size, start_pfn, zholes_size);
10925830169fSReza Arbab 	pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);
10939af3c2deSYasunori Goto 
1094959ecc48SKAMEZAWA Hiroyuki 	/*
1095959ecc48SKAMEZAWA Hiroyuki 	 * The node we allocated has no zone fallback lists. To avoid
1096959ecc48SKAMEZAWA Hiroyuki 	 * accessing an uninitialized zonelist, build it here.
1097959ecc48SKAMEZAWA Hiroyuki 	 */
1098f957db4fSDavid Rientjes 	mutex_lock(&zonelists_mutex);
10999adb62a5SJiang Liu 	build_all_zonelists(pgdat, NULL);
1100f957db4fSDavid Rientjes 	mutex_unlock(&zonelists_mutex);
1101959ecc48SKAMEZAWA Hiroyuki 
1102f784a3f1STang Chen 	/*
1103f784a3f1STang Chen 	 * zone->managed_pages is set to an approximate value in
1104f784a3f1STang Chen 	 * free_area_init_core(), which would cause
1105f784a3f1STang Chen 	 * /sys/devices/system/node/nodeX/meminfo to report wrong data.
1106f784a3f1STang Chen 	 * So reset it to 0 before any memory is onlined.
1107f784a3f1STang Chen 	 */
1108f784a3f1STang Chen 	reset_node_managed_pages(pgdat);
1109f784a3f1STang Chen 
11100bd85420STang Chen 	/*
11110bd85420STang Chen 	 * When memory is hot-added, all the memory is in offline state. So
11120bd85420STang Chen 	 * clear all zones' present_pages because they will be updated in
11130bd85420STang Chen 	 * online_pages() and offline_pages().
11140bd85420STang Chen 	 */
11150bd85420STang Chen 	reset_node_present_pages(pgdat);
11160bd85420STang Chen 
11179af3c2deSYasunori Goto 	return pgdat;
11189af3c2deSYasunori Goto }
11199af3c2deSYasunori Goto 
11209af3c2deSYasunori Goto static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
11219af3c2deSYasunori Goto {
11229af3c2deSYasunori Goto 	arch_refresh_nodedata(nid, NULL);
11235830169fSReza Arbab 	free_percpu(pgdat->per_cpu_nodestats);
11249af3c2deSYasunori Goto 	arch_free_nodedata(pgdat);
11259af3c2deSYasunori Goto 	return;
11269af3c2deSYasunori Goto }
11279af3c2deSYasunori Goto 
11280a547039SKAMEZAWA Hiroyuki 
112901b0f197SToshi Kani /**
113001b0f197SToshi Kani  * try_online_node - online a node if offlined
113101b0f197SToshi Kani  *
1132cf23422bSminskey guo  * Called by cpu_up() to online a node that has no onlined memory.
1133cf23422bSminskey guo  */
113401b0f197SToshi Kani int try_online_node(int nid)
1135cf23422bSminskey guo {
1136cf23422bSminskey guo 	pg_data_t	*pgdat;
1137cf23422bSminskey guo 	int	ret;
1138cf23422bSminskey guo 
113901b0f197SToshi Kani 	if (node_online(nid))
114001b0f197SToshi Kani 		return 0;
114101b0f197SToshi Kani 
1142bfc8c901SVladimir Davydov 	mem_hotplug_begin();
1143cf23422bSminskey guo 	pgdat = hotadd_new_pgdat(nid, 0);
11447553e8f2SDavid Rientjes 	if (!pgdat) {
114501b0f197SToshi Kani 		pr_err("Cannot online node %d due to NULL pgdat\n", nid);
1146cf23422bSminskey guo 		ret = -ENOMEM;
1147cf23422bSminskey guo 		goto out;
1148cf23422bSminskey guo 	}
1149cf23422bSminskey guo 	node_set_online(nid);
1150cf23422bSminskey guo 	ret = register_one_node(nid);
1151cf23422bSminskey guo 	BUG_ON(ret);
1152cf23422bSminskey guo 
115301b0f197SToshi Kani 	if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
115401b0f197SToshi Kani 		mutex_lock(&zonelists_mutex);
115501b0f197SToshi Kani 		build_all_zonelists(NULL, NULL);
115601b0f197SToshi Kani 		mutex_unlock(&zonelists_mutex);
115701b0f197SToshi Kani 	}
115801b0f197SToshi Kani 
1159cf23422bSminskey guo out:
1160bfc8c901SVladimir Davydov 	mem_hotplug_done();
1161cf23422bSminskey guo 	return ret;
1162cf23422bSminskey guo }
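
/*
 * A minimal usage sketch, illustrative only (nid, size and ptr are
 * hypothetical): a caller about to do node-local allocations on a possibly
 * offline, memoryless node first makes sure the node's pgdat exists.
 * cpu_up() does essentially this for the node of the CPU being brought up.
 *
 *	ret = try_online_node(nid);
 *	if (ret)
 *		return ret;
 *	ptr = kzalloc_node(size, GFP_KERNEL, nid);
 */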
1163cf23422bSminskey guo 
116427356f54SToshi Kani static int check_hotplug_memory_range(u64 start, u64 size)
116527356f54SToshi Kani {
1166c8e861a5SFabian Frederick 	u64 start_pfn = PFN_DOWN(start);
116727356f54SToshi Kani 	u64 nr_pages = size >> PAGE_SHIFT;
116827356f54SToshi Kani 
116927356f54SToshi Kani 	/* Memory range must be aligned to the section size */
117027356f54SToshi Kani 	if ((start_pfn & ~PAGE_SECTION_MASK) ||
117127356f54SToshi Kani 	    (nr_pages % PAGES_PER_SECTION) || (!nr_pages)) {
117227356f54SToshi Kani 		pr_err("Section-unaligned hotplug range: start 0x%llx, size 0x%llx\n",
117327356f54SToshi Kani 				(unsigned long long)start,
117427356f54SToshi Kani 				(unsigned long long)size);
117527356f54SToshi Kani 		return -EINVAL;
117627356f54SToshi Kani 	}
117727356f54SToshi Kani 
117827356f54SToshi Kani 	return 0;
117927356f54SToshi Kani }
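
/*
 * A worked example of the alignment rule above, assuming x86-64 defaults
 * where SECTION_SIZE_BITS is 27, i.e. PAGES_PER_SECTION == 32768 and each
 * section covers 128 MiB (the exact section size is architecture dependent):
 *
 *	check_hotplug_memory_range(0x100000000ULL, 0x10000000ULL) returns 0
 *	check_hotplug_memory_range(0x100000000ULL, 0x04000000ULL) returns -EINVAL
 *
 * The first range starts at 4 GiB and spans two whole 128 MiB sections; the
 * second is only 64 MiB, so it is rejected as section-unaligned.
 */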
118027356f54SToshi Kani 
118131bc3858SVitaly Kuznetsov static int online_memory_block(struct memory_block *mem, void *arg)
118231bc3858SVitaly Kuznetsov {
1183dc18d706SNathan Fontenot 	return device_online(&mem->dev);
118431bc3858SVitaly Kuznetsov }
118531bc3858SVitaly Kuznetsov 
118631168481SAl Viro /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
118731bc3858SVitaly Kuznetsov int __ref add_memory_resource(int nid, struct resource *res, bool online)
1188bc02af93SYasunori Goto {
118962cedb9fSDavid Vrabel 	u64 start, size;
11909af3c2deSYasunori Goto 	pg_data_t *pgdat = NULL;
1191a1e565aaSTang Chen 	bool new_pgdat;
1192a1e565aaSTang Chen 	bool new_node;
1193bc02af93SYasunori Goto 	int ret;
1194bc02af93SYasunori Goto 
119562cedb9fSDavid Vrabel 	start = res->start;
119662cedb9fSDavid Vrabel 	size = resource_size(res);
119762cedb9fSDavid Vrabel 
119827356f54SToshi Kani 	ret = check_hotplug_memory_range(start, size);
119927356f54SToshi Kani 	if (ret)
120027356f54SToshi Kani 		return ret;
120127356f54SToshi Kani 
1202a1e565aaSTang Chen 	{	/* Stupid hack to suppress address-never-null warning */
1203a1e565aaSTang Chen 		void *p = NODE_DATA(nid);
1204a1e565aaSTang Chen 		new_pgdat = !p;
1205a1e565aaSTang Chen 	}
1206ac13c462SNathan Zimmer 
1207bfc8c901SVladimir Davydov 	mem_hotplug_begin();
1208ac13c462SNathan Zimmer 
12097f36e3e5STang Chen 	/*
12107f36e3e5STang Chen 	 * Add new range to memblock so that when hotadd_new_pgdat() is called
12117f36e3e5STang Chen 	 * to allocate new pgdat, get_pfn_range_for_nid() will be able to find
12127f36e3e5STang Chen 	 * this new range and calculate total pages correctly.  The range will
12137f36e3e5STang Chen 	 * be removed at hot-remove time.
12147f36e3e5STang Chen 	 */
12157f36e3e5STang Chen 	memblock_add_node(start, size, nid);
12167f36e3e5STang Chen 
1217a1e565aaSTang Chen 	new_node = !node_online(nid);
1218a1e565aaSTang Chen 	if (new_node) {
12199af3c2deSYasunori Goto 		pgdat = hotadd_new_pgdat(nid, start);
12206ad696d2SAndi Kleen 		ret = -ENOMEM;
12219af3c2deSYasunori Goto 		if (!pgdat)
122241b9e2d7SWen Congyang 			goto error;
12239af3c2deSYasunori Goto 	}
12249af3c2deSYasunori Goto 
1225bc02af93SYasunori Goto 	/* call arch's memory hotadd */
12263d79a728SMichal Hocko 	ret = arch_add_memory(nid, start, size, true);
1227bc02af93SYasunori Goto 
12289af3c2deSYasunori Goto 	if (ret < 0)
12299af3c2deSYasunori Goto 		goto error;
12309af3c2deSYasunori Goto 
12310fc44159SYasunori Goto 	/* We online the node here; we can't roll back from here. */
12329af3c2deSYasunori Goto 	node_set_online(nid);
12339af3c2deSYasunori Goto 
1234a1e565aaSTang Chen 	if (new_node) {
12359037a993SMichal Hocko 		unsigned long start_pfn = start >> PAGE_SHIFT;
12369037a993SMichal Hocko 		unsigned long nr_pages = size >> PAGE_SHIFT;
12379037a993SMichal Hocko 
12389037a993SMichal Hocko 		ret = __register_one_node(nid);
12399037a993SMichal Hocko 		if (ret)
12409037a993SMichal Hocko 			goto register_fail;
12419037a993SMichal Hocko 
12429037a993SMichal Hocko 		/*
12439037a993SMichal Hocko 		 * Link memory sections under this node. This is already
12449037a993SMichal Hocko 		 * done when creating memory sections in register_new_memory()
12459037a993SMichal Hocko 		 * but that depends on the node being registered, so offline
12469037a993SMichal Hocko 		 * nodes have to go through register_node().
12479037a993SMichal Hocko 		 * TODO clean up this mess.
12489037a993SMichal Hocko 		 */
12499037a993SMichal Hocko 		ret = link_mem_sections(nid, start_pfn, nr_pages);
12509037a993SMichal Hocko register_fail:
12510fc44159SYasunori Goto 		/*
12520fc44159SYasunori Goto 		 * If the sysfs files of the new node can't be created, cpus on
12530fc44159SYasunori Goto 		 * the node can't be hot-added. There is no way to roll back now,
12540fc44159SYasunori Goto 		 * so check with BUG_ON() to catch it reluctantly.
12550fc44159SYasunori Goto 		 */
12560fc44159SYasunori Goto 		BUG_ON(ret);
12570fc44159SYasunori Goto 	}
12580fc44159SYasunori Goto 
1259d96ae530Sakpm@linux-foundation.org 	/* create new memmap entry */
1260d96ae530Sakpm@linux-foundation.org 	firmware_map_add_hotplug(start, start + size, "System RAM");
1261d96ae530Sakpm@linux-foundation.org 
126231bc3858SVitaly Kuznetsov 	/* online pages if requested */
126331bc3858SVitaly Kuznetsov 	if (online)
126431bc3858SVitaly Kuznetsov 		walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1),
126531bc3858SVitaly Kuznetsov 				  NULL, online_memory_block);
126631bc3858SVitaly Kuznetsov 
12676ad696d2SAndi Kleen 	goto out;
12686ad696d2SAndi Kleen 
12699af3c2deSYasunori Goto error:
12709af3c2deSYasunori Goto 	/* rollback pgdat allocation and others */
12719af3c2deSYasunori Goto 	if (new_pgdat)
12729af3c2deSYasunori Goto 		rollback_node_hotadd(nid, pgdat);
12737f36e3e5STang Chen 	memblock_remove(start, size);
12749af3c2deSYasunori Goto 
12756ad696d2SAndi Kleen out:
1276bfc8c901SVladimir Davydov 	mem_hotplug_done();
1277bc02af93SYasunori Goto 	return ret;
1278bc02af93SYasunori Goto }
127962cedb9fSDavid Vrabel EXPORT_SYMBOL_GPL(add_memory_resource);
128062cedb9fSDavid Vrabel 
128162cedb9fSDavid Vrabel int __ref add_memory(int nid, u64 start, u64 size)
128262cedb9fSDavid Vrabel {
128362cedb9fSDavid Vrabel 	struct resource *res;
128462cedb9fSDavid Vrabel 	int ret;
128562cedb9fSDavid Vrabel 
128662cedb9fSDavid Vrabel 	res = register_memory_resource(start, size);
12876f754ba4SVitaly Kuznetsov 	if (IS_ERR(res))
12886f754ba4SVitaly Kuznetsov 		return PTR_ERR(res);
128962cedb9fSDavid Vrabel 
129031bc3858SVitaly Kuznetsov 	ret = add_memory_resource(nid, res, memhp_auto_online);
129162cedb9fSDavid Vrabel 	if (ret < 0)
129262cedb9fSDavid Vrabel 		release_memory_resource(res);
129362cedb9fSDavid Vrabel 	return ret;
129462cedb9fSDavid Vrabel }
1295bc02af93SYasunori Goto EXPORT_SYMBOL_GPL(add_memory);
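
/*
 * A usage sketch; the helper name and the missing error handling are
 * illustrative, not taken from a real driver.  Code that discovers a new,
 * section-aligned range of RAM (an ACPI memory device, a virtualization
 * balloon, etc.) maps the physical address to a node and hands the range
 * to add_memory():
 *
 *	static int hotplug_new_range(u64 phys_addr, u64 size)
 *	{
 *		int nid = memory_add_physaddr_to_nid(phys_addr);
 *
 *		return add_memory(nid, phys_addr, size);
 *	}
 *
 * Whether the new blocks are also onlined right away depends on the
 * memhp_auto_online policy passed to add_memory_resource() above.
 */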
12960c0e6195SKAMEZAWA Hiroyuki 
12970c0e6195SKAMEZAWA Hiroyuki #ifdef CONFIG_MEMORY_HOTREMOVE
12980c0e6195SKAMEZAWA Hiroyuki /*
12995c755e9fSBadari Pulavarty  * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
13005c755e9fSBadari Pulavarty  * set and the size of the free page is given by page_order(). Using this,
13015c755e9fSBadari Pulavarty  * the function determines if the pageblock contains only free pages.
13025c755e9fSBadari Pulavarty  * Due to buddy constraints, a free page at least the size of a pageblock will
13035c755e9fSBadari Pulavarty  * be located at the start of the pageblock.
13045c755e9fSBadari Pulavarty  */
13055c755e9fSBadari Pulavarty static inline int pageblock_free(struct page *page)
13065c755e9fSBadari Pulavarty {
13075c755e9fSBadari Pulavarty 	return PageBuddy(page) && page_order(page) >= pageblock_order;
13085c755e9fSBadari Pulavarty }
13095c755e9fSBadari Pulavarty 
13105c755e9fSBadari Pulavarty /* Return the start of the next active pageblock after a given page */
13115c755e9fSBadari Pulavarty static struct page *next_active_pageblock(struct page *page)
13125c755e9fSBadari Pulavarty {
13135c755e9fSBadari Pulavarty 	/* Ensure the starting page is pageblock-aligned */
13145c755e9fSBadari Pulavarty 	BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
13155c755e9fSBadari Pulavarty 
13165c755e9fSBadari Pulavarty 	/* If the entire pageblock is free, move to the end of free page */
13170dcc48c1SKAMEZAWA Hiroyuki 	if (pageblock_free(page)) {
13180dcc48c1SKAMEZAWA Hiroyuki 		int order;
13190dcc48c1SKAMEZAWA Hiroyuki 		/* Be careful: we don't hold any locks, so page_order() can change. */
13200dcc48c1SKAMEZAWA Hiroyuki 		order = page_order(page);
13210dcc48c1SKAMEZAWA Hiroyuki 		if ((order < MAX_ORDER) && (order >= pageblock_order))
13220dcc48c1SKAMEZAWA Hiroyuki 			return page + (1 << order);
13230dcc48c1SKAMEZAWA Hiroyuki 	}
13245c755e9fSBadari Pulavarty 
13250dcc48c1SKAMEZAWA Hiroyuki 	return page + pageblock_nr_pages;
13265c755e9fSBadari Pulavarty }
13275c755e9fSBadari Pulavarty 
13285c755e9fSBadari Pulavarty /* Checks if this range of memory is likely to be hot-removable. */
1329c98940f6SYaowei Bai bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
13305c755e9fSBadari Pulavarty {
13315c755e9fSBadari Pulavarty 	struct page *page = pfn_to_page(start_pfn);
13325c755e9fSBadari Pulavarty 	struct page *end_page = page + nr_pages;
13335c755e9fSBadari Pulavarty 
13345c755e9fSBadari Pulavarty 	/* Check the starting page of each pageblock within the range */
13355c755e9fSBadari Pulavarty 	for (; page < end_page; page = next_active_pageblock(page)) {
133649ac8255SKAMEZAWA Hiroyuki 		if (!is_pageblock_removable_nolock(page))
1337c98940f6SYaowei Bai 			return false;
133849ac8255SKAMEZAWA Hiroyuki 		cond_resched();
13395c755e9fSBadari Pulavarty 	}
13405c755e9fSBadari Pulavarty 
13415c755e9fSBadari Pulavarty 	/* All pageblocks in the memory block are likely to be hot-removable */
1342c98940f6SYaowei Bai 	return true;
13435c755e9fSBadari Pulavarty }
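
/*
 * The result of this check is what the memory block device exposes as its
 * "removable" attribute, so from user space (block name and output are
 * only an example):
 *
 *	$ cat /sys/devices/system/memory/memory32/removable
 *	1
 *
 * A value of 1 only means removal is likely to succeed; pages can still
 * become pinned between this check and an actual offline attempt.
 */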
13445c755e9fSBadari Pulavarty 
13455c755e9fSBadari Pulavarty /*
1346deb88a2aSToshi Kani  * Confirm all pages in a range [start_pfn, end_pfn) belong to the same zone.
1347a96dfddbSToshi Kani  * When true, return the zone's valid [start, end) in *valid_start and *valid_end.
13480c0e6195SKAMEZAWA Hiroyuki  */
1349a96dfddbSToshi Kani int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
1350a96dfddbSToshi Kani 			 unsigned long *valid_start, unsigned long *valid_end)
13510c0e6195SKAMEZAWA Hiroyuki {
13525f0f2887SAndrew Banman 	unsigned long pfn, sec_end_pfn;
1353a96dfddbSToshi Kani 	unsigned long start, end;
13540c0e6195SKAMEZAWA Hiroyuki 	struct zone *zone = NULL;
13550c0e6195SKAMEZAWA Hiroyuki 	struct page *page;
13560c0e6195SKAMEZAWA Hiroyuki 	int i;
1357deb88a2aSToshi Kani 	for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
13580c0e6195SKAMEZAWA Hiroyuki 	     pfn < end_pfn;
1359deb88a2aSToshi Kani 	     pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
13605f0f2887SAndrew Banman 		/* Make sure the memory section is present first */
13615f0f2887SAndrew Banman 		if (!present_section_nr(pfn_to_section_nr(pfn)))
13625f0f2887SAndrew Banman 			continue;
13635f0f2887SAndrew Banman 		for (; pfn < sec_end_pfn && pfn < end_pfn;
13640c0e6195SKAMEZAWA Hiroyuki 		     pfn += MAX_ORDER_NR_PAGES) {
13650c0e6195SKAMEZAWA Hiroyuki 			i = 0;
13660c0e6195SKAMEZAWA Hiroyuki 			/* This is just a CONFIG_HOLES_IN_ZONE check. */
13675f0f2887SAndrew Banman 			while ((i < MAX_ORDER_NR_PAGES) &&
13685f0f2887SAndrew Banman 				!pfn_valid_within(pfn + i))
13690c0e6195SKAMEZAWA Hiroyuki 				i++;
1370d6d8c8a4Szhong jiang 			if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
13710c0e6195SKAMEZAWA Hiroyuki 				continue;
13720c0e6195SKAMEZAWA Hiroyuki 			page = pfn_to_page(pfn + i);
13730c0e6195SKAMEZAWA Hiroyuki 			if (zone && page_zone(page) != zone)
13740c0e6195SKAMEZAWA Hiroyuki 				return 0;
1375a96dfddbSToshi Kani 			if (!zone)
1376a96dfddbSToshi Kani 				start = pfn + i;
13770c0e6195SKAMEZAWA Hiroyuki 			zone = page_zone(page);
1378a96dfddbSToshi Kani 			end = pfn + MAX_ORDER_NR_PAGES;
13790c0e6195SKAMEZAWA Hiroyuki 		}
13805f0f2887SAndrew Banman 	}
1381deb88a2aSToshi Kani 
1382a96dfddbSToshi Kani 	if (zone) {
1383a96dfddbSToshi Kani 		*valid_start = start;
1384d6d8c8a4Szhong jiang 		*valid_end = min(end, end_pfn);
13850c0e6195SKAMEZAWA Hiroyuki 		return 1;
1386a96dfddbSToshi Kani 	} else {
1387deb88a2aSToshi Kani 		return 0;
13880c0e6195SKAMEZAWA Hiroyuki 	}
1389a96dfddbSToshi Kani }
13900c0e6195SKAMEZAWA Hiroyuki 
13910c0e6195SKAMEZAWA Hiroyuki /*
13920efadf48SYisheng Xie  * Scan pfn range [start,end) to find movable/migratable pages (LRU pages,
13930efadf48SYisheng Xie  * non-lru movable pages and hugepages). We scan by pfn because it's much
13940efadf48SYisheng Xie  * easier than walking the linked lists. This function returns the pfn
13950efadf48SYisheng Xie  * of the first movable page found, or 0 if none is found.
13960c0e6195SKAMEZAWA Hiroyuki  */
1397c8721bbbSNaoya Horiguchi static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
13980c0e6195SKAMEZAWA Hiroyuki {
13990c0e6195SKAMEZAWA Hiroyuki 	unsigned long pfn;
14000c0e6195SKAMEZAWA Hiroyuki 	struct page *page;
14010c0e6195SKAMEZAWA Hiroyuki 	for (pfn = start; pfn < end; pfn++) {
14020c0e6195SKAMEZAWA Hiroyuki 		if (pfn_valid(pfn)) {
14030c0e6195SKAMEZAWA Hiroyuki 			page = pfn_to_page(pfn);
14040c0e6195SKAMEZAWA Hiroyuki 			if (PageLRU(page))
14050c0e6195SKAMEZAWA Hiroyuki 				return pfn;
14060efadf48SYisheng Xie 			if (__PageMovable(page))
14070efadf48SYisheng Xie 				return pfn;
1408c8721bbbSNaoya Horiguchi 			if (PageHuge(page)) {
14097e1f049eSNaoya Horiguchi 				if (page_huge_active(page))
1410c8721bbbSNaoya Horiguchi 					return pfn;
1411c8721bbbSNaoya Horiguchi 				else
1412c8721bbbSNaoya Horiguchi 					pfn = round_up(pfn + 1,
1413c8721bbbSNaoya Horiguchi 						1 << compound_order(page)) - 1;
1414c8721bbbSNaoya Horiguchi 			}
14150c0e6195SKAMEZAWA Hiroyuki 		}
14160c0e6195SKAMEZAWA Hiroyuki 	}
14170c0e6195SKAMEZAWA Hiroyuki 	return 0;
14180c0e6195SKAMEZAWA Hiroyuki }
14190c0e6195SKAMEZAWA Hiroyuki 
1420394e31d2SXishi Qiu static struct page *new_node_page(struct page *page, unsigned long private,
1421394e31d2SXishi Qiu 		int **result)
1422394e31d2SXishi Qiu {
1423394e31d2SXishi Qiu 	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
1424394e31d2SXishi Qiu 	int nid = page_to_nid(page);
1425231e97e2SLi Zhong 	nodemask_t nmask = node_states[N_MEMORY];
1426231e97e2SLi Zhong 	struct page *new_page = NULL;
1427394e31d2SXishi Qiu 
1428394e31d2SXishi Qiu 	/*
1429394e31d2SXishi Qiu 	 * TODO: allocate a destination hugepage from the nearest neighbor node,
1430394e31d2SXishi Qiu 	 * in accordance with the memory policy of the user process if possible.
1431394e31d2SXishi Qiu 	 * For now, as a simple work-around, we use the next node as the destination.
1432394e31d2SXishi Qiu 	 */
1433394e31d2SXishi Qiu 	if (PageHuge(page))
1434394e31d2SXishi Qiu 		return alloc_huge_page_node(page_hstate(compound_head(page)),
1435394e31d2SXishi Qiu 					next_node_in(nid, nmask));
1436394e31d2SXishi Qiu 
1437394e31d2SXishi Qiu 	node_clear(nid, nmask);
14389bb627beSLi Zhong 
1439394e31d2SXishi Qiu 	if (PageHighMem(page)
1440394e31d2SXishi Qiu 	    || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
1441394e31d2SXishi Qiu 		gfp_mask |= __GFP_HIGHMEM;
1442394e31d2SXishi Qiu 
1443231e97e2SLi Zhong 	if (!nodes_empty(nmask))
144404ec6264SVlastimil Babka 		new_page = __alloc_pages_nodemask(gfp_mask, 0, nid, &nmask);
1445394e31d2SXishi Qiu 	if (!new_page)
144604ec6264SVlastimil Babka 		new_page = __alloc_pages(gfp_mask, 0, nid);
1447394e31d2SXishi Qiu 
1448394e31d2SXishi Qiu 	return new_page;
1449394e31d2SXishi Qiu }
1450394e31d2SXishi Qiu 
14510c0e6195SKAMEZAWA Hiroyuki #define NR_OFFLINE_AT_ONCE_PAGES	(256)
14520c0e6195SKAMEZAWA Hiroyuki static int
14530c0e6195SKAMEZAWA Hiroyuki do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
14540c0e6195SKAMEZAWA Hiroyuki {
14550c0e6195SKAMEZAWA Hiroyuki 	unsigned long pfn;
14560c0e6195SKAMEZAWA Hiroyuki 	struct page *page;
14570c0e6195SKAMEZAWA Hiroyuki 	int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
14580c0e6195SKAMEZAWA Hiroyuki 	int not_managed = 0;
14590c0e6195SKAMEZAWA Hiroyuki 	int ret = 0;
14600c0e6195SKAMEZAWA Hiroyuki 	LIST_HEAD(source);
14610c0e6195SKAMEZAWA Hiroyuki 
14620c0e6195SKAMEZAWA Hiroyuki 	for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
14630c0e6195SKAMEZAWA Hiroyuki 		if (!pfn_valid(pfn))
14640c0e6195SKAMEZAWA Hiroyuki 			continue;
14650c0e6195SKAMEZAWA Hiroyuki 		page = pfn_to_page(pfn);
1466c8721bbbSNaoya Horiguchi 
1467c8721bbbSNaoya Horiguchi 		if (PageHuge(page)) {
1468c8721bbbSNaoya Horiguchi 			struct page *head = compound_head(page);
1469c8721bbbSNaoya Horiguchi 			pfn = page_to_pfn(head) + (1<<compound_order(head)) - 1;
1470c8721bbbSNaoya Horiguchi 			if (compound_order(head) > PFN_SECTION_SHIFT) {
1471c8721bbbSNaoya Horiguchi 				ret = -EBUSY;
1472c8721bbbSNaoya Horiguchi 				break;
1473c8721bbbSNaoya Horiguchi 			}
1474c8721bbbSNaoya Horiguchi 			if (isolate_huge_page(page, &source))
1475c8721bbbSNaoya Horiguchi 				move_pages -= 1 << compound_order(head);
1476c8721bbbSNaoya Horiguchi 			continue;
1477c8721bbbSNaoya Horiguchi 		}
1478c8721bbbSNaoya Horiguchi 
1479700c2a46SKonstantin Khlebnikov 		if (!get_page_unless_zero(page))
14800c0e6195SKAMEZAWA Hiroyuki 			continue;
14810c0e6195SKAMEZAWA Hiroyuki 		/*
14820efadf48SYisheng Xie 		 * We can skip free pages. And we can deal with LRU pages
14830efadf48SYisheng Xie 		 * and non-lru movable pages.
14840c0e6195SKAMEZAWA Hiroyuki 		 */
14850efadf48SYisheng Xie 		if (PageLRU(page))
148662695a84SNick Piggin 			ret = isolate_lru_page(page);
14870efadf48SYisheng Xie 		else
14880efadf48SYisheng Xie 			ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
14890c0e6195SKAMEZAWA Hiroyuki 		if (!ret) { /* Success */
1490700c2a46SKonstantin Khlebnikov 			put_page(page);
149162695a84SNick Piggin 			list_add_tail(&page->lru, &source);
14920c0e6195SKAMEZAWA Hiroyuki 			move_pages--;
14930efadf48SYisheng Xie 			if (!__PageMovable(page))
1494599d0c95SMel Gorman 				inc_node_page_state(page, NR_ISOLATED_ANON +
14956d9c285aSKOSAKI Motohiro 						    page_is_file_cache(page));
14966d9c285aSKOSAKI Motohiro 
14970c0e6195SKAMEZAWA Hiroyuki 		} else {
14980c0e6195SKAMEZAWA Hiroyuki #ifdef CONFIG_DEBUG_VM
14990efadf48SYisheng Xie 			pr_alert("failed to isolate pfn %lx\n", pfn);
15000efadf48SYisheng Xie 			dump_page(page, "isolation failed");
15010c0e6195SKAMEZAWA Hiroyuki #endif
1502700c2a46SKonstantin Khlebnikov 			put_page(page);
150325985edcSLucas De Marchi 			/* Because we don't hold the big zone->lock, we should
1504809c4449SBob Liu 			   check this again here. */
1505809c4449SBob Liu 			if (page_count(page)) {
1506809c4449SBob Liu 				not_managed++;
1507f3ab2636SBob Liu 				ret = -EBUSY;
1508809c4449SBob Liu 				break;
1509809c4449SBob Liu 			}
15100c0e6195SKAMEZAWA Hiroyuki 		}
15110c0e6195SKAMEZAWA Hiroyuki 	}
1512f3ab2636SBob Liu 	if (!list_empty(&source)) {
15130c0e6195SKAMEZAWA Hiroyuki 		if (not_managed) {
1514c8721bbbSNaoya Horiguchi 			putback_movable_pages(&source);
15150c0e6195SKAMEZAWA Hiroyuki 			goto out;
15160c0e6195SKAMEZAWA Hiroyuki 		}
151774c08f98SMinchan Kim 
1518394e31d2SXishi Qiu 		/* Allocate a new page from the nearest neighbor node */
1519394e31d2SXishi Qiu 		ret = migrate_pages(&source, new_node_page, NULL, 0,
15209c620e2bSHugh Dickins 					MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
1521cf608ac1SMinchan Kim 		if (ret)
1522c8721bbbSNaoya Horiguchi 			putback_movable_pages(&source);
1523f3ab2636SBob Liu 	}
15240c0e6195SKAMEZAWA Hiroyuki out:
15250c0e6195SKAMEZAWA Hiroyuki 	return ret;
15260c0e6195SKAMEZAWA Hiroyuki }
15270c0e6195SKAMEZAWA Hiroyuki 
15280c0e6195SKAMEZAWA Hiroyuki /*
15290c0e6195SKAMEZAWA Hiroyuki  * Remove pages from free_area[] and mark them all as Reserved.
15300c0e6195SKAMEZAWA Hiroyuki  */
15310c0e6195SKAMEZAWA Hiroyuki static int
15320c0e6195SKAMEZAWA Hiroyuki offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
15330c0e6195SKAMEZAWA Hiroyuki 			void *data)
15340c0e6195SKAMEZAWA Hiroyuki {
15350c0e6195SKAMEZAWA Hiroyuki 	__offline_isolated_pages(start, start + nr_pages);
15360c0e6195SKAMEZAWA Hiroyuki 	return 0;
15370c0e6195SKAMEZAWA Hiroyuki }
15380c0e6195SKAMEZAWA Hiroyuki 
15390c0e6195SKAMEZAWA Hiroyuki static void
15400c0e6195SKAMEZAWA Hiroyuki offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
15410c0e6195SKAMEZAWA Hiroyuki {
1542908eedc6SKAMEZAWA Hiroyuki 	walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL,
15430c0e6195SKAMEZAWA Hiroyuki 				offline_isolated_pages_cb);
15440c0e6195SKAMEZAWA Hiroyuki }
15450c0e6195SKAMEZAWA Hiroyuki 
15460c0e6195SKAMEZAWA Hiroyuki /*
15470c0e6195SKAMEZAWA Hiroyuki  * Check that all pages in the range, recorded as a memory resource, are isolated.
15480c0e6195SKAMEZAWA Hiroyuki  */
15490c0e6195SKAMEZAWA Hiroyuki static int
15500c0e6195SKAMEZAWA Hiroyuki check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
15510c0e6195SKAMEZAWA Hiroyuki 			void *data)
15520c0e6195SKAMEZAWA Hiroyuki {
15530c0e6195SKAMEZAWA Hiroyuki 	int ret;
15540c0e6195SKAMEZAWA Hiroyuki 	long offlined = *(long *)data;
1555b023f468SWen Congyang 	ret = test_pages_isolated(start_pfn, start_pfn + nr_pages, true);
15560c0e6195SKAMEZAWA Hiroyuki 	offlined = nr_pages;
15570c0e6195SKAMEZAWA Hiroyuki 	if (!ret)
15580c0e6195SKAMEZAWA Hiroyuki 		*(long *)data += offlined;
15590c0e6195SKAMEZAWA Hiroyuki 	return ret;
15600c0e6195SKAMEZAWA Hiroyuki }
15610c0e6195SKAMEZAWA Hiroyuki 
15620c0e6195SKAMEZAWA Hiroyuki static long
15630c0e6195SKAMEZAWA Hiroyuki check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
15640c0e6195SKAMEZAWA Hiroyuki {
15650c0e6195SKAMEZAWA Hiroyuki 	long offlined = 0;
15660c0e6195SKAMEZAWA Hiroyuki 	int ret;
15670c0e6195SKAMEZAWA Hiroyuki 
1568908eedc6SKAMEZAWA Hiroyuki 	ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined,
15690c0e6195SKAMEZAWA Hiroyuki 			check_pages_isolated_cb);
15700c0e6195SKAMEZAWA Hiroyuki 	if (ret < 0)
15710c0e6195SKAMEZAWA Hiroyuki 		offlined = (long)ret;
15720c0e6195SKAMEZAWA Hiroyuki 	return offlined;
15730c0e6195SKAMEZAWA Hiroyuki }
15740c0e6195SKAMEZAWA Hiroyuki 
1575c5320926STang Chen static int __init cmdline_parse_movable_node(char *p)
1576c5320926STang Chen {
1577*4932381eSMichal Hocko #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
157855ac590cSTang Chen 	movable_node_enabled = true;
1579*4932381eSMichal Hocko #else
1580*4932381eSMichal Hocko 	pr_warn("movable_node parameter depends on CONFIG_HAVE_MEMBLOCK_NODE_MAP to work properly\n");
1581*4932381eSMichal Hocko #endif
1582c5320926STang Chen 	return 0;
1583c5320926STang Chen }
1584c5320926STang Chen early_param("movable_node", cmdline_parse_movable_node);
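
/*
 * Boot-time usage (example command line): appending "movable_node" to the
 * kernel parameters, e.g.
 *
 *	linux ... movable_node
 *
 * asks the kernel to keep memory that firmware marks as hotpluggable (for
 * instance via the ACPI SRAT) out of permanent kernel allocations, so that
 * whole nodes stay removable.  As the check above shows, it only takes
 * effect when CONFIG_HAVE_MEMBLOCK_NODE_MAP is enabled.
 */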
1585c5320926STang Chen 
1586d9713679SLai Jiangshan /* check which state of node_states will be changed when offline memory */
1587d9713679SLai Jiangshan static void node_states_check_changes_offline(unsigned long nr_pages,
1588d9713679SLai Jiangshan 		struct zone *zone, struct memory_notify *arg)
1589d9713679SLai Jiangshan {
1590d9713679SLai Jiangshan 	struct pglist_data *pgdat = zone->zone_pgdat;
1591d9713679SLai Jiangshan 	unsigned long present_pages = 0;
1592d9713679SLai Jiangshan 	enum zone_type zt, zone_last = ZONE_NORMAL;
1593d9713679SLai Jiangshan 
1594d9713679SLai Jiangshan 	/*
15956715ddf9SLai Jiangshan 	 * If we have HIGHMEM or a movable node, node_states[N_NORMAL_MEMORY]
15966715ddf9SLai Jiangshan 	 * contains nodes which have zones of 0...ZONE_NORMAL,
15976715ddf9SLai Jiangshan 	 * so set zone_last to ZONE_NORMAL.
1598d9713679SLai Jiangshan 	 *
15996715ddf9SLai Jiangshan 	 * If we have neither HIGHMEM nor a movable node,
16006715ddf9SLai Jiangshan 	 * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
16016715ddf9SLai Jiangshan 	 * 0...ZONE_MOVABLE, so set zone_last to ZONE_MOVABLE.
1602d9713679SLai Jiangshan 	 */
16036715ddf9SLai Jiangshan 	if (N_MEMORY == N_NORMAL_MEMORY)
1604d9713679SLai Jiangshan 		zone_last = ZONE_MOVABLE;
1605d9713679SLai Jiangshan 
1606d9713679SLai Jiangshan 	/*
1607d9713679SLai Jiangshan 	 * Check whether node_states[N_NORMAL_MEMORY] will be changed.
1608d9713679SLai Jiangshan 	 * If the memory to be offlined is in a zone of 0...zone_last,
1609d9713679SLai Jiangshan 	 * and it is the last present memory there, 0...zone_last will
1610d9713679SLai Jiangshan 	 * become empty after the offline, thus we can determine that we
1611d9713679SLai Jiangshan 	 * will need to clear the node from node_states[N_NORMAL_MEMORY].
1612d9713679SLai Jiangshan 	 */
1613d9713679SLai Jiangshan 	for (zt = 0; zt <= zone_last; zt++)
1614d9713679SLai Jiangshan 		present_pages += pgdat->node_zones[zt].present_pages;
1615d9713679SLai Jiangshan 	if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
1616d9713679SLai Jiangshan 		arg->status_change_nid_normal = zone_to_nid(zone);
1617d9713679SLai Jiangshan 	else
1618d9713679SLai Jiangshan 		arg->status_change_nid_normal = -1;
1619d9713679SLai Jiangshan 
16206715ddf9SLai Jiangshan #ifdef CONFIG_HIGHMEM
16216715ddf9SLai Jiangshan 	 * If we have a movable node, node_states[N_HIGH_MEMORY]
16226715ddf9SLai Jiangshan 	 * contains nodes which have zones of 0...ZONE_HIGHMEM,
16236715ddf9SLai Jiangshan 	 * so set zone_last to ZONE_HIGHMEM.
16246715ddf9SLai Jiangshan 	 *
16256715ddf9SLai Jiangshan 	 * If we don't have a movable node, node_states[N_NORMAL_MEMORY]
16266715ddf9SLai Jiangshan 	 * contains nodes which have zones of 0...ZONE_MOVABLE,
16276715ddf9SLai Jiangshan 	 * so set zone_last to ZONE_MOVABLE.
16286715ddf9SLai Jiangshan 	 * set zone_last to ZONE_MOVABLE.
16296715ddf9SLai Jiangshan 	 */
16306715ddf9SLai Jiangshan 	zone_last = ZONE_HIGHMEM;
16316715ddf9SLai Jiangshan 	if (N_MEMORY == N_HIGH_MEMORY)
16326715ddf9SLai Jiangshan 		zone_last = ZONE_MOVABLE;
16336715ddf9SLai Jiangshan 
16346715ddf9SLai Jiangshan 	for (; zt <= zone_last; zt++)
16356715ddf9SLai Jiangshan 		present_pages += pgdat->node_zones[zt].present_pages;
16366715ddf9SLai Jiangshan 	if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
16376715ddf9SLai Jiangshan 		arg->status_change_nid_high = zone_to_nid(zone);
16386715ddf9SLai Jiangshan 	else
16396715ddf9SLai Jiangshan 		arg->status_change_nid_high = -1;
16406715ddf9SLai Jiangshan #else
16416715ddf9SLai Jiangshan 	arg->status_change_nid_high = arg->status_change_nid_normal;
16426715ddf9SLai Jiangshan #endif
16436715ddf9SLai Jiangshan 
1644d9713679SLai Jiangshan 	/*
1645d9713679SLai Jiangshan 	 * node_states[N_HIGH_MEMORY] contains nodes which have 0...ZONE_MOVABLE
1646d9713679SLai Jiangshan 	 */
1647d9713679SLai Jiangshan 	zone_last = ZONE_MOVABLE;
1648d9713679SLai Jiangshan 
1649d9713679SLai Jiangshan 	/*
1650d9713679SLai Jiangshan 	 * Check whether node_states[N_HIGH_MEMORY] will be changed.
1651d9713679SLai Jiangshan 	 * If we try to offline the last present @nr_pages from the node,
1652d9713679SLai Jiangshan 	 * we can determine that we will need to clear the node from
1653d9713679SLai Jiangshan 	 * node_states[N_HIGH_MEMORY].
1654d9713679SLai Jiangshan 	 */
1655d9713679SLai Jiangshan 	for (; zt <= zone_last; zt++)
1656d9713679SLai Jiangshan 		present_pages += pgdat->node_zones[zt].present_pages;
1657d9713679SLai Jiangshan 	if (nr_pages >= present_pages)
1658d9713679SLai Jiangshan 		arg->status_change_nid = zone_to_nid(zone);
1659d9713679SLai Jiangshan 	else
1660d9713679SLai Jiangshan 		arg->status_change_nid = -1;
1661d9713679SLai Jiangshan }
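
/*
 * A worked example of the logic above: consider a node whose only present
 * memory sits in ZONE_NORMAL.  Offlining all of it makes nr_pages equal the
 * present_pages sum for every zone_last used above, so
 * status_change_nid_normal, status_change_nid_high and status_change_nid
 * all end up set to that node id, and MEM_GOING_OFFLINE listeners can tell
 * the node is about to become memoryless.  Offlining only part of that
 * memory leaves all three fields at -1.
 */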
1662d9713679SLai Jiangshan 
1663d9713679SLai Jiangshan static void node_states_clear_node(int node, struct memory_notify *arg)
1664d9713679SLai Jiangshan {
1665d9713679SLai Jiangshan 	if (arg->status_change_nid_normal >= 0)
1666d9713679SLai Jiangshan 		node_clear_state(node, N_NORMAL_MEMORY);
1667d9713679SLai Jiangshan 
16686715ddf9SLai Jiangshan 	if ((N_MEMORY != N_NORMAL_MEMORY) &&
16696715ddf9SLai Jiangshan 	    (arg->status_change_nid_high >= 0))
1670d9713679SLai Jiangshan 		node_clear_state(node, N_HIGH_MEMORY);
16716715ddf9SLai Jiangshan 
16726715ddf9SLai Jiangshan 	if ((N_MEMORY != N_HIGH_MEMORY) &&
16736715ddf9SLai Jiangshan 	    (arg->status_change_nid >= 0))
16746715ddf9SLai Jiangshan 		node_clear_state(node, N_MEMORY);
1675d9713679SLai Jiangshan }
1676d9713679SLai Jiangshan 
1677a16cee10SWen Congyang static int __ref __offline_pages(unsigned long start_pfn,
16780c0e6195SKAMEZAWA Hiroyuki 		  unsigned long end_pfn, unsigned long timeout)
16790c0e6195SKAMEZAWA Hiroyuki {
16800c0e6195SKAMEZAWA Hiroyuki 	unsigned long pfn, nr_pages, expire;
16810c0e6195SKAMEZAWA Hiroyuki 	long offlined_pages;
16827b78d335SYasunori Goto 	int ret, drain, retry_max, node;
1683d702909fSCody P Schafer 	unsigned long flags;
1684a96dfddbSToshi Kani 	unsigned long valid_start, valid_end;
16850c0e6195SKAMEZAWA Hiroyuki 	struct zone *zone;
16867b78d335SYasunori Goto 	struct memory_notify arg;
16870c0e6195SKAMEZAWA Hiroyuki 
16880c0e6195SKAMEZAWA Hiroyuki 	/* at least, alignment against pageblock is necessary */
16890c0e6195SKAMEZAWA Hiroyuki 	if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
16900c0e6195SKAMEZAWA Hiroyuki 		return -EINVAL;
16910c0e6195SKAMEZAWA Hiroyuki 	if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
16920c0e6195SKAMEZAWA Hiroyuki 		return -EINVAL;
16930c0e6195SKAMEZAWA Hiroyuki 	/* This makes hotplug much easier... and more readable.
16940c0e6195SKAMEZAWA Hiroyuki 	   We assume this for now. */
1695a96dfddbSToshi Kani 	if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
16960c0e6195SKAMEZAWA Hiroyuki 		return -EINVAL;
16977b78d335SYasunori Goto 
1698a96dfddbSToshi Kani 	zone = page_zone(pfn_to_page(valid_start));
16997b78d335SYasunori Goto 	node = zone_to_nid(zone);
17007b78d335SYasunori Goto 	nr_pages = end_pfn - start_pfn;
17017b78d335SYasunori Goto 
17020c0e6195SKAMEZAWA Hiroyuki 	/* set above range as isolated */
1703b023f468SWen Congyang 	ret = start_isolate_page_range(start_pfn, end_pfn,
1704b023f468SWen Congyang 				       MIGRATE_MOVABLE, true);
17050c0e6195SKAMEZAWA Hiroyuki 	if (ret)
170630467e0bSDavid Rientjes 		return ret;
17077b78d335SYasunori Goto 
17087b78d335SYasunori Goto 	arg.start_pfn = start_pfn;
17097b78d335SYasunori Goto 	arg.nr_pages = nr_pages;
1710d9713679SLai Jiangshan 	node_states_check_changes_offline(nr_pages, zone, &arg);
17117b78d335SYasunori Goto 
17127b78d335SYasunori Goto 	ret = memory_notify(MEM_GOING_OFFLINE, &arg);
17137b78d335SYasunori Goto 	ret = notifier_to_errno(ret);
17147b78d335SYasunori Goto 	if (ret)
17157b78d335SYasunori Goto 		goto failed_removal;
17167b78d335SYasunori Goto 
17170c0e6195SKAMEZAWA Hiroyuki 	pfn = start_pfn;
17180c0e6195SKAMEZAWA Hiroyuki 	expire = jiffies + timeout;
17190c0e6195SKAMEZAWA Hiroyuki 	drain = 0;
17200c0e6195SKAMEZAWA Hiroyuki 	retry_max = 5;
17210c0e6195SKAMEZAWA Hiroyuki repeat:
17220c0e6195SKAMEZAWA Hiroyuki 	/* start memory hot removal */
17230c0e6195SKAMEZAWA Hiroyuki 	ret = -EAGAIN;
17240c0e6195SKAMEZAWA Hiroyuki 	if (time_after(jiffies, expire))
17250c0e6195SKAMEZAWA Hiroyuki 		goto failed_removal;
17260c0e6195SKAMEZAWA Hiroyuki 	ret = -EINTR;
17270c0e6195SKAMEZAWA Hiroyuki 	if (signal_pending(current))
17280c0e6195SKAMEZAWA Hiroyuki 		goto failed_removal;
17290c0e6195SKAMEZAWA Hiroyuki 	ret = 0;
17300c0e6195SKAMEZAWA Hiroyuki 	if (drain) {
17310c0e6195SKAMEZAWA Hiroyuki 		lru_add_drain_all();
17320c0e6195SKAMEZAWA Hiroyuki 		cond_resched();
1733c0554329SVlastimil Babka 		drain_all_pages(zone);
17340c0e6195SKAMEZAWA Hiroyuki 	}
17350c0e6195SKAMEZAWA Hiroyuki 
1736c8721bbbSNaoya Horiguchi 	pfn = scan_movable_pages(start_pfn, end_pfn);
1737c8721bbbSNaoya Horiguchi 	if (pfn) { /* We have movable pages */
17380c0e6195SKAMEZAWA Hiroyuki 		ret = do_migrate_range(pfn, end_pfn);
17390c0e6195SKAMEZAWA Hiroyuki 		if (!ret) {
17400c0e6195SKAMEZAWA Hiroyuki 			drain = 1;
17410c0e6195SKAMEZAWA Hiroyuki 			goto repeat;
17420c0e6195SKAMEZAWA Hiroyuki 		} else {
17430c0e6195SKAMEZAWA Hiroyuki 			if (ret < 0)
17440c0e6195SKAMEZAWA Hiroyuki 				if (--retry_max == 0)
17450c0e6195SKAMEZAWA Hiroyuki 					goto failed_removal;
17460c0e6195SKAMEZAWA Hiroyuki 			yield();
17470c0e6195SKAMEZAWA Hiroyuki 			drain = 1;
17480c0e6195SKAMEZAWA Hiroyuki 			goto repeat;
17490c0e6195SKAMEZAWA Hiroyuki 		}
17500c0e6195SKAMEZAWA Hiroyuki 	}
1751b3834be5SAdam Buchbinder 	/* drain all zones' lru pagevecs; this is asynchronous... */
17520c0e6195SKAMEZAWA Hiroyuki 	lru_add_drain_all();
17530c0e6195SKAMEZAWA Hiroyuki 	yield();
1754b3834be5SAdam Buchbinder 	/* drain pcp pages, this is synchronous. */
1755c0554329SVlastimil Babka 	drain_all_pages(zone);
1756c8721bbbSNaoya Horiguchi 	/*
1757c8721bbbSNaoya Horiguchi 	 * Dissolve free hugepages in the memory block before actually
1758c8721bbbSNaoya Horiguchi 	 * offlining it, in order to keep hugetlbfs's object counting consistent.
1759c8721bbbSNaoya Horiguchi 	 */
1760082d5b6bSGerald Schaefer 	ret = dissolve_free_huge_pages(start_pfn, end_pfn);
1761082d5b6bSGerald Schaefer 	if (ret)
1762082d5b6bSGerald Schaefer 		goto failed_removal;
17630c0e6195SKAMEZAWA Hiroyuki 	/* check again */
17640c0e6195SKAMEZAWA Hiroyuki 	offlined_pages = check_pages_isolated(start_pfn, end_pfn);
17650c0e6195SKAMEZAWA Hiroyuki 	if (offlined_pages < 0) {
17660c0e6195SKAMEZAWA Hiroyuki 		ret = -EBUSY;
17670c0e6195SKAMEZAWA Hiroyuki 		goto failed_removal;
17680c0e6195SKAMEZAWA Hiroyuki 	}
1769e33e33b4SChen Yucong 	pr_info("Offlined Pages %ld\n", offlined_pages);
1770b3834be5SAdam Buchbinder 	/* Ok, all of our target pages are isolated.
17710c0e6195SKAMEZAWA Hiroyuki 	   We cannot roll back at this point. */
17720c0e6195SKAMEZAWA Hiroyuki 	offline_isolated_pages(start_pfn, end_pfn);
1773dbc0e4ceSKAMEZAWA Hiroyuki 	/* reset pagetype flags and make the migrate type MOVABLE */
17740815f3d8SMichal Nazarewicz 	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
17750c0e6195SKAMEZAWA Hiroyuki 	/* removal success */
17763dcc0571SJiang Liu 	adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages);
17770c0e6195SKAMEZAWA Hiroyuki 	zone->present_pages -= offlined_pages;
1778d702909fSCody P Schafer 
1779d702909fSCody P Schafer 	pgdat_resize_lock(zone->zone_pgdat, &flags);
17800c0e6195SKAMEZAWA Hiroyuki 	zone->zone_pgdat->node_present_pages -= offlined_pages;
1781d702909fSCody P Schafer 	pgdat_resize_unlock(zone->zone_pgdat, &flags);
17827b78d335SYasunori Goto 
17831b79acc9SKOSAKI Motohiro 	init_per_zone_wmark_min();
17841b79acc9SKOSAKI Motohiro 
17851e8537baSXishi Qiu 	if (!populated_zone(zone)) {
1786340175b7SJiang Liu 		zone_pcp_reset(zone);
17871e8537baSXishi Qiu 		mutex_lock(&zonelists_mutex);
17881e8537baSXishi Qiu 		build_all_zonelists(NULL, NULL);
17891e8537baSXishi Qiu 		mutex_unlock(&zonelists_mutex);
17901e8537baSXishi Qiu 	} else
17911e8537baSXishi Qiu 		zone_pcp_update(zone);
1792340175b7SJiang Liu 
1793d9713679SLai Jiangshan 	node_states_clear_node(node, &arg);
1794698b1b30SVlastimil Babka 	if (arg.status_change_nid >= 0) {
17958fe23e05SDavid Rientjes 		kswapd_stop(node);
1796698b1b30SVlastimil Babka 		kcompactd_stop(node);
1797698b1b30SVlastimil Babka 	}
1798bce7394aSMinchan Kim 
17990c0e6195SKAMEZAWA Hiroyuki 	vm_total_pages = nr_free_pagecache_pages();
18000c0e6195SKAMEZAWA Hiroyuki 	writeback_set_ratelimit();
18017b78d335SYasunori Goto 
18027b78d335SYasunori Goto 	memory_notify(MEM_OFFLINE, &arg);
18030c0e6195SKAMEZAWA Hiroyuki 	return 0;
18040c0e6195SKAMEZAWA Hiroyuki 
18050c0e6195SKAMEZAWA Hiroyuki failed_removal:
1806e33e33b4SChen Yucong 	pr_debug("memory offlining [mem %#010llx-%#010llx] failed\n",
1807a62e2f4fSBjorn Helgaas 		 (unsigned long long) start_pfn << PAGE_SHIFT,
1808a62e2f4fSBjorn Helgaas 		 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
18097b78d335SYasunori Goto 	memory_notify(MEM_CANCEL_OFFLINE, &arg);
18100c0e6195SKAMEZAWA Hiroyuki 	/* pushback to free area */
18110815f3d8SMichal Nazarewicz 	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
18120c0e6195SKAMEZAWA Hiroyuki 	return ret;
18130c0e6195SKAMEZAWA Hiroyuki }
181471088785SBadari Pulavarty 
181530467e0bSDavid Rientjes /* Must be protected by mem_hotplug_begin() */
1816a16cee10SWen Congyang int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
1817a16cee10SWen Congyang {
1818a16cee10SWen Congyang 	return __offline_pages(start_pfn, start_pfn + nr_pages, 120 * HZ);
1819a16cee10SWen Congyang }
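
/*
 * A minimal usage sketch honouring the locking rule noted above (in practice
 * this is reached from the memory block device code; error handling omitted):
 *
 *	mem_hotplug_begin();
 *	ret = offline_pages(start_pfn, nr_pages);
 *	mem_hotplug_done();
 *
 * start_pfn and nr_pages must be multiples of pageblock_nr_pages, as checked
 * at the top of __offline_pages(), and the call may retry for up to the
 * 120s timeout above.
 */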
1820e2ff3940SRafael J. Wysocki #endif /* CONFIG_MEMORY_HOTREMOVE */
1821a16cee10SWen Congyang 
1822bbc76be6SWen Congyang /**
1823bbc76be6SWen Congyang  * walk_memory_range - walks through all mem sections in [start_pfn, end_pfn)
1824bbc76be6SWen Congyang  * @start_pfn: start pfn of the memory range
1825e05c4bbfSToshi Kani  * @end_pfn: end pfn of the memory range
1826bbc76be6SWen Congyang  * @arg: argument passed to func
1827bbc76be6SWen Congyang  * @func: callback for each memory section walked
1828bbc76be6SWen Congyang  *
1829bbc76be6SWen Congyang  * This function walks through all present mem sections in range
1830bbc76be6SWen Congyang  * [start_pfn, end_pfn) and calls func on each mem section.
1831bbc76be6SWen Congyang  *
1832bbc76be6SWen Congyang  * Returns the return value of func.
1833bbc76be6SWen Congyang  */
1834e2ff3940SRafael J. Wysocki int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
1835bbc76be6SWen Congyang 		void *arg, int (*func)(struct memory_block *, void *))
183671088785SBadari Pulavarty {
1837e90bdb7fSWen Congyang 	struct memory_block *mem = NULL;
1838e90bdb7fSWen Congyang 	struct mem_section *section;
1839e90bdb7fSWen Congyang 	unsigned long pfn, section_nr;
1840e90bdb7fSWen Congyang 	int ret;
184171088785SBadari Pulavarty 
1842e90bdb7fSWen Congyang 	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
1843e90bdb7fSWen Congyang 		section_nr = pfn_to_section_nr(pfn);
1844e90bdb7fSWen Congyang 		if (!present_section_nr(section_nr))
1845e90bdb7fSWen Congyang 			continue;
1846e90bdb7fSWen Congyang 
1847e90bdb7fSWen Congyang 		section = __nr_to_section(section_nr);
1848e90bdb7fSWen Congyang 		/* same memblock? */
1849e90bdb7fSWen Congyang 		if (mem)
1850e90bdb7fSWen Congyang 			if ((section_nr >= mem->start_section_nr) &&
1851e90bdb7fSWen Congyang 			    (section_nr <= mem->end_section_nr))
1852e90bdb7fSWen Congyang 				continue;
1853e90bdb7fSWen Congyang 
1854e90bdb7fSWen Congyang 		mem = find_memory_block_hinted(section, mem);
1855e90bdb7fSWen Congyang 		if (!mem)
1856e90bdb7fSWen Congyang 			continue;
1857e90bdb7fSWen Congyang 
1858bbc76be6SWen Congyang 		ret = func(mem, arg);
1859e90bdb7fSWen Congyang 		if (ret) {
1860e90bdb7fSWen Congyang 			kobject_put(&mem->dev.kobj);
1861e90bdb7fSWen Congyang 			return ret;
1862e90bdb7fSWen Congyang 		}
1863e90bdb7fSWen Congyang 	}
1864e90bdb7fSWen Congyang 
1865e90bdb7fSWen Congyang 	if (mem)
1866e90bdb7fSWen Congyang 		kobject_put(&mem->dev.kobj);
1867e90bdb7fSWen Congyang 
1868bbc76be6SWen Congyang 	return 0;
1869bbc76be6SWen Congyang }
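
/*
 * An illustrative (hypothetical) callback for the walker above: count how
 * many memory blocks in a range are still online.  A non-zero return value
 * from the callback aborts the walk, as the kernel-doc describes.
 *
 *	static int count_online_cb(struct memory_block *mem, void *arg)
 *	{
 *		if (mem->state == MEM_ONLINE)
 *			(*(int *)arg)++;
 *		return 0;
 *	}
 *
 *	int online = 0;
 *	walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1),
 *			  &online, count_online_cb);
 */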
1870bbc76be6SWen Congyang 
1871e2ff3940SRafael J. Wysocki #ifdef CONFIG_MEMORY_HOTREMOVE
1872d6de9d53SXishi Qiu static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
1873bbc76be6SWen Congyang {
1874bbc76be6SWen Congyang 	int ret = !is_memblock_offlined(mem);
1875bbc76be6SWen Congyang 
1876349daa0fSRandy Dunlap 	if (unlikely(ret)) {
1877349daa0fSRandy Dunlap 		phys_addr_t beginpa, endpa;
1878349daa0fSRandy Dunlap 
1879349daa0fSRandy Dunlap 		beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr));
1880349daa0fSRandy Dunlap 		endpa = PFN_PHYS(section_nr_to_pfn(mem->end_section_nr + 1))-1;
1881756a025fSJoe Perches 		pr_warn("removing memory fails, because memory [%pa-%pa] is onlined\n",
1882349daa0fSRandy Dunlap 			&beginpa, &endpa);
1883349daa0fSRandy Dunlap 	}
1884bbc76be6SWen Congyang 
1885bbc76be6SWen Congyang 	return ret;
1886bbc76be6SWen Congyang }
1887bbc76be6SWen Congyang 
18880f1cfe9dSToshi Kani static int check_cpu_on_node(pg_data_t *pgdat)
188960a5a19eSTang Chen {
189060a5a19eSTang Chen 	int cpu;
189160a5a19eSTang Chen 
189260a5a19eSTang Chen 	for_each_present_cpu(cpu) {
189360a5a19eSTang Chen 		if (cpu_to_node(cpu) == pgdat->node_id)
189460a5a19eSTang Chen 			/*
189560a5a19eSTang Chen 			 * A cpu on this node hasn't been removed, so we
189660a5a19eSTang Chen 			 * can't offline this node.
189760a5a19eSTang Chen 			 */
189860a5a19eSTang Chen 			return -EBUSY;
189960a5a19eSTang Chen 	}
190060a5a19eSTang Chen 
190160a5a19eSTang Chen 	return 0;
190260a5a19eSTang Chen }
190360a5a19eSTang Chen 
19040f1cfe9dSToshi Kani static void unmap_cpu_on_node(pg_data_t *pgdat)
1905e13fe869SWen Congyang {
1906e13fe869SWen Congyang #ifdef CONFIG_ACPI_NUMA
1907e13fe869SWen Congyang 	int cpu;
1908e13fe869SWen Congyang 
1909e13fe869SWen Congyang 	for_each_possible_cpu(cpu)
1910e13fe869SWen Congyang 		if (cpu_to_node(cpu) == pgdat->node_id)
1911e13fe869SWen Congyang 			numa_clear_node(cpu);
1912e13fe869SWen Congyang #endif
1913e13fe869SWen Congyang }
1914e13fe869SWen Congyang 
19150f1cfe9dSToshi Kani static int check_and_unmap_cpu_on_node(pg_data_t *pgdat)
1916e13fe869SWen Congyang {
19170f1cfe9dSToshi Kani 	int ret;
1918e13fe869SWen Congyang 
19190f1cfe9dSToshi Kani 	ret = check_cpu_on_node(pgdat);
1920e13fe869SWen Congyang 	if (ret)
1921e13fe869SWen Congyang 		return ret;
1922e13fe869SWen Congyang 
1923e13fe869SWen Congyang 	/*
1924e13fe869SWen Congyang 	 * The node is going to be offlined when we come here, so we can
1925e13fe869SWen Congyang 	 * clear cpu_to_node() for its cpus now.
1926e13fe869SWen Congyang 	 */
1927e13fe869SWen Congyang 
19280f1cfe9dSToshi Kani 	unmap_cpu_on_node(pgdat);
1929e13fe869SWen Congyang 	return 0;
1930e13fe869SWen Congyang }
1931e13fe869SWen Congyang 
19320f1cfe9dSToshi Kani /**
19330f1cfe9dSToshi Kani  * try_offline_node
19340f1cfe9dSToshi Kani  *
19350f1cfe9dSToshi Kani  * Offline a node if all memory sections and cpus of the node are removed.
19360f1cfe9dSToshi Kani  *
19370f1cfe9dSToshi Kani  * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
19380f1cfe9dSToshi Kani  * and online/offline operations before this call.
19390f1cfe9dSToshi Kani  */
194090b30cdcSWen Congyang void try_offline_node(int nid)
194160a5a19eSTang Chen {
1942d822b86aSWen Congyang 	pg_data_t *pgdat = NODE_DATA(nid);
1943d822b86aSWen Congyang 	unsigned long start_pfn = pgdat->node_start_pfn;
1944d822b86aSWen Congyang 	unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
194560a5a19eSTang Chen 	unsigned long pfn;
194660a5a19eSTang Chen 
194760a5a19eSTang Chen 	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
194860a5a19eSTang Chen 		unsigned long section_nr = pfn_to_section_nr(pfn);
194960a5a19eSTang Chen 
195060a5a19eSTang Chen 		if (!present_section_nr(section_nr))
195160a5a19eSTang Chen 			continue;
195260a5a19eSTang Chen 
195360a5a19eSTang Chen 		if (pfn_to_nid(pfn) != nid)
195460a5a19eSTang Chen 			continue;
195560a5a19eSTang Chen 
195660a5a19eSTang Chen 		/*
195760a5a19eSTang Chen 		 * Some memory sections of this node have not been removed,
195860a5a19eSTang Chen 		 * so we can't offline the node now.
195960a5a19eSTang Chen 		 */
196060a5a19eSTang Chen 		return;
196160a5a19eSTang Chen 	}
196260a5a19eSTang Chen 
19630f1cfe9dSToshi Kani 	if (check_and_unmap_cpu_on_node(pgdat))
196460a5a19eSTang Chen 		return;
196560a5a19eSTang Chen 
196660a5a19eSTang Chen 	/*
196760a5a19eSTang Chen 	 * All memory and cpus of this node have been removed; we can
196860a5a19eSTang Chen 	 * offline this node now.
196960a5a19eSTang Chen 	 */
197060a5a19eSTang Chen 	node_set_offline(nid);
197160a5a19eSTang Chen 	unregister_one_node(nid);
197260a5a19eSTang Chen }
197390b30cdcSWen Congyang EXPORT_SYMBOL(try_offline_node);
197460a5a19eSTang Chen 
19750f1cfe9dSToshi Kani /**
19760f1cfe9dSToshi Kani  * remove_memory
19770f1cfe9dSToshi Kani  *
19780f1cfe9dSToshi Kani  * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
19790f1cfe9dSToshi Kani  * and online/offline operations before this call, as required by
19800f1cfe9dSToshi Kani  * try_offline_node().
19810f1cfe9dSToshi Kani  */
1982242831ebSRafael J. Wysocki void __ref remove_memory(int nid, u64 start, u64 size)
1983bbc76be6SWen Congyang {
1984242831ebSRafael J. Wysocki 	int ret;
1985993c1aadSWen Congyang 
198627356f54SToshi Kani 	BUG_ON(check_hotplug_memory_range(start, size));
198727356f54SToshi Kani 
1988bfc8c901SVladimir Davydov 	mem_hotplug_begin();
19896677e3eaSYasuaki Ishimatsu 
19906677e3eaSYasuaki Ishimatsu 	/*
1991242831ebSRafael J. Wysocki 	 * All memory blocks must be offlined before removing memory.  Check
1992242831ebSRafael J. Wysocki 	 * whether all memory blocks in question are offline and trigger a BUG()
1993242831ebSRafael J. Wysocki 	 * if this is not the case.
19946677e3eaSYasuaki Ishimatsu 	 */
1995242831ebSRafael J. Wysocki 	ret = walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1), NULL,
1996d6de9d53SXishi Qiu 				check_memblock_offlined_cb);
1997bfc8c901SVladimir Davydov 	if (ret)
1998242831ebSRafael J. Wysocki 		BUG();
19996677e3eaSYasuaki Ishimatsu 
200046c66c4bSYasuaki Ishimatsu 	/* remove memmap entry */
200146c66c4bSYasuaki Ishimatsu 	firmware_map_remove(start, start + size, "System RAM");
2002f9126ab9SXishi Qiu 	memblock_free(start, size);
2003f9126ab9SXishi Qiu 	memblock_remove(start, size);
200446c66c4bSYasuaki Ishimatsu 
200524d335caSWen Congyang 	arch_remove_memory(start, size);
200624d335caSWen Congyang 
200760a5a19eSTang Chen 	try_offline_node(nid);
200860a5a19eSTang Chen 
2009bfc8c901SVladimir Davydov 	mem_hotplug_done();
201071088785SBadari Pulavarty }
201171088785SBadari Pulavarty EXPORT_SYMBOL_GPL(remove_memory);
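
/*
 * A usage sketch following the locking rule in the kernel-doc above; the
 * range must already have been offlined (e.g. via offline_pages()) and
 * nid, start and size are the values previously passed to add_memory():
 *
 *	lock_device_hotplug();
 *	remove_memory(nid, start, size);
 *	unlock_device_hotplug();
 */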
2012aba6efc4SRafael J. Wysocki #endif /* CONFIG_MEMORY_HOTREMOVE */
2013