xref: /linux/mm/memory_hotplug.c (revision 765532c8aaac624b5f8687af6d319c6a1138a257)
1 /*
2  *  linux/mm/memory_hotplug.c
3  *
4  *  Copyright (C)
5  */
6 
7 #include <linux/stddef.h>
8 #include <linux/mm.h>
9 #include <linux/swap.h>
10 #include <linux/interrupt.h>
11 #include <linux/pagemap.h>
12 #include <linux/bootmem.h>
13 #include <linux/compiler.h>
14 #include <linux/module.h>
15 #include <linux/pagevec.h>
16 #include <linux/writeback.h>
17 #include <linux/slab.h>
18 #include <linux/sysctl.h>
19 #include <linux/cpu.h>
20 #include <linux/memory.h>
21 #include <linux/memory_hotplug.h>
22 #include <linux/highmem.h>
23 #include <linux/vmalloc.h>
24 #include <linux/ioport.h>
25 #include <linux/delay.h>
26 #include <linux/migrate.h>
27 #include <linux/page-isolation.h>
28 #include <linux/pfn.h>
29 #include <linux/suspend.h>
30 #include <linux/mm_inline.h>
31 #include <linux/firmware-map.h>
32 
33 #include <asm/tlbflush.h>
34 
35 #include "internal.h"
36 
37 DEFINE_MUTEX(mem_hotplug_mutex);
38 
39 void lock_memory_hotplug(void)
40 {
41 	mutex_lock(&mem_hotplug_mutex);
42 
43 	/* exclude hibernation while memory hotplug is in progress (CONFIG_HIBERNATION=y) */
44 	lock_system_sleep();
45 }
46 
47 void unlock_memory_hotplug(void)
48 {
49 	unlock_system_sleep();
50 	mutex_unlock(&mem_hotplug_mutex);
51 }
52 
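/*
 * A usage sketch (illustration only, not code from this file): paths that
 * walk or modify hot-pluggable memory state bracket the work with this
 * pair:
 *
 *	lock_memory_hotplug();
 *	... inspect or resize sections/zones/pgdats ...
 *	unlock_memory_hotplug();
 *
 * Because lock_memory_hotplug() also takes the system sleep lock,
 * hibernation cannot run concurrently with a hotplug operation.
 */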
53 
54 /* add this memory to iomem resource */
55 static struct resource *register_memory_resource(u64 start, u64 size)
56 {
57 	struct resource *res;
58 	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
59 	BUG_ON(!res);
60 
61 	res->name = "System RAM";
62 	res->start = start;
63 	res->end = start + size - 1;
64 	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
65 	if (request_resource(&iomem_resource, res) < 0) {
66 		printk("System RAM resource %llx - %llx cannot be added\n",
67 		(unsigned long long)res->start, (unsigned long long)res->end);
68 		kfree(res);
69 		res = NULL;
70 	}
71 	return res;
72 }
73 
74 static void release_memory_resource(struct resource *res)
75 {
76 	if (!res)
77 		return;
78 	release_resource(res);
79 	kfree(res);
80 	return;
81 }
82 
83 #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
84 #ifndef CONFIG_SPARSEMEM_VMEMMAP
85 static void get_page_bootmem(unsigned long info,  struct page *page, int type)
86 {
87 	atomic_set(&page->_mapcount, type);
88 	SetPagePrivate(page);
89 	set_page_private(page, info);
90 	atomic_inc(&page->_count);
91 }
92 
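/*
 * The bootmem "type" stored in page->_mapcount above is SECTION_INFO,
 * MIX_SECTION_INFO or NODE_INFO (from <linux/memory_hotplug.h>); all of
 * these lie below -1, the smallest _mapcount a normal page can have,
 * which is what the BUG_ON() in put_page_bootmem() below relies on.
 */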
93 /* The reference to __meminit __free_pages_bootmem() is valid,
94  * so use __ref to tell modpost not to generate a warning. */
95 void __ref put_page_bootmem(struct page *page)
96 {
97 	int type;
98 
99 	type = atomic_read(&page->_mapcount);
100 	BUG_ON(type >= -1);
101 
102 	if (atomic_dec_return(&page->_count) == 1) {
103 		ClearPagePrivate(page);
104 		set_page_private(page, 0);
105 		reset_page_mapcount(page);
106 		__free_pages_bootmem(page, 0);
107 	}
108 
109 }
110 
111 static void register_page_bootmem_info_section(unsigned long start_pfn)
112 {
113 	unsigned long *usemap, mapsize, section_nr, i;
114 	struct mem_section *ms;
115 	struct page *page, *memmap;
116 
117 	if (!pfn_valid(start_pfn))
118 		return;
119 
120 	section_nr = pfn_to_section_nr(start_pfn);
121 	ms = __nr_to_section(section_nr);
122 
123 	/* Get section's memmap address */
124 	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
125 
126 	/*
127 	 * Get page for the memmap's phys address
128 	 * XXX: need more consideration for sparse_vmemmap...
129 	 */
130 	page = virt_to_page(memmap);
131 	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
132 	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;
133 
134 	/* remember memmap's page */
135 	for (i = 0; i < mapsize; i++, page++)
136 		get_page_bootmem(section_nr, page, SECTION_INFO);
137 
138 	usemap = __nr_to_section(section_nr)->pageblock_flags;
139 	page = virt_to_page(usemap);
140 
141 	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;
142 
143 	for (i = 0; i < mapsize; i++, page++)
144 		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
145 
146 }
147 
148 void register_page_bootmem_info_node(struct pglist_data *pgdat)
149 {
150 	unsigned long i, pfn, end_pfn, nr_pages;
151 	int node = pgdat->node_id;
152 	struct page *page;
153 	struct zone *zone;
154 
155 	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
156 	page = virt_to_page(pgdat);
157 
158 	for (i = 0; i < nr_pages; i++, page++)
159 		get_page_bootmem(node, page, NODE_INFO);
160 
161 	zone = &pgdat->node_zones[0];
162 	for (; zone < pgdat->node_zones + MAX_NR_ZONES; zone++) {
163 		if (zone->wait_table) {
164 			nr_pages = zone->wait_table_hash_nr_entries
165 				* sizeof(wait_queue_head_t);
166 			nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;
167 			page = virt_to_page(zone->wait_table);
168 
169 			for (i = 0; i < nr_pages; i++, page++)
170 				get_page_bootmem(node, page, NODE_INFO);
171 		}
172 	}
173 
174 	pfn = pgdat->node_start_pfn;
175 	end_pfn = pfn + pgdat->node_spanned_pages;
176 
177 	/* register section info */
178 	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION)
179 		register_page_bootmem_info_section(pfn);
180 
181 }
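/*
 * Net effect of the walk above: every boot-allocated page backing this
 * node's pgdat, its zones' wait tables and each section's memmap and
 * usemap is typed (NODE_INFO / SECTION_INFO / MIX_SECTION_INFO) and
 * pinned with an extra reference, so that a later hot-remove can
 * recognize these pages and release them via put_page_bootmem().
 */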
182 #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
183 
184 static void grow_zone_span(struct zone *zone, unsigned long start_pfn,
185 			   unsigned long end_pfn)
186 {
187 	unsigned long old_zone_end_pfn;
188 
189 	zone_span_writelock(zone);
190 
191 	old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
192 	if (start_pfn < zone->zone_start_pfn)
193 		zone->zone_start_pfn = start_pfn;
194 
195 	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
196 				zone->zone_start_pfn;
197 
198 	zone_span_writeunlock(zone);
199 }
200 
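/*
 * Readers that cannot take the pgdat resize lock use the zone span
 * seqlock instead. A sketch of the reader side, using the
 * zone_span_seqbegin()/zone_span_seqretry() helpers from
 * <linux/memory_hotplug.h>:
 *
 *	unsigned int seq;
 *	do {
 *		seq = zone_span_seqbegin(zone);
 *		start_pfn = zone->zone_start_pfn;
 *		nr_pages = zone->spanned_pages;
 *	} while (zone_span_seqretry(zone, seq));
 */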
201 static void grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
202 			    unsigned long end_pfn)
203 {
204 	unsigned long old_pgdat_end_pfn =
205 		pgdat->node_start_pfn + pgdat->node_spanned_pages;
206 
207 	if (start_pfn < pgdat->node_start_pfn)
208 		pgdat->node_start_pfn = start_pfn;
209 
210 	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
211 					pgdat->node_start_pfn;
212 }
213 
214 static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
215 {
216 	struct pglist_data *pgdat = zone->zone_pgdat;
217 	int nr_pages = PAGES_PER_SECTION;
218 	int nid = pgdat->node_id;
219 	int zone_type;
220 	unsigned long flags;
221 
222 	zone_type = zone - pgdat->node_zones;
223 	if (!zone->wait_table) {
224 		int ret;
225 
226 		ret = init_currently_empty_zone(zone, phys_start_pfn,
227 						nr_pages, MEMMAP_HOTPLUG);
228 		if (ret)
229 			return ret;
230 	}
231 	pgdat_resize_lock(zone->zone_pgdat, &flags);
232 	grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);
233 	grow_pgdat_span(zone->zone_pgdat, phys_start_pfn,
234 			phys_start_pfn + nr_pages);
235 	pgdat_resize_unlock(zone->zone_pgdat, &flags);
236 	memmap_init_zone(nr_pages, nid, zone_type,
237 			 phys_start_pfn, MEMMAP_HOTPLUG);
238 	return 0;
239 }
240 
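/*
 * Note: in __add_zone() above, zone->wait_table doubles as the "zone has
 * been initialized" marker; init_currently_empty_zone() allocates it, so
 * a NULL wait_table means the zone still needs its one-time setup.
 */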
241 static int __meminit __add_section(int nid, struct zone *zone,
242 					unsigned long phys_start_pfn)
243 {
244 	int nr_pages = PAGES_PER_SECTION;
245 	int ret;
246 
247 	if (pfn_valid(phys_start_pfn))
248 		return -EEXIST;
249 
250 	ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);
251 
252 	if (ret < 0)
253 		return ret;
254 
255 	ret = __add_zone(zone, phys_start_pfn);
256 
257 	if (ret < 0)
258 		return ret;
259 
260 	return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
261 }
262 
263 #ifdef CONFIG_SPARSEMEM_VMEMMAP
264 static int __remove_section(struct zone *zone, struct mem_section *ms)
265 {
266 	/*
267 	 * XXX: Freeing memmap with vmemmap is not implemented yet.
268 	 *      This should be removed later.
269 	 */
270 	return -EBUSY;
271 }
272 #else
273 static int __remove_section(struct zone *zone, struct mem_section *ms)
274 {
275 	unsigned long flags;
276 	struct pglist_data *pgdat = zone->zone_pgdat;
277 	int ret = -EINVAL;
278 
279 	if (!valid_section(ms))
280 		return ret;
281 
282 	ret = unregister_memory_section(ms);
283 	if (ret)
284 		return ret;
285 
286 	pgdat_resize_lock(pgdat, &flags);
287 	sparse_remove_one_section(zone, ms);
288 	pgdat_resize_unlock(pgdat, &flags);
289 	return 0;
290 }
291 #endif
292 
293 /*
294  * Reasonably generic function for adding memory.  It is
295  * expected that archs that support memory hotplug will
296  * call this function after deciding the zone to which to
297  * add the new pages.
298  */
299 int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
300 			unsigned long nr_pages)
301 {
302 	unsigned long i;
303 	int err = 0;
304 	int start_sec, end_sec;
305 	/* mem_map is initialized per section; align the hot-added range to sections */
306 	start_sec = pfn_to_section_nr(phys_start_pfn);
307 	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);
308 
309 	for (i = start_sec; i <= end_sec; i++) {
310 		err = __add_section(nid, zone, i << PFN_SECTION_SHIFT);
311 
312 		/*
313 		 * -EEXIST is ultimately handled by the ioresource collision
314 		 * check; see add_memory() => register_memory_resource().
315 		 * A warning is printed if there is a collision.
316 		 */
317 		if (err && (err != -EEXIST))
318 			break;
319 		err = 0;
320 	}
321 
322 	return err;
323 }
324 EXPORT_SYMBOL_GPL(__add_pages);
325 
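/*
 * Sketch of a typical arch hook (modeled loosely on the x86_64 version;
 * details vary by architecture): arch_add_memory() picks a zone, extends
 * the kernel mapping, then hands the pfn range to __add_pages():
 *
 *	int arch_add_memory(int nid, u64 start, u64 size)
 *	{
 *		struct pglist_data *pgdat = NODE_DATA(nid);
 *		struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
 *		unsigned long start_pfn = start >> PAGE_SHIFT;
 *		unsigned long nr_pages = size >> PAGE_SHIFT;
 *
 *		... map [start, start + size) in the kernel page tables ...
 *		return __add_pages(nid, zone, start_pfn, nr_pages);
 *	}
 */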
326 /**
327  * __remove_pages() - remove sections of pages from a zone
328  * @zone: zone from which pages need to be removed
329  * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
330  * @nr_pages: number of pages to remove (must be multiple of section size)
331  *
332  * Generic helper function to remove section mappings and sysfs entries
333  * for the section of the memory we are removing. Caller needs to make
334  * sure that pages are marked reserved and zones are adjusted properly by
335  * calling offline_pages().
336  */
337 int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
338 		 unsigned long nr_pages)
339 {
340 	unsigned long i, ret = 0;
341 	int sections_to_remove;
342 
343 	/*
344 	 * We can only remove entire sections
345 	 */
346 	BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
347 	BUG_ON(nr_pages % PAGES_PER_SECTION);
348 
349 	sections_to_remove = nr_pages / PAGES_PER_SECTION;
350 	for (i = 0; i < sections_to_remove; i++) {
351 		unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
352 		release_mem_region(pfn << PAGE_SHIFT,
353 				   PAGES_PER_SECTION << PAGE_SHIFT);
354 		ret = __remove_section(zone, __pfn_to_section(pfn));
355 		if (ret)
356 			break;
357 	}
358 	return ret;
359 }
360 EXPORT_SYMBOL_GPL(__remove_pages);
361 
362 void online_page(struct page *page)
363 {
364 	unsigned long pfn = page_to_pfn(page);
365 
366 	totalram_pages++;
367 	if (pfn >= num_physpages)
368 		num_physpages = pfn + 1;
369 
370 #ifdef CONFIG_HIGHMEM
371 	if (PageHighMem(page))
372 		totalhigh_pages++;
373 #endif
374 
375 #ifdef CONFIG_FLATMEM
376 	max_mapnr = max(page_to_pfn(page), max_mapnr);
377 #endif
378 
379 	ClearPageReserved(page);
380 	init_page_count(page);
381 	__free_page(page);
382 }
383 
384 static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
385 			void *arg)
386 {
387 	unsigned long i;
388 	unsigned long onlined_pages = *(unsigned long *)arg;
389 	struct page *page;
390 	if (PageReserved(pfn_to_page(start_pfn)))
391 		for (i = 0; i < nr_pages; i++) {
392 			page = pfn_to_page(start_pfn + i);
393 			online_page(page);
394 			onlined_pages++;
395 		}
396 	*(unsigned long *)arg = onlined_pages;
397 	return 0;
398 }
399 
400 
401 int online_pages(unsigned long pfn, unsigned long nr_pages)
402 {
403 	unsigned long onlined_pages = 0;
404 	struct zone *zone;
405 	int need_zonelists_rebuild = 0;
406 	int nid;
407 	int ret;
408 	struct memory_notify arg;
409 
410 	arg.start_pfn = pfn;
411 	arg.nr_pages = nr_pages;
412 	arg.status_change_nid = -1;
413 
414 	nid = page_to_nid(pfn_to_page(pfn));
415 	if (node_present_pages(nid) == 0)
416 		arg.status_change_nid = nid;
417 
418 	ret = memory_notify(MEM_GOING_ONLINE, &arg);
419 	ret = notifier_to_errno(ret);
420 	if (ret) {
421 		memory_notify(MEM_CANCEL_ONLINE, &arg);
422 		return ret;
423 	}
424 	/*
425 	 * This doesn't need a lock to do pfn_to_page().
426 	 * The section can't be removed here because of the
427 	 * memory_block->state_mutex.
428 	 */
429 	zone = page_zone(pfn_to_page(pfn));
430 	/*
431 	 * If this zone is not populated, it is not in the zonelists and
432 	 * the page allocator ignores it. So the zonelists must be
433 	 * rebuilt after the pages have been onlined.
434 	 */
435 	mutex_lock(&zonelists_mutex);
436 	if (!populated_zone(zone))
437 		need_zonelists_rebuild = 1;
438 
439 	ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
440 		online_pages_range);
441 	if (ret) {
442 		mutex_unlock(&zonelists_mutex);
443 		printk(KERN_DEBUG "online_pages %lx at %lx failed\n",
444 			nr_pages, pfn);
445 		memory_notify(MEM_CANCEL_ONLINE, &arg);
446 		return ret;
447 	}
448 
449 	zone->present_pages += onlined_pages;
450 	zone->zone_pgdat->node_present_pages += onlined_pages;
451 	if (need_zonelists_rebuild)
452 		build_all_zonelists(zone);
453 	else
454 		zone_pcp_update(zone);
455 
456 	mutex_unlock(&zonelists_mutex);
457 	setup_per_zone_wmarks();
458 	calculate_zone_inactive_ratio(zone);
459 	if (onlined_pages) {
460 		kswapd_run(zone_to_nid(zone));
461 		node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
462 	}
463 
464 	vm_total_pages = nr_free_pagecache_pages();
465 
466 	writeback_set_ratelimit();
467 
468 	if (onlined_pages)
469 		memory_notify(MEM_ONLINE, &arg);
470 
471 	return 0;
472 }
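/*
 * online_pages() is normally reached from the memory sysfs code in
 * drivers/base/memory.c, when userspace writes "online" to a memory
 * block's /sys/devices/system/memory/memoryN/state file; that path holds
 * the memory_block->state_mutex mentioned in the comment above.
 */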
473 #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
474 
475 /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
476 static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
477 {
478 	struct pglist_data *pgdat;
479 	unsigned long zones_size[MAX_NR_ZONES] = {0};
480 	unsigned long zholes_size[MAX_NR_ZONES] = {0};
481 	unsigned long start_pfn = start >> PAGE_SHIFT;
482 
483 	pgdat = arch_alloc_nodedata(nid);
484 	if (!pgdat)
485 		return NULL;
486 
487 	arch_refresh_nodedata(nid, pgdat);
488 
489 	/* we can use NODE_DATA(nid) from here */
490 
491 	/* init the node's zones as empty zones; we don't have any present pages yet. */
492 	free_area_init_node(nid, zones_size, start_pfn, zholes_size);
493 
494 	return pgdat;
495 }
496 
497 static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
498 {
499 	arch_refresh_nodedata(nid, NULL);
500 	arch_free_nodedata(pgdat);
501 	return;
502 }
503 
504 
505 /*
506  * called by cpu_up() to online a node without onlined memory.
507  */
508 int mem_online_node(int nid)
509 {
510 	pg_data_t	*pgdat;
511 	int	ret;
512 
513 	lock_memory_hotplug();
514 	pgdat = hotadd_new_pgdat(nid, 0);
515 	if (!pgdat) {
516 		ret = -ENOMEM;
517 		goto out;
518 	}
519 	node_set_online(nid);
520 	ret = register_one_node(nid);
521 	BUG_ON(ret);
522 
523 out:
524 	unlock_memory_hotplug();
525 	return ret;
526 }
527 
528 /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
529 int __ref add_memory(int nid, u64 start, u64 size)
530 {
531 	pg_data_t *pgdat = NULL;
532 	int new_pgdat = 0;
533 	struct resource *res;
534 	int ret;
535 
536 	lock_memory_hotplug();
537 
538 	res = register_memory_resource(start, size);
539 	ret = -EEXIST;
540 	if (!res)
541 		goto out;
542 
543 	if (!node_online(nid)) {
544 		pgdat = hotadd_new_pgdat(nid, start);
545 		ret = -ENOMEM;
546 		if (!pgdat)
547 			goto out;
548 		new_pgdat = 1;
549 	}
550 
551 	/* call arch's memory hotadd */
552 	ret = arch_add_memory(nid, start, size);
553 
554 	if (ret < 0)
555 		goto error;
556 
557 	/* we online the node here; we can't roll back from this point. */
558 	node_set_online(nid);
559 
560 	if (new_pgdat) {
561 		ret = register_one_node(nid);
562 		/*
563 		 * If the sysfs file for the new node can't be created, CPUs
564 		 * on that node can't be hot-added. There is no way to roll
565 		 * back now, so catch the failure with BUG_ON(), reluctantly.
566 		 */
567 		BUG_ON(ret);
568 	}
569 
570 	/* create a new firmware memmap entry ("System RAM") */
571 	firmware_map_add_hotplug(start, start + size, "System RAM");
572 
573 	goto out;
574 
575 error:
576 	/* rollback pgdat allocation and others */
577 	if (new_pgdat)
578 		rollback_node_hotadd(nid, pgdat);
579 	if (res)
580 		release_memory_resource(res);
581 
582 out:
583 	unlock_memory_hotplug();
584 	return ret;
585 }
586 EXPORT_SYMBOL_GPL(add_memory);
587 
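/*
 * Illustrative caller (a sketch; real callers live in hot-add drivers
 * such as drivers/acpi/acpi_memhotplug.c). A driver discovers a new
 * range, resolves its node, and calls:
 *
 *	int nid = memory_add_physaddr_to_nid(start);
 *	int ret = add_memory(nid, start, size);
 *	if (ret)
 *		... the range cannot be used as system RAM ...
 *
 * The new pages still have to be onlined afterwards; see online_pages().
 */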
588 #ifdef CONFIG_MEMORY_HOTREMOVE
589 /*
590  * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
591  * set and the size of the free page is given by page_order(). Using this,
592  * the function determines if the pageblock contains only free pages.
593  * Due to buddy constraints, a free page at least the size of a pageblock will
594  * be located at the start of the pageblock.
595  */
596 static inline int pageblock_free(struct page *page)
597 {
598 	return PageBuddy(page) && page_order(page) >= pageblock_order;
599 }
600 
601 /* Return the start of the next active pageblock after a given page */
602 static struct page *next_active_pageblock(struct page *page)
603 {
604 	/* Ensure the starting page is pageblock-aligned */
605 	BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
606 
607 	/* If the entire pageblock is free, move to the end of free page */
608 	if (pageblock_free(page)) {
609 		int order;
610 		/* Be careful: we hold no locks, so page_order() can change under us. */
611 		order = page_order(page);
612 		if ((order < MAX_ORDER) && (order >= pageblock_order))
613 			return page + (1 << order);
614 	}
615 
616 	return page + pageblock_nr_pages;
617 }
618 
619 /* Checks if this range of memory is likely to be hot-removable. */
620 int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
621 {
622 	struct page *page = pfn_to_page(start_pfn);
623 	struct page *end_page = page + nr_pages;
624 
625 	/* Check the starting page of each pageblock within the range */
626 	for (; page < end_page; page = next_active_pageblock(page)) {
627 		if (!is_pageblock_removable_nolock(page))
628 			return 0;
629 		cond_resched();
630 	}
631 
632 	/* All pageblocks in the memory block are likely to be hot-removable */
633 	return 1;
634 }
635 
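/*
 * Note the "likely" above: nothing is isolated while this check runs, so
 * the answer may already be stale when it is returned. The result backs
 * the per-block "removable" attribute exported through sysfs by
 * drivers/base/memory.c.
 */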
636 /*
637  * Confirm that all pages in the range [start_pfn, end_pfn) belong to the same zone.
638  */
639 static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
640 {
641 	unsigned long pfn;
642 	struct zone *zone = NULL;
643 	struct page *page;
644 	int i;
645 	for (pfn = start_pfn;
646 	     pfn < end_pfn;
647 	     pfn += MAX_ORDER_NR_PAGES) {
648 		i = 0;
649 		/* This is just a CONFIG_HOLES_IN_ZONE check. */
650 		while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
651 			i++;
652 		if (i == MAX_ORDER_NR_PAGES)
653 			continue;
654 		page = pfn_to_page(pfn + i);
655 		if (zone && page_zone(page) != zone)
656 			return 0;
657 		zone = page_zone(page);
658 	}
659 	return 1;
660 }
661 
662 /*
663  * Scanning pfns is much easier than scanning the LRU list: scan pfns from
664  * start to end and return the first pfn whose page is on the LRU (or 0).
665  */
666 static unsigned long scan_lru_pages(unsigned long start, unsigned long end)
667 {
668 	unsigned long pfn;
669 	struct page *page;
670 	for (pfn = start; pfn < end; pfn++) {
671 		if (pfn_valid(pfn)) {
672 			page = pfn_to_page(pfn);
673 			if (PageLRU(page))
674 				return pfn;
675 		}
676 	}
677 	return 0;
678 }
679 
680 static struct page *
681 hotremove_migrate_alloc(struct page *page, unsigned long private, int **x)
682 {
683 	/* This should be improooooved!! */
684 	return alloc_page(GFP_HIGHUSER_MOVABLE);
685 }
686 
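/*
 * hotremove_migrate_alloc() above is the get_new_page callback handed to
 * migrate_pages() in do_migrate_range() below. A smarter sketch would at
 * least prefer a node other than the one being emptied, e.g. by using
 * alloc_pages_node() with a surviving nid (a hypothetical improvement,
 * not what this file does today).
 */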
687 #define NR_OFFLINE_AT_ONCE_PAGES	(256)
688 static int
689 do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
690 {
691 	unsigned long pfn;
692 	struct page *page;
693 	int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
694 	int not_managed = 0;
695 	int ret = 0;
696 	LIST_HEAD(source);
697 
698 	for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
699 		if (!pfn_valid(pfn))
700 			continue;
701 		page = pfn_to_page(pfn);
702 		if (!page_count(page))
703 			continue;
704 		/*
705 		 * We can skip free pages, and we can only deal with pages on
706 		 * the LRU.
707 		 */
708 		ret = isolate_lru_page(page);
709 		if (!ret) { /* Success */
710 			list_add_tail(&page->lru, &source);
711 			move_pages--;
712 			inc_zone_page_state(page, NR_ISOLATED_ANON +
713 					    page_is_file_cache(page));
714 
715 		} else {
716 #ifdef CONFIG_DEBUG_VM
717 			printk(KERN_ALERT "removing pfn %lx from LRU failed\n",
718 			       pfn);
719 			dump_page(page);
720 #endif
721 			/* Because we don't hold zone->lock across this
722 			   scan, we should check the page count again here. */
723 			if (page_count(page)) {
724 				not_managed++;
725 				ret = -EBUSY;
726 				break;
727 			}
728 		}
729 	}
730 	if (!list_empty(&source)) {
731 		if (not_managed) {
732 			putback_lru_pages(&source);
733 			goto out;
734 		}
735 		/* this function returns # of failed pages */
736 		ret = migrate_pages(&source, hotremove_migrate_alloc, 0, 1);
737 		if (ret)
738 			putback_lru_pages(&source);
739 	}
740 out:
741 	return ret;
742 }
743 
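/*
 * Return-value summary for do_migrate_range(): 0 means every LRU page in
 * the scanned chunk was isolated and migrated; a positive value is the
 * number of pages migrate_pages() failed to move (they were put back on
 * the LRU); a negative value (e.g. -EBUSY for a still-referenced,
 * unmanaged page) tells the caller to retry or give up on the offline.
 */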
744 /*
745  * remove from free_area[] and mark all as Reserved.
746  */
747 static int
748 offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
749 			void *data)
750 {
751 	__offline_isolated_pages(start, start + nr_pages);
752 	return 0;
753 }
754 
755 static void
756 offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
757 {
758 	walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL,
759 				offline_isolated_pages_cb);
760 }
761 
762 /*
763  * Check that all pages in the range recorded as a memory resource are isolated.
764  */
765 static int
766 check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
767 			void *data)
768 {
769 	int ret;
770 	long offlined = *(long *)data;
771 	ret = test_pages_isolated(start_pfn, start_pfn + nr_pages);
772 	offlined = nr_pages;
773 	if (!ret)
774 		*(long *)data += offlined;
775 	return ret;
776 }
777 
778 static long
779 check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
780 {
781 	long offlined = 0;
782 	int ret;
783 
784 	ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined,
785 			check_pages_isolated_cb);
786 	if (ret < 0)
787 		offlined = (long)ret;
788 	return offlined;
789 }
790 
791 static int offline_pages(unsigned long start_pfn,
792 		  unsigned long end_pfn, unsigned long timeout)
793 {
794 	unsigned long pfn, nr_pages, expire;
795 	long offlined_pages;
796 	int ret, drain, retry_max, node;
797 	struct zone *zone;
798 	struct memory_notify arg;
799 
800 	BUG_ON(start_pfn >= end_pfn);
801 	/* at least, alignment against pageblock is necessary */
802 	if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
803 		return -EINVAL;
804 	if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
805 		return -EINVAL;
806 	/* This makes hotplug much easier (and more readable);
807 	   we assume it for now. */
808 	if (!test_pages_in_a_zone(start_pfn, end_pfn))
809 		return -EINVAL;
810 
811 	lock_memory_hotplug();
812 
813 	zone = page_zone(pfn_to_page(start_pfn));
814 	node = zone_to_nid(zone);
815 	nr_pages = end_pfn - start_pfn;
816 
817 	/* set above range as isolated */
818 	ret = start_isolate_page_range(start_pfn, end_pfn);
819 	if (ret)
820 		goto out;
821 
822 	arg.start_pfn = start_pfn;
823 	arg.nr_pages = nr_pages;
824 	arg.status_change_nid = -1;
825 	if (nr_pages >= node_present_pages(node))
826 		arg.status_change_nid = node;
827 
828 	ret = memory_notify(MEM_GOING_OFFLINE, &arg);
829 	ret = notifier_to_errno(ret);
830 	if (ret)
831 		goto failed_removal;
832 
833 	pfn = start_pfn;
834 	expire = jiffies + timeout;
835 	drain = 0;
836 	retry_max = 5;
837 repeat:
838 	/* start memory hot removal */
839 	ret = -EAGAIN;
840 	if (time_after(jiffies, expire))
841 		goto failed_removal;
842 	ret = -EINTR;
843 	if (signal_pending(current))
844 		goto failed_removal;
845 	ret = 0;
846 	if (drain) {
847 		lru_add_drain_all();
848 		cond_resched();
849 		drain_all_pages();
850 	}
851 
852 	pfn = scan_lru_pages(start_pfn, end_pfn);
853 	if (pfn) { /* We have page on LRU */
854 		ret = do_migrate_range(pfn, end_pfn);
855 		if (!ret) {
856 			drain = 1;
857 			goto repeat;
858 		} else {
859 			if (ret < 0)
860 				if (--retry_max == 0)
861 					goto failed_removal;
862 			yield();
863 			drain = 1;
864 			goto repeat;
865 		}
866 	}
867 	/* drain all zones' LRU pagevecs; this is asynchronous... */
868 	lru_add_drain_all();
869 	yield();
870 	/* drain per-cpu (pcp) pages; this is synchronous. */
871 	drain_all_pages();
872 	/* check again */
873 	offlined_pages = check_pages_isolated(start_pfn, end_pfn);
874 	if (offlined_pages < 0) {
875 		ret = -EBUSY;
876 		goto failed_removal;
877 	}
878 	printk(KERN_INFO "Offlined Pages %ld\n", offlined_pages);
879 	/* Ok, all of our target range is isolated.
880 	   We cannot roll back from this point. */
881 	offline_isolated_pages(start_pfn, end_pfn);
882 	/* reset pageblock flags and make the migratetype MOVABLE again */
883 	undo_isolate_page_range(start_pfn, end_pfn);
884 	/* removal success */
885 	zone->present_pages -= offlined_pages;
886 	zone->zone_pgdat->node_present_pages -= offlined_pages;
887 	totalram_pages -= offlined_pages;
888 
889 	setup_per_zone_wmarks();
890 	calculate_zone_inactive_ratio(zone);
891 	if (!node_present_pages(node)) {
892 		node_clear_state(node, N_HIGH_MEMORY);
893 		kswapd_stop(node);
894 	}
895 
896 	vm_total_pages = nr_free_pagecache_pages();
897 	writeback_set_ratelimit();
898 
899 	memory_notify(MEM_OFFLINE, &arg);
900 	unlock_memory_hotplug();
901 	return 0;
902 
903 failed_removal:
904 	printk(KERN_INFO "memory offlining %lx to %lx failed\n",
905 		start_pfn, end_pfn);
906 	memory_notify(MEM_CANCEL_OFFLINE, &arg);
907 	/* pushback to free area */
908 	undo_isolate_page_range(start_pfn, end_pfn);
909 
910 out:
911 	unlock_memory_hotplug();
912 	return ret;
913 }
914 
915 int remove_memory(u64 start, u64 size)
916 {
917 	unsigned long start_pfn, end_pfn;
918 
919 	start_pfn = PFN_DOWN(start);
920 	end_pfn = start_pfn + PFN_DOWN(size);
921 	return offline_pages(start_pfn, end_pfn, 120 * HZ);
922 }
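/*
 * The 120 * HZ above gives offline_pages() a two-minute wall-clock
 * budget, checked against jiffies in its retry loop, to empty the range
 * before it gives up with -EAGAIN.
 */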
923 #else
924 int remove_memory(u64 start, u64 size)
925 {
926 	return -EINVAL;
927 }
928 #endif /* CONFIG_MEMORY_HOTREMOVE */
929 EXPORT_SYMBOL_GPL(remove_memory);
930