/*
 *  linux/mm/memory_hotplug.c
 *
 *  Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>

#include <asm/tlbflush.h>

#include "internal.h"

/* add this memory to the iomem resource tree */
static struct resource *register_memory_resource(u64 start, u64 size)
{
	struct resource *res;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(!res);

	res->name = "System RAM";
	res->start = start;
	res->end = start + size - 1;
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	if (request_resource(&iomem_resource, res) < 0) {
		printk(KERN_ERR "System RAM resource %llx - %llx cannot be added\n",
		       (unsigned long long)res->start,
		       (unsigned long long)res->end);
		kfree(res);
		res = NULL;
	}
	return res;
}
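
/*
 * Note the inclusive end convention of struct resource: registering,
 * say, start = 0x40000000 with size = 0x10000000 (256MB) yields
 * res->start = 0x40000000 and res->end = 0x4fffffff (the values are
 * illustrative; any hot-addable range works the same way).
 */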

static void release_memory_resource(struct resource *res)
{
	if (!res)
		return;
	release_resource(res);
	kfree(res);
	return;
}

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
#ifndef CONFIG_SPARSEMEM_VMEMMAP
static void get_page_bootmem(unsigned long info, struct page *page, int type)
{
	atomic_set(&page->_mapcount, type);
	SetPagePrivate(page);
	set_page_private(page, info);
	atomic_inc(&page->_count);
}

void put_page_bootmem(struct page *page)
{
	int type;

	type = atomic_read(&page->_mapcount);
	BUG_ON(type >= -1);

	if (atomic_dec_return(&page->_count) == 1) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		reset_page_mapcount(page);
		__free_pages_bootmem(page, 0);
	}
}
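
/*
 * A hedged reader sketch (not code from this file): before dropping the
 * reference, a consumer can recover what get_page_bootmem() stashed
 * above:
 *
 *	int type = atomic_read(&page->_mapcount);   (SECTION_INFO etc.)
 *	unsigned long info = page_private(page);    (section or node nr)
 *	put_page_bootmem(page);
 */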

static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long *usemap, mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;

	if (!pfn_valid(start_pfn))
		return;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	/* Get the section's memmap address */
	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	/*
	 * Get the page for the memmap's physical address.
	 * XXX: need more consideration for sparse_vmemmap...
	 */
	page = virt_to_page(memmap);
	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;

	/* remember the memmap's pages */
	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, SECTION_INFO);

	usemap = __nr_to_section(section_nr)->pageblock_flags;
	page = virt_to_page(usemap);

	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}

void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
	unsigned long i, pfn, end_pfn, nr_pages;
	int node = pgdat->node_id;
	struct page *page;
	struct zone *zone;

	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
	page = virt_to_page(pgdat);

	for (i = 0; i < nr_pages; i++, page++)
		get_page_bootmem(node, page, NODE_INFO);

	zone = &pgdat->node_zones[0];
	for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {
		if (zone->wait_table) {
			nr_pages = zone->wait_table_hash_nr_entries
				* sizeof(wait_queue_head_t);
			nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;
			page = virt_to_page(zone->wait_table);

			for (i = 0; i < nr_pages; i++, page++)
				get_page_bootmem(node, page, NODE_INFO);
		}
	}

	pfn = pgdat->node_start_pfn;
	end_pfn = pfn + pgdat->node_spanned_pages;

	/* register section info */
	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION)
		register_page_bootmem_info_section(pfn);
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

static void grow_zone_span(struct zone *zone, unsigned long start_pfn,
			   unsigned long end_pfn)
{
	unsigned long old_zone_end_pfn;

	zone_span_writelock(zone);

	old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	if (start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
				zone->zone_start_pfn;

	zone_span_writeunlock(zone);
}

static void grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
			    unsigned long end_pfn)
{
	unsigned long old_pgdat_end_pfn =
		pgdat->node_start_pfn + pgdat->node_spanned_pages;

	if (start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
					pgdat->node_start_pfn;
}
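
/*
 * Worked example (illustrative numbers): a zone spanning pfns
 * [0x10000, 0x20000) grown by a hot-added section at [0x20000, 0x28000)
 * keeps zone_start_pfn = 0x10000 and gets spanned_pages =
 * max(0x20000, 0x28000) - 0x10000 = 0x18000; growing downward with
 * [0x8000, 0x10000) instead moves zone_start_pfn to 0x8000.
 */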

static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	int nid = pgdat->node_id;
	int zone_type;
	unsigned long flags;

	zone_type = zone - pgdat->node_zones;
	if (!zone->wait_table) {
		int ret;

		ret = init_currently_empty_zone(zone, phys_start_pfn,
						nr_pages, MEMMAP_HOTPLUG);
		if (ret)
			return ret;
	}
	pgdat_resize_lock(zone->zone_pgdat, &flags);
	grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);
	grow_pgdat_span(zone->zone_pgdat, phys_start_pfn,
			phys_start_pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);
	memmap_init_zone(nr_pages, nid, zone_type,
			 phys_start_pfn, MEMMAP_HOTPLUG);
	return 0;
}

static int __meminit __add_section(int nid, struct zone *zone,
					unsigned long phys_start_pfn)
{
	int nr_pages = PAGES_PER_SECTION;
	int ret;

	if (pfn_valid(phys_start_pfn))
		return -EEXIST;

	ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);

	if (ret < 0)
		return ret;

	ret = __add_zone(zone, phys_start_pfn);

	if (ret < 0)
		return ret;

	return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static int __remove_section(struct zone *zone, struct mem_section *ms)
{
	/*
	 * XXX: Freeing the memmap with vmemmap is not implemented yet.
	 *      This should be removed later.
	 */
	return -EBUSY;
}
#else
static int __remove_section(struct zone *zone, struct mem_section *ms)
{
	unsigned long flags;
	struct pglist_data *pgdat = zone->zone_pgdat;
	int ret = -EINVAL;

	if (!valid_section(ms))
		return ret;

	ret = unregister_memory_section(ms);
	if (ret)
		return ret;

	pgdat_resize_lock(pgdat, &flags);
	sparse_remove_one_section(zone, ms);
	pgdat_resize_unlock(pgdat, &flags);
	return 0;
}
#endif

/*
 * Reasonably generic function for adding memory.  It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
			unsigned long nr_pages)
{
	unsigned long i;
	int err = 0;
	int start_sec, end_sec;
	/* while initializing the mem_map, align the hot-added range to sections */
	start_sec = pfn_to_section_nr(phys_start_pfn);
	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

	for (i = start_sec; i <= end_sec; i++) {
		err = __add_section(nid, zone, i << PFN_SECTION_SHIFT);

		/*
		 * -EEXIST is ultimately handled by the ioresource collision
		 * check; see add_memory() => register_memory_resource().
		 * A warning is printed if there is a collision.
		 */
		if (err && (err != -EEXIST))
			break;
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(__add_pages);
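
/*
 * Worked example, assuming PAGES_PER_SECTION = 32768 (the 128MB/4KB
 * layout common to x86_64 SPARSEMEM): __add_pages(nid, zone, 0x40000,
 * 0x10000) computes start_sec = 8 and end_sec = 9, so __add_section()
 * is called for the sections starting at pfns 0x40000 and 0x48000.
 */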

/**
 * __remove_pages() - remove sections of pages from a zone
 * @zone: zone from which pages need to be removed
 * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be multiple of section size)
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing. Caller needs to make
 * sure that pages are marked reserved and zones are adjusted properly by
 * calling offline_pages().
 */
int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
		 unsigned long nr_pages)
{
	unsigned long i, ret = 0;
	int sections_to_remove;

	/*
	 * We can only remove entire sections
	 */
	BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
	BUG_ON(nr_pages % PAGES_PER_SECTION);

	sections_to_remove = nr_pages / PAGES_PER_SECTION;
	for (i = 0; i < sections_to_remove; i++) {
		unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
		release_mem_region(pfn << PAGE_SHIFT,
				   PAGES_PER_SECTION << PAGE_SHIFT);
		ret = __remove_section(zone, __pfn_to_section(pfn));
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(__remove_pages);
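
/*
 * Alignment example (same illustrative layout as above):
 * __remove_pages(zone, 0x40000, 0x10000) removes exactly two sections,
 * while phys_start_pfn = 0x40001 or nr_pages = 0x9000 would trip the
 * BUG_ON() alignment checks.
 */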

void online_page(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);

	totalram_pages++;
	if (pfn >= num_physpages)
		num_physpages = pfn + 1;

#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page))
		totalhigh_pages++;
#endif

#ifdef CONFIG_FLATMEM
	max_mapnr = max(page_to_pfn(page), max_mapnr);
#endif

	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
}

static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
			void *arg)
{
	unsigned long i;
	unsigned long onlined_pages = *(unsigned long *)arg;
	struct page *page;
	if (PageReserved(pfn_to_page(start_pfn)))
		for (i = 0; i < nr_pages; i++) {
			page = pfn_to_page(start_pfn + i);
			online_page(page);
			onlined_pages++;
		}
	*(unsigned long *)arg = onlined_pages;
	return 0;
}

int online_pages(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long onlined_pages = 0;
	struct zone *zone;
	int need_zonelists_rebuild = 0;
	int nid;
	int ret;
	struct memory_notify arg;

	arg.start_pfn = pfn;
	arg.nr_pages = nr_pages;
	arg.status_change_nid = -1;

	nid = page_to_nid(pfn_to_page(pfn));
	if (node_present_pages(nid) == 0)
		arg.status_change_nid = nid;

	ret = memory_notify(MEM_GOING_ONLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret) {
		memory_notify(MEM_CANCEL_ONLINE, &arg);
		return ret;
	}
	/*
	 * This doesn't need a lock to do pfn_to_page().
	 * The section can't be removed here because of the
	 * memory_block->state_mutex.
	 */
	zone = page_zone(pfn_to_page(pfn));
	/*
	 * If this zone is not populated, it is not in the zonelist and the
	 * page allocator ignores it, so the zonelists must be rebuilt after
	 * onlining.
	 */
	if (!populated_zone(zone))
		need_zonelists_rebuild = 1;

	ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
		online_pages_range);
	if (ret) {
		printk(KERN_DEBUG "online_pages %lx at %lx failed\n",
			nr_pages, pfn);
		memory_notify(MEM_CANCEL_ONLINE, &arg);
		return ret;
	}

	zone->present_pages += onlined_pages;
	zone->zone_pgdat->node_present_pages += onlined_pages;

	zone_pcp_update(zone);
	setup_per_zone_wmarks();
	calculate_zone_inactive_ratio(zone);
	if (onlined_pages) {
		kswapd_run(zone_to_nid(zone));
		node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
	}

	if (need_zonelists_rebuild)
		build_all_zonelists();
	else
		vm_total_pages = nr_free_pagecache_pages();

	writeback_set_ratelimit();

	if (onlined_pages)
		memory_notify(MEM_ONLINE, &arg);

	return 0;
}
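
/*
 * In practice online_pages() is reached from the memory sysfs driver
 * (drivers/base/memory.c) when userspace onlines a block, e.g.
 * (block number illustrative):
 *
 *	# echo online > /sys/devices/system/memory/memory32/state
 */
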
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
{
	struct pglist_data *pgdat;
	unsigned long zones_size[MAX_NR_ZONES] = {0};
	unsigned long zholes_size[MAX_NR_ZONES] = {0};
	unsigned long start_pfn = start >> PAGE_SHIFT;

	pgdat = arch_alloc_nodedata(nid);
	if (!pgdat)
		return NULL;

	arch_refresh_nodedata(nid, pgdat);

	/* we can use NODE_DATA(nid) from here */

	/* init the node's zones as empty zones; we don't have any present pages */
	free_area_init_node(nid, zones_size, start_pfn, zholes_size);

	return pgdat;
}

static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
	arch_refresh_nodedata(nid, NULL);
	arch_free_nodedata(pgdat);
	return;
}

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
int __ref add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat = NULL;
	int new_pgdat = 0;
	struct resource *res;
	int ret;

	lock_system_sleep();

	res = register_memory_resource(start, size);
	ret = -EEXIST;
	if (!res)
		goto out;

	if (!node_online(nid)) {
		pgdat = hotadd_new_pgdat(nid, start);
		ret = -ENOMEM;
		if (!pgdat)
			goto out;
		new_pgdat = 1;
	}

	/* call the arch's memory hotadd */
	ret = arch_add_memory(nid, start, size);

	if (ret < 0)
		goto error;

	/* we online the node here; we can't roll back from this point */
	node_set_online(nid);

	if (new_pgdat) {
		ret = register_one_node(nid);
		/*
		 * If the sysfs file for the new node can't be created, CPUs
		 * on the node can't be hot-added.  There is no way to roll
		 * back now, so check it with BUG_ON() to catch it,
		 * reluctantly.
		 */
		BUG_ON(ret);
	}

	goto out;

error:
	/* roll back the pgdat allocation and the rest */
	if (new_pgdat)
		rollback_node_hotadd(nid, pgdat);
	if (res)
		release_memory_resource(res);

out:
	unlock_system_sleep();
	return ret;
}
EXPORT_SYMBOL_GPL(add_memory);
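
/*
 * add_memory() is typically called from firmware-driven hotplug (e.g.
 * the ACPI memory device driver) or, on architectures that select
 * CONFIG_ARCH_MEMORY_PROBE, from the sysfs probe file (address
 * illustrative):
 *
 *	# echo 0x40000000 > /sys/devices/system/memory/probe
 */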

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
 * set and the size of the free page is given by page_order(). Using this,
 * the function determines if the pageblock contains only free pages.
 * Due to buddy constraints, a free page at least the size of a pageblock will
 * be located at the start of the pageblock.
 */
static inline int pageblock_free(struct page *page)
{
	return PageBuddy(page) && page_order(page) >= pageblock_order;
}

/* Return the start of the next active pageblock after a given page */
static struct page *next_active_pageblock(struct page *page)
{
	int pageblocks_stride;

	/* Ensure the starting page is pageblock-aligned */
	BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));

	/* Move forward by at least 1 * pageblock_nr_pages */
	pageblocks_stride = 1;

	/*
	 * If the entire pageblock is free, move to the end of the free page.
	 * An order-N buddy page covers 1 << (N - pageblock_order) pageblocks.
	 */
	if (pageblock_free(page))
		pageblocks_stride = 1 << (page_order(page) - pageblock_order);

	return page + (pageblocks_stride * pageblock_nr_pages);
}
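
/*
 * Stride example (illustrative, pageblock_order = 10, so
 * pageblock_nr_pages = 1024): a free buddy page of order 12 at the
 * start of the pageblock gives a stride of 1 << (12 - 10) = 4
 * pageblocks, skipping the whole 4096-page free chunk in one step.
 */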

/* Checks if this range of memory is likely to be hot-removable. */
int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
{
	int type;
	struct page *page = pfn_to_page(start_pfn);
	struct page *end_page = page + nr_pages;

	/* Check the starting page of each pageblock within the range */
	for (; page < end_page; page = next_active_pageblock(page)) {
		type = get_pageblock_migratetype(page);

		/*
		 * A pageblock containing MOVABLE or free pages is considered
		 * removable
		 */
		if (type != MIGRATE_MOVABLE && !pageblock_free(page))
			return 0;

		/*
		 * A pageblock starting with a PageReserved page is not
		 * considered removable.
		 */
		if (PageReserved(page))
			return 0;
	}

	/* All pageblocks in the memory block are likely to be hot-removable */
	return 1;
}
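
/*
 * This check backs the per-block "removable" sysfs attribute, so
 * userspace can probe before attempting an offline, e.g. (block number
 * illustrative):
 *
 *	# cat /sys/devices/system/memory/memory32/removable
 *	1
 */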

/*
 * Confirm that all pages in the range [start_pfn, end_pfn) belong to
 * the same zone.
 */
static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct zone *zone = NULL;
	struct page *page;
	int i;
	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += MAX_ORDER_NR_PAGES) {
		i = 0;
		/* This is just a CONFIG_HOLES_IN_ZONE check. */
		while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
			i++;
		if (i == MAX_ORDER_NR_PAGES)
			continue;
		page = pfn_to_page(pfn + i);
		if (zone && page_zone(page) != zone)
			return 0;
		zone = page_zone(page);
	}
	return 1;
}

/*
 * Scanning pfns is much easier than scanning the LRU lists: scan pfns
 * from start to end and return the first pfn of a page on the LRU,
 * or 0 if none is found.
 */
int scan_lru_pages(unsigned long start, unsigned long end)
{
	unsigned long pfn;
	struct page *page;
	for (pfn = start; pfn < end; pfn++) {
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);
			if (PageLRU(page))
				return pfn;
		}
	}
	return 0;
}

static struct page *
hotremove_migrate_alloc(struct page *page, unsigned long private, int **x)
{
	/* This should be improooooved!! */
	return alloc_page(GFP_HIGHUSER_MOVABLE);
}

#define NR_OFFLINE_AT_ONCE_PAGES	(256)
static int
do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page;
	int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
	int not_managed = 0;
	int ret = 0;
	LIST_HEAD(source);

	for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		if (!page_count(page))
			continue;
		/*
		 * We can skip free pages, and we can only deal with pages on
		 * the LRU.
		 */
		ret = isolate_lru_page(page);
		if (!ret) { /* Success */
			list_add_tail(&page->lru, &source);
			move_pages--;
		} else {
			/* Because we don't have the big zone->lock, we should
			   check this again here. */
			if (page_count(page))
				not_managed++;
#ifdef CONFIG_DEBUG_VM
			printk(KERN_INFO "removing from LRU failed"
					 " %lx/%d/%lx\n",
				pfn, page_count(page), page->flags);
#endif
		}
	}
	ret = -EBUSY;
	if (not_managed) {
		if (!list_empty(&source))
			putback_lru_pages(&source);
		goto out;
	}
	ret = 0;
	if (list_empty(&source))
		goto out;
	/* this function returns the number of failed pages */
	ret = migrate_pages(&source, hotremove_migrate_alloc, 0);

out:
	return ret;
}
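
/*
 * Caller's control flow, as offline_pages() below uses it (sketch):
 *
 *	pfn = scan_lru_pages(start_pfn, end_pfn);
 *	if (pfn)
 *		ret = do_migrate_range(pfn, end_pfn);
 *	(ret is 0, a count of failed pages, or a negative errno;
 *	 the caller drains the LRU/pcp lists and retries until no
 *	 LRU page remains in the range)
 */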

/*
 * remove from free_area[] and mark all as Reserved.
 */
static int
offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
			void *data)
{
	__offline_isolated_pages(start, start + nr_pages);
	return 0;
}

static void
offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL,
				offline_isolated_pages_cb);
}

/*
 * Check that all pages in the range, recorded as a memory resource,
 * are isolated.
 */
static int
check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
			void *data)
{
	int ret;
	long offlined = *(long *)data;
	ret = test_pages_isolated(start_pfn, start_pfn + nr_pages);
	offlined = nr_pages;
	if (!ret)
		*(long *)data += offlined;
	return ret;
}

static long
check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
	long offlined = 0;
	int ret;

	ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined,
			check_pages_isolated_cb);
	if (ret < 0)
		offlined = (long)ret;
	return offlined;
}

int offline_pages(unsigned long start_pfn,
		  unsigned long end_pfn, unsigned long timeout)
{
	unsigned long pfn, nr_pages, expire;
	long offlined_pages;
	int ret, drain, retry_max, node;
	struct zone *zone;
	struct memory_notify arg;

	BUG_ON(start_pfn >= end_pfn);
	/* at a minimum, alignment to pageblocks is necessary */
	if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
		return -EINVAL;
	if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
		return -EINVAL;
	/* This makes hotplug much easier and more readable;
	   we assume this for now. */
	if (!test_pages_in_a_zone(start_pfn, end_pfn))
		return -EINVAL;

	lock_system_sleep();

	zone = page_zone(pfn_to_page(start_pfn));
	node = zone_to_nid(zone);
	nr_pages = end_pfn - start_pfn;

	/* set the above range as isolated */
	ret = start_isolate_page_range(start_pfn, end_pfn);
	if (ret)
		goto out;

	arg.start_pfn = start_pfn;
	arg.nr_pages = nr_pages;
	arg.status_change_nid = -1;
	if (nr_pages >= node_present_pages(node))
		arg.status_change_nid = node;

	ret = memory_notify(MEM_GOING_OFFLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret)
		goto failed_removal;

	pfn = start_pfn;
	expire = jiffies + timeout;
	drain = 0;
	retry_max = 5;
repeat:
	/* start memory hot removal */
	ret = -EAGAIN;
	if (time_after(jiffies, expire))
		goto failed_removal;
	ret = -EINTR;
	if (signal_pending(current))
		goto failed_removal;
	ret = 0;
	if (drain) {
		lru_add_drain_all();
		flush_scheduled_work();
		cond_resched();
		drain_all_pages();
	}

	pfn = scan_lru_pages(start_pfn, end_pfn);
	if (pfn) { /* We have a page on the LRU */
		ret = do_migrate_range(pfn, end_pfn);
		if (!ret) {
			drain = 1;
			goto repeat;
		} else {
			if (ret < 0)
				if (--retry_max == 0)
					goto failed_removal;
			yield();
			drain = 1;
			goto repeat;
		}
	}
	/* drain all zones' LRU pagevecs; this is asynchronous... */
	lru_add_drain_all();
	flush_scheduled_work();
	yield();
	/* drain pcp pages; this is synchronous */
	drain_all_pages();
	/* check again */
	offlined_pages = check_pages_isolated(start_pfn, end_pfn);
	if (offlined_pages < 0) {
		ret = -EBUSY;
		goto failed_removal;
	}
	printk(KERN_INFO "Offlined Pages %ld\n", offlined_pages);
	/* OK, all of our target is isolated.
	   We cannot do a rollback at this point. */
	offline_isolated_pages(start_pfn, end_pfn);
	/* reset pagetype flags and make the migrate type MOVABLE again */
	undo_isolate_page_range(start_pfn, end_pfn);
	/* removal success */
	zone->present_pages -= offlined_pages;
	zone->zone_pgdat->node_present_pages -= offlined_pages;
	totalram_pages -= offlined_pages;

	setup_per_zone_wmarks();
	calculate_zone_inactive_ratio(zone);

	vm_total_pages = nr_free_pagecache_pages();
	writeback_set_ratelimit();

	memory_notify(MEM_OFFLINE, &arg);
	unlock_system_sleep();
	return 0;

failed_removal:
	printk(KERN_INFO "memory offlining %lx to %lx failed\n",
		start_pfn, end_pfn);
	memory_notify(MEM_CANCEL_OFFLINE, &arg);
	/* push the pages back to the free area */
	undo_isolate_page_range(start_pfn, end_pfn);

out:
	unlock_system_sleep();
	return ret;
}
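
/*
 * Offlining is likewise driven through sysfs (block number
 * illustrative):
 *
 *	# echo offline > /sys/devices/system/memory/memory32/state
 *
 * which reaches this function via remove_memory() below with its
 * 120 * HZ timeout.
 */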

int remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn, end_pfn;

	start_pfn = PFN_DOWN(start);
	end_pfn = start_pfn + PFN_DOWN(size);
	return offline_pages(start_pfn, end_pfn, 120 * HZ);
}
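
/*
 * PFN arithmetic example (illustrative, 4KB pages):
 * remove_memory(0x40000000, 0x8000000) offlines pfns
 * [0x40000, 0x48000), i.e. 128MB starting at 1GB, with a two-minute
 * timeout.
 */
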
#else
int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
EXPORT_SYMBOL_GPL(remove_memory);
887