/*
 *  linux/mm/memory_hotplug.c
 *
 *  Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm_inline.h>
#include <linux/firmware-map.h>
#include <linux/stop_machine.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>

#include <asm/tlbflush.h>

#include "internal.h"

/*
 * online_page_callback contains a pointer to the current page onlining
 * function. Initially it is generic_online_page(). If required, it can be
 * changed by calling set_online_page_callback() for callback registration
 * and restore_online_page_callback() to restore the generic callback.
 */

static void generic_online_page(struct page *page);

static online_page_callback_t online_page_callback = generic_online_page;
static DEFINE_MUTEX(online_page_callback_lock);

/* The same as the cpu_hotplug lock, but for memory hotplug. */
static struct {
	struct task_struct *active_writer;
	struct mutex lock; /* Synchronizes accesses to refcount, */
	/*
	 * Also blocks the new readers during
	 * an ongoing mem hotplug operation.
	 */
	int refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} mem_hotplug = {
	.active_writer = NULL,
	.lock = __MUTEX_INITIALIZER(mem_hotplug.lock),
	.refcount = 0,
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "mem_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_mems() and mem_hotplug_begin/end() */
#define memhp_lock_acquire_read() lock_map_acquire_read(&mem_hotplug.dep_map)
#define memhp_lock_acquire()      lock_map_acquire(&mem_hotplug.dep_map)
#define memhp_lock_release()      lock_map_release(&mem_hotplug.dep_map)

void get_online_mems(void)
{
	might_sleep();
	if (mem_hotplug.active_writer == current)
		return;
	memhp_lock_acquire_read();
	mutex_lock(&mem_hotplug.lock);
	mem_hotplug.refcount++;
	mutex_unlock(&mem_hotplug.lock);

}

void put_online_mems(void)
{
	if (mem_hotplug.active_writer == current)
		return;
	mutex_lock(&mem_hotplug.lock);

	if (WARN_ON(!mem_hotplug.refcount))
		mem_hotplug.refcount++; /* try to fix things up */

	if (!--mem_hotplug.refcount && unlikely(mem_hotplug.active_writer))
		wake_up_process(mem_hotplug.active_writer);
	mutex_unlock(&mem_hotplug.lock);
	memhp_lock_release();

}

static void mem_hotplug_begin(void)
{
	mem_hotplug.active_writer = current;

	memhp_lock_acquire();
	for (;;) {
		mutex_lock(&mem_hotplug.lock);
		if (likely(!mem_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&mem_hotplug.lock);
		schedule();
	}
}

static void mem_hotplug_done(void)
{
	mem_hotplug.active_writer = NULL;
	mutex_unlock(&mem_hotplug.lock);
	memhp_lock_release();
}
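/*
 * Example (illustrative sketch, not part of this file's API): code that only
 * wants a stable view of hot-pluggable memory, e.g. a hypothetical stats
 * dumper, would bracket its walk with the reader side of the lock above:
 *
 *	get_online_mems();
 *	for_each_online_node(nid)
 *		dump_this_nodes_memory(nid);	// hypothetical helper
 *	put_online_mems();
 *
 * Writers (online_pages()/offline_pages(), add_memory()/remove_memory())
 * use mem_hotplug_begin()/mem_hotplug_done() and wait until all readers
 * have dropped their reference.
 */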
/* add this memory to iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size)
{
	struct resource *res;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(!res);

	res->name = "System RAM";
	res->start = start;
	res->end = start + size - 1;
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	if (request_resource(&iomem_resource, res) < 0) {
		pr_debug("System RAM resource %pR cannot be added\n", res);
		kfree(res);
		res = NULL;
	}
	return res;
}

static void release_memory_resource(struct resource *res)
{
	if (!res)
		return;
	release_resource(res);
	kfree(res);
	return;
}

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
void get_page_bootmem(unsigned long info, struct page *page,
		      unsigned long type)
{
	page->lru.next = (struct list_head *) type;
	SetPagePrivate(page);
	set_page_private(page, info);
	atomic_inc(&page->_count);
}

void put_page_bootmem(struct page *page)
{
	unsigned long type;

	type = (unsigned long) page->lru.next;
	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);

	if (atomic_dec_return(&page->_count) == 1) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		INIT_LIST_HEAD(&page->lru);
		free_reserved_page(page);
	}
}

#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
#ifndef CONFIG_SPARSEMEM_VMEMMAP
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long *usemap, mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	/* Get section's memmap address */
	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	/*
	 * Get page for the memmap's phys address
	 * XXX: need more consideration for sparse_vmemmap...
	 */
	page = virt_to_page(memmap);
	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;

	/* remember memmap's page */
	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, SECTION_INFO);

	usemap = __nr_to_section(section_nr)->pageblock_flags;
	page = virt_to_page(usemap);

	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);

}
#else /* CONFIG_SPARSEMEM_VMEMMAP */
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long *usemap, mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;

	if (!pfn_valid(start_pfn))
		return;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);

	usemap = __nr_to_section(section_nr)->pageblock_flags;
	page = virt_to_page(usemap);

	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
	unsigned long i, pfn, end_pfn, nr_pages;
	int node = pgdat->node_id;
	struct page *page;
	struct zone *zone;

	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
	page = virt_to_page(pgdat);

	for (i = 0; i < nr_pages; i++, page++)
		get_page_bootmem(node, page, NODE_INFO);

	zone = &pgdat->node_zones[0];
	for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {
		if (zone_is_initialized(zone)) {
			nr_pages = zone->wait_table_hash_nr_entries
				* sizeof(wait_queue_head_t);
			nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;
			page = virt_to_page(zone->wait_table);

			for (i = 0; i < nr_pages; i++, page++)
				get_page_bootmem(node, page, NODE_INFO);
		}
	}

	pfn = pgdat->node_start_pfn;
	end_pfn = pgdat_end_pfn(pgdat);

	/* register section info */
	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		/*
		 * Some platforms can assign the same pfn to multiple nodes - on
		 * node0 as well as nodeN. To avoid registering a pfn against
		 * multiple nodes we check that this pfn does not already
		 * reside in some other nodes.
		 */
		if (pfn_valid(pfn) && (pfn_to_nid(pfn) == node))
			register_page_bootmem_info_section(pfn);
	}
}
#endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */

static void __meminit grow_zone_span(struct zone *zone, unsigned long start_pfn,
				     unsigned long end_pfn)
{
	unsigned long old_zone_end_pfn;

	zone_span_writelock(zone);

	old_zone_end_pfn = zone_end_pfn(zone);
	if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
				zone->zone_start_pfn;

	zone_span_writeunlock(zone);
}

static void resize_zone(struct zone *zone, unsigned long start_pfn,
			unsigned long end_pfn)
{
	zone_span_writelock(zone);

	if (end_pfn - start_pfn) {
		zone->zone_start_pfn = start_pfn;
		zone->spanned_pages = end_pfn - start_pfn;
	} else {
		/*
		 * Keep this consistent with free_area_init_core():
		 * if spanned_pages == 0, then keep zone_start_pfn == 0.
		 */
		zone->zone_start_pfn = 0;
		zone->spanned_pages = 0;
	}

	zone_span_writeunlock(zone);
}

static void fix_zone_id(struct zone *zone, unsigned long start_pfn,
			unsigned long end_pfn)
{
	enum zone_type zid = zone_idx(zone);
	int nid = zone->zone_pgdat->node_id;
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn++)
		set_page_links(pfn_to_page(pfn), zid, nid, pfn);
}

/*
 * Can fail with -ENOMEM from allocating a wait table with vmalloc() or
 * alloc_bootmem_node_nopanic()/memblock_virt_alloc_node_nopanic().
 */
static int __ref ensure_zone_is_initialized(struct zone *zone,
			unsigned long start_pfn, unsigned long num_pages)
{
	if (!zone_is_initialized(zone))
		return init_currently_empty_zone(zone, start_pfn, num_pages,
						 MEMMAP_HOTPLUG);
	return 0;
}

static int __meminit move_pfn_range_left(struct zone *z1, struct zone *z2,
		unsigned long start_pfn, unsigned long end_pfn)
{
	int ret;
	unsigned long flags;
	unsigned long z1_start_pfn;

	ret = ensure_zone_is_initialized(z1, start_pfn, end_pfn - start_pfn);
	if (ret)
		return ret;

	pgdat_resize_lock(z1->zone_pgdat, &flags);

	/* can't move pfns which are higher than @z2 */
	if (end_pfn > zone_end_pfn(z2))
		goto out_fail;
	/* the move out part must be at the left most of @z2 */
	if (start_pfn > z2->zone_start_pfn)
		goto out_fail;
	/* must include/overlap */
	if (end_pfn <= z2->zone_start_pfn)
		goto out_fail;

	/* use start_pfn for z1's start_pfn if z1 is empty */
	if (!zone_is_empty(z1))
		z1_start_pfn = z1->zone_start_pfn;
	else
		z1_start_pfn = start_pfn;

	resize_zone(z1, z1_start_pfn, end_pfn);
	resize_zone(z2, end_pfn, zone_end_pfn(z2));

	pgdat_resize_unlock(z1->zone_pgdat, &flags);

	fix_zone_id(z1, start_pfn, end_pfn);

	return 0;
out_fail:
	pgdat_resize_unlock(z1->zone_pgdat, &flags);
	return -1;
}

static int __meminit move_pfn_range_right(struct zone *z1, struct zone *z2,
		unsigned long start_pfn, unsigned long end_pfn)
{
	int ret;
	unsigned long flags;
	unsigned long z2_end_pfn;

	ret = ensure_zone_is_initialized(z2, start_pfn, end_pfn - start_pfn);
	if (ret)
		return ret;

	pgdat_resize_lock(z1->zone_pgdat, &flags);

	/* can't move pfns which are lower than @z1 */
	if (z1->zone_start_pfn > start_pfn)
		goto out_fail;
	/* the move out part must be at the right most of @z1 */
	if (zone_end_pfn(z1) > end_pfn)
		goto out_fail;
	/* must include/overlap */
	if (start_pfn >= zone_end_pfn(z1))
		goto out_fail;

	/* use end_pfn for z2's end_pfn if z2 is empty */
	if (!zone_is_empty(z2))
		z2_end_pfn = zone_end_pfn(z2);
	else
		z2_end_pfn = end_pfn;

	resize_zone(z1, z1->zone_start_pfn, start_pfn);
	resize_zone(z2, start_pfn, z2_end_pfn);

	pgdat_resize_unlock(z1->zone_pgdat, &flags);

	fix_zone_id(z2, start_pfn, end_pfn);

	return 0;
out_fail:
	pgdat_resize_unlock(z1->zone_pgdat, &flags);
	return -1;
}

static void __meminit grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
				      unsigned long end_pfn)
{
	unsigned long old_pgdat_end_pfn = pgdat_end_pfn(pgdat);

	if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
					pgdat->node_start_pfn;
}

static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	int nid = pgdat->node_id;
	int zone_type;
	unsigned long flags;
	int ret;

	zone_type = zone - pgdat->node_zones;
	ret = ensure_zone_is_initialized(zone, phys_start_pfn, nr_pages);
	if (ret)
		return ret;

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);
	grow_pgdat_span(zone->zone_pgdat, phys_start_pfn,
			phys_start_pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);
	memmap_init_zone(nr_pages, nid, zone_type,
			 phys_start_pfn, MEMMAP_HOTPLUG);
	return 0;
}

static int __meminit __add_section(int nid, struct zone *zone,
				   unsigned long phys_start_pfn)
{
	int ret;

	if (pfn_valid(phys_start_pfn))
		return -EEXIST;

	ret = sparse_add_one_section(zone, phys_start_pfn);

	if (ret < 0)
		return ret;

	ret = __add_zone(zone, phys_start_pfn);

	if (ret < 0)
		return ret;

	return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
}

/*
 * Reasonably generic function for adding memory. It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
		      unsigned long nr_pages)
{
	unsigned long i;
	int err = 0;
	int start_sec, end_sec;
	/* during the initialization of mem_map, align the hot-added range to sections */
	start_sec = pfn_to_section_nr(phys_start_pfn);
	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

	for (i = start_sec; i <= end_sec; i++) {
		err = __add_section(nid, zone, i << PFN_SECTION_SHIFT);

		/*
		 * EEXIST is finally dealt with by the ioresource collision
		 * check; see add_memory() => register_memory_resource().
		 * A warning will be printed if there is a collision.
		 */
		if (err && (err != -EEXIST))
			break;
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(__add_pages);

#ifdef CONFIG_MEMORY_HOTREMOVE
/* find the smallest valid pfn in the range [start_pfn, end_pfn) */
static int find_smallest_section_pfn(int nid, struct zone *zone,
				     unsigned long start_pfn,
				     unsigned long end_pfn)
{
	struct mem_section *ms;

	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(start_pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (unlikely(pfn_to_nid(start_pfn) != nid))
			continue;

		if (zone && zone != page_zone(pfn_to_page(start_pfn)))
			continue;

		return start_pfn;
	}

	return 0;
}

/* find the biggest valid pfn in the range [start_pfn, end_pfn). */
static int find_biggest_section_pfn(int nid, struct zone *zone,
				    unsigned long start_pfn,
				    unsigned long end_pfn)
{
	struct mem_section *ms;
	unsigned long pfn;

	/* pfn is the end pfn of a memory section. */
	pfn = end_pfn - 1;
	for (; pfn >= start_pfn; pfn -= PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (unlikely(pfn_to_nid(pfn) != nid))
			continue;

		if (zone && zone != page_zone(pfn_to_page(pfn)))
			continue;

		return pfn;
	}

	return 0;
}

static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
			     unsigned long end_pfn)
{
	unsigned long zone_start_pfn = zone->zone_start_pfn;
	unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */
	unsigned long zone_end_pfn = z;
	unsigned long pfn;
	struct mem_section *ms;
	int nid = zone_to_nid(zone);

	zone_span_writelock(zone);
	if (zone_start_pfn == start_pfn) {
		/*
		 * If the section is the smallest section in the zone, we need
		 * to shrink zone->zone_start_pfn and zone->spanned_pages.
		 * In this case, we find the second smallest valid mem_section
		 * for shrinking the zone.
		 */
		pfn = find_smallest_section_pfn(nid, zone, end_pfn,
						zone_end_pfn);
		if (pfn) {
			zone->zone_start_pfn = pfn;
			zone->spanned_pages = zone_end_pfn - pfn;
		}
	} else if (zone_end_pfn == end_pfn) {
		/*
		 * If the section is the biggest section in the zone, we need
		 * to shrink zone->spanned_pages.
		 * In this case, we find the second biggest valid mem_section
		 * for shrinking the zone.
		 */
		pfn = find_biggest_section_pfn(nid, zone, zone_start_pfn,
					       start_pfn);
		if (pfn)
			zone->spanned_pages = pfn - zone_start_pfn + 1;
	}

	/*
	 * The section is neither the biggest nor the smallest mem_section in
	 * the zone; it only creates a hole in the zone. So in this case we
	 * need not change the zone. But perhaps the zone contains only holes,
	 * so check whether the zone still has any valid section.
	 */
	pfn = zone_start_pfn;
	for (; pfn < zone_end_pfn; pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (page_zone(pfn_to_page(pfn)) != zone)
			continue;

		/* If the section is current section, it continues the loop */
		if (start_pfn == pfn)
			continue;

		/* If we find valid section, we have nothing to do */
		zone_span_writeunlock(zone);
		return;
	}

	/* The zone has no valid section */
	zone->zone_start_pfn = 0;
	zone->spanned_pages = 0;
	zone_span_writeunlock(zone);
}

static void shrink_pgdat_span(struct pglist_data *pgdat,
			      unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pgdat_start_pfn = pgdat->node_start_pfn;
	unsigned long p = pgdat_end_pfn(pgdat); /* pgdat_end_pfn namespace clash */
	unsigned long pgdat_end_pfn = p;
	unsigned long pfn;
	struct mem_section *ms;
	int nid = pgdat->node_id;

	if (pgdat_start_pfn == start_pfn) {
		/*
		 * If the section is the smallest section in the pgdat, we need
		 * to shrink pgdat->node_start_pfn and pgdat->node_spanned_pages.
		 * In this case, we find the second smallest valid mem_section
		 * for shrinking the pgdat.
		 */
		pfn = find_smallest_section_pfn(nid, NULL, end_pfn,
						pgdat_end_pfn);
		if (pfn) {
			pgdat->node_start_pfn = pfn;
			pgdat->node_spanned_pages = pgdat_end_pfn - pfn;
		}
	} else if (pgdat_end_pfn == end_pfn) {
		/*
		 * If the section is the biggest section in the pgdat, we need
		 * to shrink pgdat->node_spanned_pages.
		 * In this case, we find the second biggest valid mem_section
		 * for shrinking the pgdat.
		 */
		pfn = find_biggest_section_pfn(nid, NULL, pgdat_start_pfn,
					       start_pfn);
		if (pfn)
			pgdat->node_spanned_pages = pfn - pgdat_start_pfn + 1;
	}

	/*
	 * If the section is neither the biggest nor the smallest mem_section
	 * in the pgdat, it only creates a hole in the pgdat. So in this case
	 * we need not change the pgdat.
	 * But perhaps the pgdat contains only holes, so check whether the
	 * pgdat still has any valid section.
	 */
	pfn = pgdat_start_pfn;
	for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (pfn_to_nid(pfn) != nid)
			continue;

		/* If the section is current section, it continues the loop */
		if (start_pfn == pfn)
			continue;

		/* If we find valid section, we have nothing to do */
		return;
	}

	/* The pgdat has no valid section */
	pgdat->node_start_pfn = 0;
	pgdat->node_spanned_pages = 0;
}

static void __remove_zone(struct zone *zone, unsigned long start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	int zone_type;
	unsigned long flags;

	zone_type = zone - pgdat->node_zones;

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
	shrink_pgdat_span(pgdat, start_pfn, start_pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);
}

static int __remove_section(struct zone *zone, struct mem_section *ms)
{
	unsigned long start_pfn;
	int scn_nr;
	int ret = -EINVAL;

	if (!valid_section(ms))
		return ret;

	ret = unregister_memory_section(ms);
	if (ret)
		return ret;

	scn_nr = __section_nr(ms);
	start_pfn = section_nr_to_pfn(scn_nr);
	__remove_zone(zone, start_pfn);

	sparse_remove_one_section(zone, ms);
	return 0;
}

/**
 * __remove_pages() - remove sections of pages from a zone
 * @zone: zone from which pages need to be removed
 * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be multiple of section size)
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing. Caller needs to make
 * sure that pages are marked reserved and zones are adjusted properly by
 * calling offline_pages().
 */
int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
		   unsigned long nr_pages)
{
	unsigned long i;
	int sections_to_remove;
	resource_size_t start, size;
	int ret = 0;

	/*
	 * We can only remove entire sections
	 */
	BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
	BUG_ON(nr_pages % PAGES_PER_SECTION);

	start = phys_start_pfn << PAGE_SHIFT;
	size = nr_pages * PAGE_SIZE;
	ret = release_mem_region_adjustable(&iomem_resource, start, size);
	if (ret) {
		resource_size_t endres = start + size - 1;

		pr_warn("Unable to release resource <%pa-%pa> (%d)\n",
			&start, &endres, ret);
	}

	sections_to_remove = nr_pages / PAGES_PER_SECTION;
	for (i = 0; i < sections_to_remove; i++) {
		unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
		ret = __remove_section(zone, __pfn_to_section(pfn));
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(__remove_pages);
#endif /* CONFIG_MEMORY_HOTREMOVE */

int set_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	get_online_mems();
	mutex_lock(&online_page_callback_lock);

	if (online_page_callback == generic_online_page) {
		online_page_callback = callback;
		rc = 0;
	}

	mutex_unlock(&online_page_callback_lock);
	put_online_mems();

	return rc;
}
EXPORT_SYMBOL_GPL(set_online_page_callback);

int restore_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	get_online_mems();
	mutex_lock(&online_page_callback_lock);

	if (online_page_callback == callback) {
		online_page_callback = generic_online_page;
		rc = 0;
	}

	mutex_unlock(&online_page_callback_lock);
	put_online_mems();

	return rc;
}
EXPORT_SYMBOL_GPL(restore_online_page_callback);

void __online_page_set_limits(struct page *page)
{
}
EXPORT_SYMBOL_GPL(__online_page_set_limits);

void __online_page_increment_counters(struct page *page)
{
	adjust_managed_page_count(page, 1);
}
EXPORT_SYMBOL_GPL(__online_page_increment_counters);

void __online_page_free(struct page *page)
{
	__free_reserved_page(page);
}
EXPORT_SYMBOL_GPL(__online_page_free);

static void generic_online_page(struct page *page)
{
	__online_page_set_limits(page);
	__online_page_increment_counters(page);
	__online_page_free(page);
}

static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
			      void *arg)
{
	unsigned long i;
	unsigned long onlined_pages = *(unsigned long *)arg;
	struct page *page;
	if (PageReserved(pfn_to_page(start_pfn)))
		for (i = 0; i < nr_pages; i++) {
			page = pfn_to_page(start_pfn + i);
			(*online_page_callback)(page);
			onlined_pages++;
		}
	*(unsigned long *)arg = onlined_pages;
	return 0;
}
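/*
 * Example (illustrative sketch): a memory-ballooning style driver that wants
 * to intercept newly onlined pages instead of handing them straight to the
 * buddy allocator could register its own callback with
 * set_online_page_callback() above:
 *
 *	static void my_balloon_online_page(struct page *page)	// hypothetical
 *	{
 *		__online_page_set_limits(page);
 *		__online_page_increment_counters(page);
 *		my_balloon_queue_page(page);	// hypothetical helper
 *	}
 *
 *	err = set_online_page_callback(&my_balloon_online_page);
 *	...
 *	restore_online_page_callback(&my_balloon_online_page);
 *
 * Only one non-generic callback can be installed at a time; registration
 * returns -EINVAL if another callback is already in place.
 */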
#ifdef CONFIG_MOVABLE_NODE
/*
 * When CONFIG_MOVABLE_NODE, we permit onlining of a node which doesn't have
 * normal memory.
 */
static bool can_online_high_movable(struct zone *zone)
{
	return true;
}
#else /* CONFIG_MOVABLE_NODE */
/* ensure every online node has NORMAL memory */
static bool can_online_high_movable(struct zone *zone)
{
	return node_state(zone_to_nid(zone), N_NORMAL_MEMORY);
}
#endif /* CONFIG_MOVABLE_NODE */

/* check which state of node_states will be changed when onlining memory */
static void node_states_check_changes_online(unsigned long nr_pages,
	struct zone *zone, struct memory_notify *arg)
{
	int nid = zone_to_nid(zone);
	enum zone_type zone_last = ZONE_NORMAL;

	/*
	 * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_NORMAL,
	 * set zone_last to ZONE_NORMAL.
	 *
	 * If we don't have HIGHMEM nor movable node,
	 * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
	 * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE.
	 */
	if (N_MEMORY == N_NORMAL_MEMORY)
		zone_last = ZONE_MOVABLE;

	/*
	 * If the memory to be onlined is in a zone of 0...zone_last, and
	 * the zones of 0...zone_last don't have memory before onlining, we
	 * will need to set the node to node_states[N_NORMAL_MEMORY] after
	 * the memory is onlined.
	 */
	if (zone_idx(zone) <= zone_last && !node_state(nid, N_NORMAL_MEMORY))
		arg->status_change_nid_normal = nid;
	else
		arg->status_change_nid_normal = -1;

#ifdef CONFIG_HIGHMEM
	/*
	 * If we have movable node, node_states[N_HIGH_MEMORY]
	 * contains nodes which have zones of 0...ZONE_HIGHMEM,
	 * set zone_last to ZONE_HIGHMEM.
	 *
	 * If we don't have movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_MOVABLE,
	 * set zone_last to ZONE_MOVABLE.
	 */
	zone_last = ZONE_HIGHMEM;
	if (N_MEMORY == N_HIGH_MEMORY)
		zone_last = ZONE_MOVABLE;

	if (zone_idx(zone) <= zone_last && !node_state(nid, N_HIGH_MEMORY))
		arg->status_change_nid_high = nid;
	else
		arg->status_change_nid_high = -1;
#else
	arg->status_change_nid_high = arg->status_change_nid_normal;
#endif

	/*
	 * If the node doesn't have memory before onlining, we will need to
	 * set the node to node_states[N_MEMORY] after the memory
	 * is onlined.
	 */
	if (!node_state(nid, N_MEMORY))
		arg->status_change_nid = nid;
	else
		arg->status_change_nid = -1;
}

static void node_states_set_node(int node, struct memory_notify *arg)
{
	if (arg->status_change_nid_normal >= 0)
		node_set_state(node, N_NORMAL_MEMORY);

	if (arg->status_change_nid_high >= 0)
		node_set_state(node, N_HIGH_MEMORY);

	node_set_state(node, N_MEMORY);
}
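/*
 * Worked example for node_states_check_changes_online() above (assuming a
 * !CONFIG_HIGHMEM, movable-node capable configuration): onlining the first
 * pages of a previously memoryless node into ZONE_MOVABLE yields
 * status_change_nid == nid (the node gains N_MEMORY) while
 * status_change_nid_normal stays -1, since no 0...ZONE_NORMAL zone gained
 * memory; onlining into ZONE_NORMAL instead would set both fields to nid.
 */
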
int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
{
	unsigned long flags;
	unsigned long onlined_pages = 0;
	struct zone *zone;
	int need_zonelists_rebuild = 0;
	int nid;
	int ret;
	struct memory_notify arg;

	mem_hotplug_begin();
	/*
	 * This doesn't need a lock to do pfn_to_page().
	 * The section can't be removed here because of the
	 * memory_block->state_mutex.
	 */
	zone = page_zone(pfn_to_page(pfn));

	ret = -EINVAL;
	if ((zone_idx(zone) > ZONE_NORMAL ||
	    online_type == MMOP_ONLINE_MOVABLE) &&
	    !can_online_high_movable(zone))
		goto out;

	if (online_type == MMOP_ONLINE_KERNEL &&
	    zone_idx(zone) == ZONE_MOVABLE) {
		if (move_pfn_range_left(zone - 1, zone, pfn, pfn + nr_pages))
			goto out;
	}
	if (online_type == MMOP_ONLINE_MOVABLE &&
	    zone_idx(zone) == ZONE_MOVABLE - 1) {
		if (move_pfn_range_right(zone, zone + 1, pfn, pfn + nr_pages))
			goto out;
	}

	/* Previous code may have changed the zone of the pfn range */
	zone = page_zone(pfn_to_page(pfn));

	arg.start_pfn = pfn;
	arg.nr_pages = nr_pages;
	node_states_check_changes_online(nr_pages, zone, &arg);

	nid = pfn_to_nid(pfn);

	ret = memory_notify(MEM_GOING_ONLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret) {
		memory_notify(MEM_CANCEL_ONLINE, &arg);
		goto out;
	}
	/*
	 * If this zone is not populated, then it is not in zonelist.
	 * This means the page allocator ignores this zone.
	 * So, zonelist must be updated after online.
	 */
	mutex_lock(&zonelists_mutex);
	if (!populated_zone(zone)) {
		need_zonelists_rebuild = 1;
		build_all_zonelists(NULL, zone);
	}

	ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
		online_pages_range);
	if (ret) {
		if (need_zonelists_rebuild)
			zone_pcp_reset(zone);
		mutex_unlock(&zonelists_mutex);
		printk(KERN_DEBUG "online_pages [mem %#010llx-%#010llx] failed\n",
		       (unsigned long long) pfn << PAGE_SHIFT,
		       (((unsigned long long) pfn + nr_pages)
			<< PAGE_SHIFT) - 1);
		memory_notify(MEM_CANCEL_ONLINE, &arg);
		goto out;
	}

	zone->present_pages += onlined_pages;

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	zone->zone_pgdat->node_present_pages += onlined_pages;
	pgdat_resize_unlock(zone->zone_pgdat, &flags);

	if (onlined_pages) {
		node_states_set_node(zone_to_nid(zone), &arg);
		if (need_zonelists_rebuild)
			build_all_zonelists(NULL, NULL);
		else
			zone_pcp_update(zone);
	}

	mutex_unlock(&zonelists_mutex);

	init_per_zone_wmark_min();

	if (onlined_pages)
		kswapd_run(zone_to_nid(zone));

	vm_total_pages = nr_free_pagecache_pages();

	writeback_set_ratelimit();

	if (onlined_pages)
		memory_notify(MEM_ONLINE, &arg);
out:
	mem_hotplug_done();
	return ret;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
{
	struct pglist_data *pgdat;
	unsigned long zones_size[MAX_NR_ZONES] = {0};
	unsigned long zholes_size[MAX_NR_ZONES] = {0};
	unsigned long start_pfn = PFN_DOWN(start);

	pgdat = NODE_DATA(nid);
	if (!pgdat) {
		pgdat = arch_alloc_nodedata(nid);
		if (!pgdat)
			return NULL;

		arch_refresh_nodedata(nid, pgdat);
	}

	/* we can use NODE_DATA(nid) from here */

	/* init node's zones as empty zones, we don't have any present pages.*/
	free_area_init_node(nid, zones_size, start_pfn, zholes_size);

	/*
	 * The node we allocated has no zone fallback lists. To avoid
	 * accessing a not-initialized zonelist, build one here.
	 */
	mutex_lock(&zonelists_mutex);
	build_all_zonelists(pgdat, NULL);
	mutex_unlock(&zonelists_mutex);

	return pgdat;
}

static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
	arch_refresh_nodedata(nid, NULL);
	arch_free_nodedata(pgdat);
	return;
}


/**
 * try_online_node - online a node if offlined
 *
 * called by cpu_up() to online a node without onlined memory.
 */
int try_online_node(int nid)
{
	pg_data_t *pgdat;
	int ret;

	if (node_online(nid))
		return 0;

	mem_hotplug_begin();
	pgdat = hotadd_new_pgdat(nid, 0);
	if (!pgdat) {
		pr_err("Cannot online node %d due to NULL pgdat\n", nid);
		ret = -ENOMEM;
		goto out;
	}
	node_set_online(nid);
	ret = register_one_node(nid);
	BUG_ON(ret);

	if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
		mutex_lock(&zonelists_mutex);
		build_all_zonelists(NULL, NULL);
		mutex_unlock(&zonelists_mutex);
	}

out:
	mem_hotplug_done();
	return ret;
}

static int check_hotplug_memory_range(u64 start, u64 size)
{
	u64 start_pfn = PFN_DOWN(start);
	u64 nr_pages = size >> PAGE_SHIFT;

	/* Memory range must be aligned with section */
	if ((start_pfn & ~PAGE_SECTION_MASK) ||
	    (nr_pages % PAGES_PER_SECTION) || (!nr_pages)) {
		pr_err("Section-unaligned hotplug range: start 0x%llx, size 0x%llx\n",
		       (unsigned long long)start,
		       (unsigned long long)size);
		return -EINVAL;
	}

	return 0;
}

/*
 * If the movable zone has already been set up, newly added memory should be
 * checked. If its address is higher than the movable zone, it should be
 * added as movable. Without this check, the movable zone may overlap with
 * another zone.
 */
static int should_add_memory_movable(int nid, u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	pg_data_t *pgdat = NODE_DATA(nid);
	struct zone *movable_zone = pgdat->node_zones + ZONE_MOVABLE;

	if (zone_is_empty(movable_zone))
		return 0;

	if (movable_zone->zone_start_pfn <= start_pfn)
		return 1;

	return 0;
}

int zone_for_memory(int nid, u64 start, u64 size, int zone_default)
{
	if (should_add_memory_movable(nid, start, size))
		return ZONE_MOVABLE;

	return zone_default;
}
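/*
 * Example (illustrative sketch, not any particular architecture's code): an
 * arch_add_memory() implementation is expected to map the new range, pick a
 * zone (typically its default hot-add zone, corrected by zone_for_memory()),
 * and then hand the pages to the generic code via __add_pages():
 *
 *	int arch_add_memory(int nid, u64 start, u64 size)	// sketch
 *	{
 *		pg_data_t *pgdat = NODE_DATA(nid);
 *		struct zone *zone = pgdat->node_zones +
 *			zone_for_memory(nid, start, size, ZONE_NORMAL);
 *		unsigned long start_pfn = start >> PAGE_SHIFT;
 *		unsigned long nr_pages = size >> PAGE_SHIFT;
 *
 *		arch_map_hotadded_range(start, size);	// hypothetical helper
 *		return __add_pages(nid, zone, start_pfn, nr_pages);
 *	}
 *
 * The ZONE_NORMAL default and the mapping helper are assumptions of this
 * sketch; real implementations differ per architecture.
 */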
/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
int __ref add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat = NULL;
	bool new_pgdat;
	bool new_node;
	struct resource *res;
	int ret;

	ret = check_hotplug_memory_range(start, size);
	if (ret)
		return ret;

	res = register_memory_resource(start, size);
	ret = -EEXIST;
	if (!res)
		return ret;

	{	/* Stupid hack to suppress address-never-null warning */
		void *p = NODE_DATA(nid);
		new_pgdat = !p;
	}

	mem_hotplug_begin();

	new_node = !node_online(nid);
	if (new_node) {
		pgdat = hotadd_new_pgdat(nid, start);
		ret = -ENOMEM;
		if (!pgdat)
			goto error;
	}

	/* call arch's memory hotadd */
	ret = arch_add_memory(nid, start, size);

	if (ret < 0)
		goto error;

	/* we online the node here; we can't roll back from here. */
	node_set_online(nid);

	if (new_node) {
		ret = register_one_node(nid);
		/*
		 * If the sysfs file of the new node can't be created, cpus
		 * on the node can't be hot-added. There is no rollback way
		 * now, so check it with BUG_ON() to catch it reluctantly.
		 */
		BUG_ON(ret);
	}

	/* create new memmap entry */
	firmware_map_add_hotplug(start, start + size, "System RAM");

	goto out;

error:
	/* rollback pgdat allocation and others */
	if (new_pgdat)
		rollback_node_hotadd(nid, pgdat);
	release_memory_resource(res);

out:
	mem_hotplug_done();
	return ret;
}
EXPORT_SYMBOL_GPL(add_memory);

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
 * set and the size of the free page is given by page_order(). Using this,
 * the function determines if the pageblock contains only free pages.
 * Due to buddy constraints, a free page at least the size of a pageblock will
 * be located at the start of the pageblock.
 */
static inline int pageblock_free(struct page *page)
{
	return PageBuddy(page) && page_order(page) >= pageblock_order;
}

/* Return the start of the next active pageblock after a given page */
static struct page *next_active_pageblock(struct page *page)
{
	/* Ensure the starting page is pageblock-aligned */
	BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));

	/* If the entire pageblock is free, move to the end of free page */
	if (pageblock_free(page)) {
		int order;
		/* be careful. we don't have locks, page_order can be changed.*/
		order = page_order(page);
		if ((order < MAX_ORDER) && (order >= pageblock_order))
			return page + (1 << order);
	}

	return page + pageblock_nr_pages;
}

/* Checks if this range of memory is likely to be hot-removable. */
int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
{
	struct page *page = pfn_to_page(start_pfn);
	struct page *end_page = page + nr_pages;

	/* Check the starting page of each pageblock within the range */
	for (; page < end_page; page = next_active_pageblock(page)) {
		if (!is_pageblock_removable_nolock(page))
			return 0;
		cond_resched();
	}

	/* All pageblocks in the memory block are likely to be hot-removable */
	return 1;
}

/*
 * Confirm that all pages in a range [start, end) belong to the same zone.
 */
static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct zone *zone = NULL;
	struct page *page;
	int i;
	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += MAX_ORDER_NR_PAGES) {
		i = 0;
		/* This is just a CONFIG_HOLES_IN_ZONE check.*/
		while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
			i++;
		if (i == MAX_ORDER_NR_PAGES)
			continue;
		page = pfn_to_page(pfn + i);
		if (zone && page_zone(page) != zone)
			return 0;
		zone = page_zone(page);
	}
	return 1;
}

/*
 * Scan pfn range [start,end) to find movable/migratable pages (LRU pages
 * and hugepages). We scan pfn because it's much easier than scanning over
 * linked list. This function returns the pfn of the first found movable
 * page if it's found, otherwise 0.
 */
static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
{
	unsigned long pfn;
	struct page *page;
	for (pfn = start; pfn < end; pfn++) {
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);
			if (PageLRU(page))
				return pfn;
			if (PageHuge(page)) {
				if (is_hugepage_active(page))
					return pfn;
				else
					pfn = round_up(pfn + 1,
						1 << compound_order(page)) - 1;
			}
		}
	}
	return 0;
}

#define NR_OFFLINE_AT_ONCE_PAGES	(256)
static int
do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page;
	int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
	int not_managed = 0;
	int ret = 0;
	LIST_HEAD(source);

	for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);

		if (PageHuge(page)) {
			struct page *head = compound_head(page);
			pfn = page_to_pfn(head) + (1<<compound_order(head)) - 1;
			if (compound_order(head) > PFN_SECTION_SHIFT) {
				ret = -EBUSY;
				break;
			}
			if (isolate_huge_page(page, &source))
				move_pages -= 1 << compound_order(head);
			continue;
		}

		if (!get_page_unless_zero(page))
			continue;
		/*
		 * We can skip free pages. And we can only deal with pages on
		 * LRU.
		 */
		ret = isolate_lru_page(page);
		if (!ret) { /* Success */
			put_page(page);
			list_add_tail(&page->lru, &source);
			move_pages--;
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));

		} else {
#ifdef CONFIG_DEBUG_VM
			printk(KERN_ALERT "removing pfn %lx from LRU failed\n",
			       pfn);
			dump_page(page, "failed to remove from LRU");
#endif
			put_page(page);
			/*
			 * Because we don't have a big zone->lock, we should
			 * check this again here.
			 */
			if (page_count(page)) {
				not_managed++;
				ret = -EBUSY;
				break;
			}
		}
	}
	if (!list_empty(&source)) {
		if (not_managed) {
			putback_movable_pages(&source);
			goto out;
		}

		/*
		 * alloc_migrate_target should be improooooved!!
		 * migrate_pages returns # of failed pages.
		 */
		ret = migrate_pages(&source, alloc_migrate_target, NULL, 0,
					MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
		if (ret)
			putback_movable_pages(&source);
	}
out:
	return ret;
}

/*
 * remove from free_area[] and mark all as Reserved.
 */
static int
offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
			void *data)
{
	__offline_isolated_pages(start, start + nr_pages);
	return 0;
}

static void
offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL,
				offline_isolated_pages_cb);
}

/*
 * Check that all pages in range, recorded as memory resource, are isolated.
 */
static int
check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
			void *data)
{
	int ret;
	long offlined = *(long *)data;
	ret = test_pages_isolated(start_pfn, start_pfn + nr_pages, true);
	offlined = nr_pages;
	if (!ret)
		*(long *)data += offlined;
	return ret;
}

static long
check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
	long offlined = 0;
	int ret;

	ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined,
			check_pages_isolated_cb);
	if (ret < 0)
		offlined = (long)ret;
	return offlined;
}

#ifdef CONFIG_MOVABLE_NODE
/*
 * When CONFIG_MOVABLE_NODE, we permit offlining of a node which doesn't have
 * normal memory.
 */
static bool can_offline_normal(struct zone *zone, unsigned long nr_pages)
{
	return true;
}
#else /* CONFIG_MOVABLE_NODE */
/* ensure the node has NORMAL memory if it is still online */
static bool can_offline_normal(struct zone *zone, unsigned long nr_pages)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	unsigned long present_pages = 0;
	enum zone_type zt;

	for (zt = 0; zt <= ZONE_NORMAL; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;

	if (present_pages > nr_pages)
		return true;

	present_pages = 0;
	for (; zt <= ZONE_MOVABLE; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;

	/*
	 * we can't offline the last normal memory until all
	 * higher memory is offlined.
	 */
	return present_pages == 0;
}
#endif /* CONFIG_MOVABLE_NODE */

static int __init cmdline_parse_movable_node(char *p)
{
#ifdef CONFIG_MOVABLE_NODE
	/*
	 * Memory used by the kernel cannot be hot-removed because Linux
	 * cannot migrate the kernel pages. When memory hotplug is
	 * enabled, we should prevent memblock from allocating memory
	 * for the kernel.
	 *
	 * ACPI SRAT records all hotpluggable memory ranges. But before
	 * SRAT is parsed, we don't know about it.
	 *
	 * The kernel image is loaded into memory at a very early time. We
	 * cannot prevent this anyway. So on NUMA systems, we set any
	 * node the kernel resides in as un-hotpluggable.
	 *
	 * Since on modern servers one node could have double-digit
	 * gigabytes of memory, we can assume the memory around the kernel
	 * image is also un-hotpluggable. So before SRAT is parsed, just
	 * allocate memory near the kernel image to try our best to keep
	 * the kernel away from hotpluggable memory.
	 */
	memblock_set_bottom_up(true);
	movable_node_enabled = true;
#else
	pr_warn("movable_node option not supported\n");
#endif
	return 0;
}
early_param("movable_node", cmdline_parse_movable_node);

/* check which state of node_states will be changed when offlining memory */
static void node_states_check_changes_offline(unsigned long nr_pages,
		struct zone *zone, struct memory_notify *arg)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	unsigned long present_pages = 0;
	enum zone_type zt, zone_last = ZONE_NORMAL;

	/*
	 * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_NORMAL,
	 * set zone_last to ZONE_NORMAL.
	 *
	 * If we don't have HIGHMEM nor movable node,
	 * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
	 * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE.
	 */
	if (N_MEMORY == N_NORMAL_MEMORY)
		zone_last = ZONE_MOVABLE;

	/*
	 * check whether node_states[N_NORMAL_MEMORY] will be changed.
	 * If the memory to be offlined is in a zone of 0...zone_last,
	 * and it is the last present memory, 0...zone_last will
	 * become empty after offlining, thus we can determine that we will
	 * need to clear the node from node_states[N_NORMAL_MEMORY].
	 */
	for (zt = 0; zt <= zone_last; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;
	if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
		arg->status_change_nid_normal = zone_to_nid(zone);
	else
		arg->status_change_nid_normal = -1;

#ifdef CONFIG_HIGHMEM
	/*
	 * If we have movable node, node_states[N_HIGH_MEMORY]
	 * contains nodes which have zones of 0...ZONE_HIGHMEM,
	 * set zone_last to ZONE_HIGHMEM.
	 *
	 * If we don't have movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_MOVABLE,
	 * set zone_last to ZONE_MOVABLE.
	 */
	zone_last = ZONE_HIGHMEM;
	if (N_MEMORY == N_HIGH_MEMORY)
		zone_last = ZONE_MOVABLE;

	for (; zt <= zone_last; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;
	if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
		arg->status_change_nid_high = zone_to_nid(zone);
	else
		arg->status_change_nid_high = -1;
#else
	arg->status_change_nid_high = arg->status_change_nid_normal;
#endif

	/*
	 * node_states[N_HIGH_MEMORY] contains nodes which have 0...ZONE_MOVABLE
	 */
	zone_last = ZONE_MOVABLE;

	/*
	 * check whether node_states[N_HIGH_MEMORY] will be changed.
	 * If we try to offline the last present @nr_pages from the node,
	 * we can determine that we will need to clear the node from
	 * node_states[N_HIGH_MEMORY].
	 */
	for (; zt <= zone_last; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;
	if (nr_pages >= present_pages)
		arg->status_change_nid = zone_to_nid(zone);
	else
		arg->status_change_nid = -1;
}

static void node_states_clear_node(int node, struct memory_notify *arg)
{
	if (arg->status_change_nid_normal >= 0)
		node_clear_state(node, N_NORMAL_MEMORY);

	if ((N_MEMORY != N_NORMAL_MEMORY) &&
	    (arg->status_change_nid_high >= 0))
		node_clear_state(node, N_HIGH_MEMORY);

	if ((N_MEMORY != N_HIGH_MEMORY) &&
	    (arg->status_change_nid >= 0))
		node_clear_state(node, N_MEMORY);
}

static int __ref __offline_pages(unsigned long start_pfn,
		  unsigned long end_pfn, unsigned long timeout)
{
	unsigned long pfn, nr_pages, expire;
	long offlined_pages;
	int ret, drain, retry_max, node;
	unsigned long flags;
	struct zone *zone;
	struct memory_notify arg;

	/* at least, alignment against pageblock is necessary */
	if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
		return -EINVAL;
	if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
		return -EINVAL;
	/*
	 * This makes hotplug much easier...and readable.
	 * we assume this for now.
	 */
	if (!test_pages_in_a_zone(start_pfn, end_pfn))
		return -EINVAL;

	mem_hotplug_begin();

	zone = page_zone(pfn_to_page(start_pfn));
	node = zone_to_nid(zone);
	nr_pages = end_pfn - start_pfn;

	ret = -EINVAL;
	if (zone_idx(zone) <= ZONE_NORMAL && !can_offline_normal(zone, nr_pages))
		goto out;

	/* set above range as isolated */
	ret = start_isolate_page_range(start_pfn, end_pfn,
				       MIGRATE_MOVABLE, true);
	if (ret)
		goto out;

	arg.start_pfn = start_pfn;
	arg.nr_pages = nr_pages;
	node_states_check_changes_offline(nr_pages, zone, &arg);

	ret = memory_notify(MEM_GOING_OFFLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret)
		goto failed_removal;

	pfn = start_pfn;
	expire = jiffies + timeout;
	drain = 0;
	retry_max = 5;
repeat:
	/* start memory hot removal */
	ret = -EAGAIN;
	if (time_after(jiffies, expire))
		goto failed_removal;
	ret = -EINTR;
	if (signal_pending(current))
		goto failed_removal;
	ret = 0;
	if (drain) {
		lru_add_drain_all();
		cond_resched();
		drain_all_pages();
	}

	pfn = scan_movable_pages(start_pfn, end_pfn);
	if (pfn) { /* We have movable pages */
		ret = do_migrate_range(pfn, end_pfn);
		if (!ret) {
			drain = 1;
			goto repeat;
		} else {
			if (ret < 0)
				if (--retry_max == 0)
					goto failed_removal;
			yield();
			drain = 1;
			goto repeat;
		}
	}
	/* drain all zone's lru pagevec, this is asynchronous... */
	lru_add_drain_all();
	yield();
	/* drain pcp pages, this is synchronous. */
	drain_all_pages();
	/*
	 * dissolve free hugepages in the memory block before doing offlining
	 * actually in order to make hugetlbfs's object counting consistent.
	 */
	dissolve_free_huge_pages(start_pfn, end_pfn);
	/* check again */
	offlined_pages = check_pages_isolated(start_pfn, end_pfn);
	if (offlined_pages < 0) {
		ret = -EBUSY;
		goto failed_removal;
	}
	printk(KERN_INFO "Offlined Pages %ld\n", offlined_pages);
	/*
	 * Ok, all of our target is isolated.
	 * We cannot do rollback at this point.
	 */
	offline_isolated_pages(start_pfn, end_pfn);
	/* reset pagetype flags and make the migrate type MOVABLE again */
	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
	/* removal success */
	adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages);
	zone->present_pages -= offlined_pages;

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	zone->zone_pgdat->node_present_pages -= offlined_pages;
	pgdat_resize_unlock(zone->zone_pgdat, &flags);

	init_per_zone_wmark_min();

	if (!populated_zone(zone)) {
		zone_pcp_reset(zone);
		mutex_lock(&zonelists_mutex);
		build_all_zonelists(NULL, NULL);
		mutex_unlock(&zonelists_mutex);
	} else
		zone_pcp_update(zone);

	node_states_clear_node(node, &arg);
	if (arg.status_change_nid >= 0)
		kswapd_stop(node);

	vm_total_pages = nr_free_pagecache_pages();
	writeback_set_ratelimit();

	memory_notify(MEM_OFFLINE, &arg);
	mem_hotplug_done();
	return 0;

failed_removal:
	printk(KERN_INFO "memory offlining [mem %#010llx-%#010llx] failed\n",
	       (unsigned long long) start_pfn << PAGE_SHIFT,
	       ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
	memory_notify(MEM_CANCEL_OFFLINE, &arg);
	/* pushback to free area */
	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);

out:
	mem_hotplug_done();
	return ret;
}

int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
	return __offline_pages(start_pfn, start_pfn + nr_pages, 120 * HZ);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
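/*
 * Example (illustrative sketch): offline_pages() is normally reached via the
 * memory block device, e.g. "echo offline > /sys/devices/system/memory/memoryX/state",
 * whose handler offlines the block's whole pfn range, roughly:
 *
 *	start_pfn = section_nr_to_pfn(mem->start_section_nr);
 *	ret = offline_pages(start_pfn, PAGES_PER_SECTION * sections_per_block);
 *
 * The names above loosely follow drivers/base/memory.c and are only meant to
 * show the expected section-aligned (and hence pageblock-aligned) granularity.
 */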
/**
 * walk_memory_range - walks through all mem sections in [start_pfn, end_pfn)
 * @start_pfn: start pfn of the memory range
 * @end_pfn: end pfn of the memory range
 * @arg: argument passed to func
 * @func: callback for each memory section walked
 *
 * This function walks through all present mem sections in range
 * [start_pfn, end_pfn) and calls func on each mem section.
 *
 * Returns the return value of func.
 */
int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
		void *arg, int (*func)(struct memory_block *, void *))
{
	struct memory_block *mem = NULL;
	struct mem_section *section;
	unsigned long pfn, section_nr;
	int ret;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		section_nr = pfn_to_section_nr(pfn);
		if (!present_section_nr(section_nr))
			continue;

		section = __nr_to_section(section_nr);
		/* same memblock? */
		if (mem)
			if ((section_nr >= mem->start_section_nr) &&
			    (section_nr <= mem->end_section_nr))
				continue;

		mem = find_memory_block_hinted(section, mem);
		if (!mem)
			continue;

		ret = func(mem, arg);
		if (ret) {
			kobject_put(&mem->dev.kobj);
			return ret;
		}
	}

	if (mem)
		kobject_put(&mem->dev.kobj);

	return 0;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
{
	int ret = !is_memblock_offlined(mem);

	if (unlikely(ret)) {
		phys_addr_t beginpa, endpa;

		beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr));
		endpa = PFN_PHYS(section_nr_to_pfn(mem->end_section_nr + 1))-1;
		pr_warn("removing memory fails, because memory "
			"[%pa-%pa] is onlined\n",
			&beginpa, &endpa);
	}

	return ret;
}

static int check_cpu_on_node(pg_data_t *pgdat)
{
	int cpu;

	for_each_present_cpu(cpu) {
		if (cpu_to_node(cpu) == pgdat->node_id)
			/*
			 * the cpu on this node isn't removed, and we can't
			 * offline this node.
			 */
			return -EBUSY;
	}

	return 0;
}

static void unmap_cpu_on_node(pg_data_t *pgdat)
{
#ifdef CONFIG_ACPI_NUMA
	int cpu;

	for_each_possible_cpu(cpu)
		if (cpu_to_node(cpu) == pgdat->node_id)
			numa_clear_node(cpu);
#endif
}

static int check_and_unmap_cpu_on_node(pg_data_t *pgdat)
{
	int ret;

	ret = check_cpu_on_node(pgdat);
	if (ret)
		return ret;

	/*
	 * the node will be offlined when we come here, so we can clear
	 * the cpu_to_node() now.
	 */

	unmap_cpu_on_node(pgdat);
	return 0;
}

/**
 * try_offline_node
 *
 * Offline a node if all memory sections and cpus of the node are removed.
 *
 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
 * and online/offline operations before this call.
 */
void try_offline_node(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	unsigned long start_pfn = pgdat->node_start_pfn;
	unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
	unsigned long pfn;
	struct page *pgdat_page = virt_to_page(pgdat);
	int i;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);

		if (!present_section_nr(section_nr))
			continue;

		if (pfn_to_nid(pfn) != nid)
			continue;

		/*
		 * some memory sections of this node are not removed, and we
		 * can't offline the node now.
		 */
		return;
	}

	if (check_and_unmap_cpu_on_node(pgdat))
		return;

	/*
	 * all memory/cpu of this node are removed, we can offline this
	 * node now.
	 */
	node_set_offline(nid);
	unregister_one_node(nid);

	if (!PageSlab(pgdat_page) && !PageCompound(pgdat_page))
		/* node data is allocated from boot memory */
		return;

	/* free the wait table in each zone */
	for (i = 0; i < MAX_NR_ZONES; i++) {
		struct zone *zone = pgdat->node_zones + i;

		/*
		 * wait_table may be allocated from boot memory,
		 * here only free it if it was allocated by vmalloc.
		 */
		if (is_vmalloc_addr(zone->wait_table))
			vfree(zone->wait_table);
	}

	/*
	 * Since there is no way to guarantee the address of pgdat/zone is not
	 * on the stack of any kernel threads or used by other kernel objects
	 * without reference counting or another synchronizing method, do not
	 * reset node_data and free pgdat here. Just reset it to 0 and reuse
	 * the memory when the node is online again.
	 */
	memset(pgdat, 0, sizeof(*pgdat));
}
EXPORT_SYMBOL(try_offline_node);

/**
 * remove_memory
 *
 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
 * and online/offline operations before this call, as required by
 * try_offline_node().
 */
void __ref remove_memory(int nid, u64 start, u64 size)
{
	int ret;

	BUG_ON(check_hotplug_memory_range(start, size));

	mem_hotplug_begin();

	/*
	 * All memory blocks must be offlined before removing memory. Check
	 * whether all memory blocks in question are offline and trigger a BUG()
	 * if this is not the case.
	 */
	ret = walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1), NULL,
				check_memblock_offlined_cb);
	if (ret)
		BUG();

	/* remove memmap entry */
	firmware_map_remove(start, start + size, "System RAM");

	arch_remove_memory(start, size);

	try_offline_node(nid);

	mem_hotplug_done();
}
EXPORT_SYMBOL_GPL(remove_memory);
#endif /* CONFIG_MEMORY_HOTREMOVE */
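/*
 * End-to-end usage sketch (illustrative, loosely following what an ACPI
 * memory-device driver does; names outside this file are assumptions):
 *
 *	// hot-add: create the resource, sections and memory block devices
 *	ret = add_memory(nid, info->start_addr, info->length);
 *
 *	// hot-remove: the blocks must already be offline (e.g. via sysfs /
 *	// offline_pages()), and device hotplug must be serialized
 *	lock_device_hotplug();
 *	remove_memory(nid, info->start_addr, info->length);
 *	unlock_device_hotplug();
 *
 * add_memory()/remove_memory() take the memory-hotplug writer lock
 * themselves via mem_hotplug_begin()/mem_hotplug_done().
 */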