/*
 * Memory Migration functionality - linux/mm/migration.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter <clameter@sgi.com>
 */

#include <linux/migrate.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/pagevec.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>

#include "internal.h"

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

/*
 * Isolate one page from the LRU lists. If successful put it onto
 * the indicated list with elevated page count.
 *
 * Result:
 *  -EBUSY: page not on LRU list
 *  0: page removed from LRU list and added to the specified list.
 */
int isolate_lru_page(struct page *page, struct list_head *pagelist)
{
        int ret = -EBUSY;

        if (PageLRU(page)) {
                struct zone *zone = page_zone(page);

                spin_lock_irq(&zone->lru_lock);
                if (PageLRU(page)) {
                        ret = 0;
                        get_page(page);
                        ClearPageLRU(page);
                        if (PageActive(page))
                                del_page_from_active_list(zone, page);
                        else
                                del_page_from_inactive_list(zone, page);
                        list_add_tail(&page->lru, pagelist);
                }
                spin_unlock_irq(&zone->lru_lock);
        }
        return ret;
}

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page().
 */
int migrate_prep(void)
{
        /*
         * Clear the LRU lists so pages can be isolated.
         * Note that pages may be moved off the LRU after we have
         * drained them. Those pages will fail to migrate like other
         * pages that may be busy.
         */
        lru_add_drain_all();

        return 0;
}

static inline void move_to_lru(struct page *page)
{
        if (PageActive(page)) {
                /*
                 * lru_cache_add_active checks that
                 * the PG_active bit is off.
                 */
                ClearPageActive(page);
                lru_cache_add_active(page);
        } else {
                lru_cache_add(page);
        }
        put_page(page);
}

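/*
 * Editor's sketch (not part of the original file): the typical caller
 * pattern for the isolation API above, mirroring what do_move_pages()
 * does further down. The function example_migrate_one() and its
 * arguments are hypothetical; note that migrate_pages() itself puts any
 * pages it could not migrate back on the LRU, so the caller need not.
 */
#if 0
static int example_migrate_one(struct page *page, new_page_t get_new_page,
                                unsigned long private)
{
        LIST_HEAD(pagelist);
        int err;

        /* Drain per-cpu LRU caches so isolate_lru_page() can find the page */
        migrate_prep();

        /* Take the page off the LRU with an elevated refcount */
        err = isolate_lru_page(page, &pagelist);
        if (err)
                return err;

        /* Move everything on the list; leftovers go back to the LRU */
        return migrate_pages(&pagelist, get_new_page, private);
}
#endif
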
/*
 * Add isolated pages on the list back to the LRU.
 *
 * returns the number of pages put back.
 */
int putback_lru_pages(struct list_head *l)
{
        struct page *page;
        struct page *page2;
        int count = 0;

        list_for_each_entry_safe(page, page2, l, lru) {
                list_del(&page->lru);
                move_to_lru(page);
                count++;
        }
        return count;
}

static inline int is_swap_pte(pte_t pte)
{
        return !pte_none(pte) && !pte_present(pte) && !pte_file(pte);
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static void remove_migration_pte(struct vm_area_struct *vma,
                struct page *old, struct page *new)
{
        struct mm_struct *mm = vma->vm_mm;
        swp_entry_t entry;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep, pte;
        spinlock_t *ptl;
        unsigned long addr = page_address_in_vma(new, vma);

        if (addr == -EFAULT)
                return;

        pgd = pgd_offset(mm, addr);
        if (!pgd_present(*pgd))
                return;

        pud = pud_offset(pgd, addr);
        if (!pud_present(*pud))
                return;

        pmd = pmd_offset(pud, addr);
        if (!pmd_present(*pmd))
                return;

        ptep = pte_offset_map(pmd, addr);

        if (!is_swap_pte(*ptep)) {
                pte_unmap(ptep);
                return;
        }

        ptl = pte_lockptr(mm, pmd);
        spin_lock(ptl);
        pte = *ptep;
        if (!is_swap_pte(pte))
                goto out;

        entry = pte_to_swp_entry(pte);

        if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old)
                goto out;

        get_page(new);
        pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
        if (is_write_migration_entry(entry))
                pte = pte_mkwrite(pte);
        set_pte_at(mm, addr, ptep, pte);

        if (PageAnon(new))
                page_add_anon_rmap(new, vma, addr);
        else
                page_add_file_rmap(new);

        /* No need to invalidate - it was non-present before */
        update_mmu_cache(vma, addr, pte);
        lazy_mmu_prot_update(pte);

out:
        pte_unmap_unlock(ptep, ptl);
}

/*
 * Note that remove_file_migration_ptes will only work on regular mappings;
 * nonlinear mappings do not use migration entries.
 */
static void remove_file_migration_ptes(struct page *old, struct page *new)
{
        struct vm_area_struct *vma;
        struct address_space *mapping = page_mapping(new);
        struct prio_tree_iter iter;
        pgoff_t pgoff = new->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

        if (!mapping)
                return;

        spin_lock(&mapping->i_mmap_lock);

        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff)
                remove_migration_pte(vma, old, new);

        spin_unlock(&mapping->i_mmap_lock);
}

/*
 * Must hold mmap_sem lock on at least one of the vmas containing
 * the page so that the anon_vma cannot vanish.
 */
static void remove_anon_migration_ptes(struct page *old, struct page *new)
{
        struct anon_vma *anon_vma;
        struct vm_area_struct *vma;
        unsigned long mapping;

        mapping = (unsigned long)new->mapping;

        if (!mapping || (mapping & PAGE_MAPPING_ANON) == 0)
                return;

        /*
         * We hold the mmap_sem lock. So no need to call page_lock_anon_vma.
         */
        anon_vma = (struct anon_vma *) (mapping - PAGE_MAPPING_ANON);
        spin_lock(&anon_vma->lock);

        list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
                remove_migration_pte(vma, old, new);

        spin_unlock(&anon_vma->lock);
}

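/*
 * Editor's sketch (not part of the original file): the counterpart that
 * *installs* a migration entry lives in try_to_unmap() (mm/rmap.c), which
 * unmap_and_move() calls further down with the migration flag set.
 * Roughly, with rmap accounting, dirty handling and locking omitted, and
 * with a hypothetical helper name:
 */
#if 0
static void example_install_migration_pte(struct vm_area_struct *vma,
                unsigned long addr, pte_t *ptep, struct page *page)
{
        struct mm_struct *mm = vma->vm_mm;
        pte_t pteval = ptep_clear_flush(vma, addr, ptep);
        swp_entry_t entry;

        /* Remember the page and whether the old pte was writable */
        entry = make_migration_entry(page, pte_write(pteval));
        set_pte_at(mm, addr, ptep, swp_entry_to_pte(entry));
}
#endif
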
/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
static void remove_migration_ptes(struct page *old, struct page *new)
{
        if (PageAnon(new))
                remove_anon_migration_ptes(old, new);
        else
                remove_file_migration_ptes(old, new);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 *
 * This function is called from do_swap_page().
 */
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
                                unsigned long address)
{
        pte_t *ptep, pte;
        spinlock_t *ptl;
        swp_entry_t entry;
        struct page *page;

        ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
        pte = *ptep;
        if (!is_swap_pte(pte))
                goto out;

        entry = pte_to_swp_entry(pte);
        if (!is_migration_entry(entry))
                goto out;

        page = migration_entry_to_page(entry);

        get_page(page);
        pte_unmap_unlock(ptep, ptl);
        wait_on_page_locked(page);
        put_page(page);
        return;
out:
        pte_unmap_unlock(ptep, ptl);
}

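/*
 * Editor's sketch (not part of the original file): roughly how the
 * do_swap_page() fault path in mm/memory.c uses migration_entry_wait().
 * A faulting thread that finds a migration entry waits for the page lock
 * held by the migration code to be dropped and then lets the fault be
 * retried (simplified fragment; variable names follow do_swap_page()):
 */
#if 0
        entry = pte_to_swp_entry(orig_pte);
        if (is_migration_entry(entry)) {
                migration_entry_wait(mm, pmd, address);
                goto out;       /* the fault will be retried */
        }
#endif
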
/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate set.
 */
static int migrate_page_move_mapping(struct address_space *mapping,
                struct page *newpage, struct page *page)
{
        void **pslot;

        if (!mapping) {
                /* Anonymous page without mapping */
                if (page_count(page) != 1)
                        return -EAGAIN;
                return 0;
        }

        write_lock_irq(&mapping->tree_lock);

        pslot = radix_tree_lookup_slot(&mapping->page_tree,
                                        page_index(page));

        if (page_count(page) != 2 + !!PagePrivate(page) ||
                        (struct page *)radix_tree_deref_slot(pslot) != page) {
                write_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
        }

        /*
         * Now we know that no one else is looking at the page.
         */
        get_page(newpage);      /* add cache reference */
#ifdef CONFIG_SWAP
        if (PageSwapCache(page)) {
                SetPageSwapCache(newpage);
                set_page_private(newpage, page_private(page));
        }
#endif

        radix_tree_replace_slot(pslot, newpage);

        /*
         * Drop cache reference from old page.
         * We know this isn't the last reference.
         */
        __put_page(page);

        /*
         * If moved to a different zone then also account
         * the page for that zone. Other VM counters will be
         * taken care of when we establish references to the
         * new page and drop references to the old page.
         *
         * Note that anonymous pages are accounted for
         * via NR_FILE_PAGES and NR_ANON_PAGES if they
         * are mapped to swap space.
         */
        __dec_zone_page_state(page, NR_FILE_PAGES);
        __inc_zone_page_state(newpage, NR_FILE_PAGES);

        write_unlock_irq(&mapping->tree_lock);

        return 0;
}

/*
 * Copy the page to its new location
 */
static void migrate_page_copy(struct page *newpage, struct page *page)
{
        copy_highpage(newpage, page);

        if (PageError(page))
                SetPageError(newpage);
        if (PageReferenced(page))
                SetPageReferenced(newpage);
        if (PageUptodate(page))
                SetPageUptodate(newpage);
        if (PageActive(page))
                SetPageActive(newpage);
        if (PageChecked(page))
                SetPageChecked(newpage);
        if (PageMappedToDisk(page))
                SetPageMappedToDisk(newpage);

        if (PageDirty(page)) {
                clear_page_dirty_for_io(page);
                set_page_dirty(newpage);
        }

#ifdef CONFIG_SWAP
        ClearPageSwapCache(page);
#endif
        ClearPageActive(page);
        ClearPagePrivate(page);
        set_page_private(page, 0);
        page->mapping = NULL;

        /*
         * If any waiters have accumulated on the new page then
         * wake them up.
         */
        if (PageWriteback(newpage))
                end_page_writeback(newpage);
}

/************************************************************
 * Migration functions
 ***********************************************************/

/* Always fail migration. Used for mappings that are not movable */
int fail_migrate_page(struct address_space *mapping,
                        struct page *newpage, struct page *page)
{
        return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
                struct page *newpage, struct page *page)
{
        int rc;

        BUG_ON(PageWriteback(page));    /* Writeback must be complete */

        rc = migrate_page_move_mapping(mapping, newpage, page);

        if (rc)
                return rc;

        migrate_page_copy(newpage, page);
        return 0;
}
EXPORT_SYMBOL(migrate_page);

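/*
 * Editor's sketch (not part of the original file): a filesystem whose
 * pages carry no private (buffer) state can simply point its
 * address_space_operations at migrate_page(); filesystems that do keep
 * buffer_heads would use buffer_migrate_page() below instead.
 * "example_aops" is hypothetical and the other methods are omitted.
 */
#if 0
static const struct address_space_operations example_aops = {
        /* .readpage, .writepage, ... omitted */
        .migratepage    = migrate_page,
};
#endif
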
#ifdef CONFIG_BLOCK
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct address_space *mapping,
                struct page *newpage, struct page *page)
{
        struct buffer_head *bh, *head;
        int rc;

        if (!page_has_buffers(page))
                return migrate_page(mapping, newpage, page);

        head = page_buffers(page);

        rc = migrate_page_move_mapping(mapping, newpage, page);

        if (rc)
                return rc;

        bh = head;
        do {
                get_bh(bh);
                lock_buffer(bh);
                bh = bh->b_this_page;

        } while (bh != head);

        ClearPagePrivate(page);
        set_page_private(newpage, page_private(page));
        set_page_private(page, 0);
        put_page(page);
        get_page(newpage);

        bh = head;
        do {
                set_bh_page(bh, newpage, bh_offset(bh));
                bh = bh->b_this_page;

        } while (bh != head);

        SetPagePrivate(newpage);

        migrate_page_copy(newpage, page);

        bh = head;
        do {
                unlock_buffer(bh);
                put_bh(bh);
                bh = bh->b_this_page;

        } while (bh != head);

        return 0;
}
EXPORT_SYMBOL(buffer_migrate_page);
#endif

/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_NONE,
                .nr_to_write = 1,
                .range_start = 0,
                .range_end = LLONG_MAX,
                .nonblocking = 1,
                .for_reclaim = 1
        };
        int rc;

        if (!mapping->a_ops->writepage)
                /* No write method for the address space */
                return -EINVAL;

        if (!clear_page_dirty_for_io(page))
                /* Someone else already triggered a write */
                return -EAGAIN;

        /*
         * A dirty page may imply that the underlying filesystem has
         * the page on some queue. So the page must be clean for
         * migration. Writeout may mean we lose the lock and the
         * page state is no longer what we checked for earlier.
         * At this point we know that the migration attempt cannot
         * be successful.
         */
        remove_migration_ptes(page, page);

        rc = mapping->a_ops->writepage(page, &wbc);
        if (rc < 0)
                /* I/O Error writing */
                return -EIO;

        if (rc != AOP_WRITEPAGE_ACTIVATE)
                /* unlocked. Relock */
                lock_page(page);

        return -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
        struct page *newpage, struct page *page)
{
        if (PageDirty(page))
                return writeout(mapping, page);

        /*
         * Buffers may be managed in a filesystem specific way.
         * We must have no buffers or drop them.
         */
        if (PagePrivate(page) &&
            !try_to_release_page(page, GFP_KERNEL))
                return -EAGAIN;

        return migrate_page(mapping, newpage, page);
}

/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 */
static int move_to_new_page(struct page *newpage, struct page *page)
{
        struct address_space *mapping;
        int rc;

        /*
         * Block others from accessing the page when we get around to
         * establishing additional references. We are the only one
         * holding a reference to the new page at this point.
         */
        if (TestSetPageLocked(newpage))
                BUG();

        /* Prepare mapping for the new page.*/
        newpage->index = page->index;
        newpage->mapping = page->mapping;

        mapping = page_mapping(page);
        if (!mapping)
                rc = migrate_page(mapping, newpage, page);
        else if (mapping->a_ops->migratepage)
                /*
                 * Most pages have a mapping and most filesystems
                 * should provide a migration function. Anonymous
                 * pages are part of swap space which also has its
                 * own migration function. This is the most common
                 * path for page migration.
                 */
                rc = mapping->a_ops->migratepage(mapping,
                                                newpage, page);
        else
                rc = fallback_migrate_page(mapping, newpage, page);

        if (!rc)
                remove_migration_ptes(page, newpage);
        else
                newpage->mapping = NULL;

        unlock_page(newpage);

        return rc;
}

/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static int unmap_and_move(new_page_t get_new_page, unsigned long private,
                        struct page *page, int force)
{
        int rc = 0;
        int *result = NULL;
        struct page *newpage = get_new_page(page, private, &result);

        if (!newpage)
                return -ENOMEM;

        if (page_count(page) == 1)
                /* page was freed from under us. So we are done. */
                goto move_newpage;

        rc = -EAGAIN;
        if (TestSetPageLocked(page)) {
                if (!force)
                        goto move_newpage;
                lock_page(page);
        }

        if (PageWriteback(page)) {
                if (!force)
                        goto unlock;
                wait_on_page_writeback(page);
        }

        /*
         * Establish migration ptes or remove ptes
         */
        try_to_unmap(page, 1);
        if (!page_mapped(page))
                rc = move_to_new_page(newpage, page);

        if (rc)
                remove_migration_ptes(page, page);

unlock:
        unlock_page(page);

        if (rc != -EAGAIN) {
                /*
                 * A page that has been migrated has all references
                 * removed and will be freed. A page that has not been
                 * migrated will have kept its references and be
                 * restored.
                 */
                list_del(&page->lru);
                move_to_lru(page);
        }

move_newpage:
        /*
         * Move the new page to the LRU. If migration was not successful
         * then this will free the page.
         */
        move_to_lru(newpage);
        if (result) {
                if (rc)
                        *result = rc;
                else
                        *result = page_to_nid(newpage);
        }
        return rc;
}

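/*
 * Editor's sketch (not part of the original file): the shape of a
 * get_new_page callback (new_page_t) expected by unmap_and_move() and
 * migrate_pages(). This hypothetical example allocates the replacement
 * page on the node packed into "private"; new_page_node() below is the
 * real in-file implementation of this interface.
 */
#if 0
static struct page *example_new_page(struct page *page, unsigned long private,
                                        int **resultp)
{
        int nid = (int)private;

        /*
         * Leaving *resultp untouched means unmap_and_move() skips
         * per-page status reporting for this page.
         */
        return alloc_pages_node(nid, GFP_HIGHUSER_MOVABLE, 0);
}
#endif
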
/*
 * migrate_pages
 *
 * The function takes one list of pages to migrate and a function
 * that determines from the page to be migrated and the private data
 * the target of the move and allocates the page.
 *
 * The function returns after 10 attempts or if no pages
 * are movable anymore because the list has become empty
 * or no retryable pages exist anymore. All pages will be
 * returned to the LRU or freed.
 *
 * Return: Number of pages not migrated or error code.
 */
int migrate_pages(struct list_head *from,
                new_page_t get_new_page, unsigned long private)
{
        int retry = 1;
        int nr_failed = 0;
        int pass = 0;
        struct page *page;
        struct page *page2;
        int swapwrite = current->flags & PF_SWAPWRITE;
        int rc;

        if (!swapwrite)
                current->flags |= PF_SWAPWRITE;

        for(pass = 0; pass < 10 && retry; pass++) {
                retry = 0;

                list_for_each_entry_safe(page, page2, from, lru) {
                        cond_resched();

                        rc = unmap_and_move(get_new_page, private,
                                                page, pass > 2);

                        switch(rc) {
                        case -ENOMEM:
                                goto out;
                        case -EAGAIN:
                                retry++;
                                break;
                        case 0:
                                break;
                        default:
                                /* Permanent failure */
                                nr_failed++;
                                break;
                        }
                }
        }
        rc = 0;
out:
        if (!swapwrite)
                current->flags &= ~PF_SWAPWRITE;

        putback_lru_pages(from);

        if (rc)
                return rc;

        return nr_failed + retry;
}

#ifdef CONFIG_NUMA
/*
 * Move a list of individual pages
 */
struct page_to_node {
        unsigned long addr;
        struct page *page;
        int node;
        int status;
};

static struct page *new_page_node(struct page *p, unsigned long private,
                int **result)
{
        struct page_to_node *pm = (struct page_to_node *)private;

        while (pm->node != MAX_NUMNODES && pm->page != p)
                pm++;

        if (pm->node == MAX_NUMNODES)
                return NULL;

        *result = &pm->status;

        return alloc_pages_node(pm->node,
                                GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
}

/*
 * Move a set of pages as indicated in the pm array. The addr
 * field must be set to the virtual address of the page to be moved
 * and the node number must contain a valid target node.
 */
static int do_move_pages(struct mm_struct *mm, struct page_to_node *pm,
                                int migrate_all)
{
        int err;
        struct page_to_node *pp;
        LIST_HEAD(pagelist);

        down_read(&mm->mmap_sem);

        /*
         * Build a list of pages to migrate
         */
        migrate_prep();
        for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
                struct vm_area_struct *vma;
                struct page *page;

                /*
                 * A valid page pointer that will not match any of the
                 * pages that will be moved.
                 */
                pp->page = ZERO_PAGE(0);

                err = -EFAULT;
                vma = find_vma(mm, pp->addr);
                if (!vma || !vma_migratable(vma))
                        goto set_status;

                page = follow_page(vma, pp->addr, FOLL_GET);
                err = -ENOENT;
                if (!page)
                        goto set_status;

                if (PageReserved(page))         /* Check for zero page */
                        goto put_and_set;

                pp->page = page;
                err = page_to_nid(page);

                if (err == pp->node)
                        /*
                         * Node already in the right place
                         */
                        goto put_and_set;

                err = -EACCES;
                if (page_mapcount(page) > 1 &&
                                !migrate_all)
                        goto put_and_set;

                err = isolate_lru_page(page, &pagelist);
put_and_set:
                /*
                 * Either remove the duplicate refcount from
                 * isolate_lru_page() or drop the page ref if it was
                 * not isolated.
                 */
                put_page(page);
set_status:
                pp->status = err;
        }

        if (!list_empty(&pagelist))
                err = migrate_pages(&pagelist, new_page_node,
                                (unsigned long)pm);
        else
                err = -ENOENT;

        up_read(&mm->mmap_sem);
        return err;
}

/*
 * Determine the nodes of a list of pages. The addr in the pm array
 * must have been set to the virtual address of the page whose node
 * number we want to determine.
 */
static int do_pages_stat(struct mm_struct *mm, struct page_to_node *pm)
{
        down_read(&mm->mmap_sem);

        for ( ; pm->node != MAX_NUMNODES; pm++) {
                struct vm_area_struct *vma;
                struct page *page;
                int err;

                err = -EFAULT;
                vma = find_vma(mm, pm->addr);
                if (!vma)
                        goto set_status;

                page = follow_page(vma, pm->addr, 0);
                err = -ENOENT;
                /* Use PageReserved to check for zero page */
                if (!page || PageReserved(page))
                        goto set_status;

                err = page_to_nid(page);
set_status:
                pm->status = err;
        }

        up_read(&mm->mmap_sem);
        return 0;
}

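/*
 * Editor's sketch (not part of the original file): typical userspace use
 * of the move_pages(2) system call implemented below, via the libnuma
 * wrapper declared in <numaif.h>. Passing a NULL "nodes" array turns the
 * call into a pure status query handled by do_pages_stat() above.
 * "some_addr" is a hypothetical page-aligned address in the caller.
 */
#if 0
        void *pages[1] = { some_addr };
        int nodes[1] = { 1 };           /* request a move to node 1 */
        int status[1];

        if (move_pages(0 /* current process */, 1, pages, nodes, status,
                        MPOL_MF_MOVE) == 0 && status[0] >= 0)
                printf("page now on node %d\n", status[0]);
#endif
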
/*
 * Move a list of pages in the address space of the currently executing
 * process.
 */
asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
                        const void __user * __user *pages,
                        const int __user *nodes,
                        int __user *status, int flags)
{
        int err = 0;
        int i;
        struct task_struct *task;
        nodemask_t task_nodes;
        struct mm_struct *mm;
        struct page_to_node *pm = NULL;

        /* Check flags */
        if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
                return -EINVAL;

        if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
                return -EPERM;

        /* Find the mm_struct */
        read_lock(&tasklist_lock);
        task = pid ? find_task_by_pid(pid) : current;
        if (!task) {
                read_unlock(&tasklist_lock);
                return -ESRCH;
        }
        mm = get_task_mm(task);
        read_unlock(&tasklist_lock);

        if (!mm)
                return -EINVAL;

        /*
         * Check if this process has the right to modify the specified
         * process. The right exists if the process has administrative
         * capabilities, superuser privileges or the same
         * userid as the target process.
         */
        if ((current->euid != task->suid) && (current->euid != task->uid) &&
            (current->uid != task->suid) && (current->uid != task->uid) &&
            !capable(CAP_SYS_NICE)) {
                err = -EPERM;
                goto out2;
        }

        err = security_task_movememory(task);
        if (err)
                goto out2;

        task_nodes = cpuset_mems_allowed(task);

        /* Limit nr_pages so that the multiplication may not overflow */
        if (nr_pages >= ULONG_MAX / sizeof(struct page_to_node) - 1) {
                err = -E2BIG;
                goto out2;
        }

        pm = vmalloc((nr_pages + 1) * sizeof(struct page_to_node));
        if (!pm) {
                err = -ENOMEM;
                goto out2;
        }

        /*
         * Get parameters from user space and initialize the pm
         * array. Return various errors if the user did something wrong.
         */
        for (i = 0; i < nr_pages; i++) {
                const void __user *p;

                err = -EFAULT;
                if (get_user(p, pages + i))
                        goto out;

                pm[i].addr = (unsigned long)p;
                if (nodes) {
                        int node;

                        if (get_user(node, nodes + i))
                                goto out;

                        err = -ENODEV;
                        if (!node_online(node))
                                goto out;

                        err = -EACCES;
                        if (!node_isset(node, task_nodes))
                                goto out;

                        pm[i].node = node;
                } else
                        pm[i].node = 0; /* anything to not match MAX_NUMNODES */
        }
        /* End marker */
        pm[nr_pages].node = MAX_NUMNODES;

        if (nodes)
                err = do_move_pages(mm, pm, flags & MPOL_MF_MOVE_ALL);
        else
                err = do_pages_stat(mm, pm);

        if (err >= 0)
                /* Return status information */
                for (i = 0; i < nr_pages; i++)
                        if (put_user(pm[i].status, status + i))
                                err = -EFAULT;

out:
        vfree(pm);
out2:
        mmput(mm);
        return err;
}
#endif

/*
 * Call migration functions in the vma_ops that may prepare
 * memory in a vm for migration. Migration functions may perform
 * the migration for vmas that do not have an underlying page struct.
 */
int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
        const nodemask_t *from, unsigned long flags)
{
        struct vm_area_struct *vma;
        int err = 0;

        for(vma = mm->mmap; vma->vm_next && !err; vma = vma->vm_next) {
                if (vma->vm_ops && vma->vm_ops->migrate) {
                        err = vma->vm_ops->migrate(vma, to, from, flags);
                        if (err)
                                break;
                }
        }
        return err;
}

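/*
 * Editor's sketch (not part of the original file): a hypothetical kernel
 * caller of migrate_vmas(). The hook only matters for VMAs whose backing
 * memory has no struct page (see comment above); the argument order
 * follows the declaration above (destination mask first, then source).
 */
#if 0
static int example_migrate_vmas(struct mm_struct *mm, int from_nid, int to_nid)
{
        nodemask_t from = NODE_MASK_NONE;
        nodemask_t to = NODE_MASK_NONE;
        int err;

        node_set(from_nid, from);
        node_set(to_nid, to);

        down_read(&mm->mmap_sem);       /* the mm->mmap walk needs mmap_sem */
        err = migrate_vmas(mm, &to, &from, MPOL_MF_MOVE);
        up_read(&mm->mmap_sem);

        return err;
}
#endif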