--- migrate.c (94723aafb9e76414fada7c1c198733a86f01ea8f)
+++ migrate.c (b93b016313b3ba8003c3b8bb71f569af91f19fc7)
 // SPDX-License-Identifier: GPL-2.0
 /*
  * Memory Migration functionality - linux/mm/migrate.c
  *
  * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
  *
  * Page migration was first developed in the context of the memory hotplug
  * project. The main authors of the migration code are:

--- 453 unchanged lines hidden ---

 			__SetPageSwapBacked(newpage);

 		return MIGRATEPAGE_SUCCESS;
 	}

 	oldzone = page_zone(page);
 	newzone = page_zone(newpage);

-	spin_lock_irq(&mapping->tree_lock);
+	xa_lock_irq(&mapping->i_pages);

-	pslot = radix_tree_lookup_slot(&mapping->page_tree,
+	pslot = radix_tree_lookup_slot(&mapping->i_pages,
 					page_index(page));

 	expected_count += 1 + page_has_private(page);
 	if (page_count(page) != expected_count ||
-		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
-		spin_unlock_irq(&mapping->tree_lock);
+		radix_tree_deref_slot_protected(pslot,
+					&mapping->i_pages.xa_lock) != page) {
+		xa_unlock_irq(&mapping->i_pages);
 		return -EAGAIN;
 	}

 	if (!page_ref_freeze(page, expected_count)) {
-		spin_unlock_irq(&mapping->tree_lock);
+		xa_unlock_irq(&mapping->i_pages);
 		return -EAGAIN;
 	}

 	/*
 	 * In the async migration case of moving a page with buffers, lock the
 	 * buffers using trylock before the mapping is moved. If the mapping
 	 * was moved, we later failed to lock the buffers and could not move
 	 * the mapping back due to an elevated page count, we would have to
 	 * block waiting on other references to be dropped.
 	 */
 	if (mode == MIGRATE_ASYNC && head &&
 			!buffer_migrate_lock_buffers(head, mode)) {
 		page_ref_unfreeze(page, expected_count);
-		spin_unlock_irq(&mapping->tree_lock);
+		xa_unlock_irq(&mapping->i_pages);
 		return -EAGAIN;
 	}

 	/*
 	 * Now we know that no one else is looking at the page:
 	 * no turning back from here.
 	 */
 	newpage->index = page->index;

--- 11 unchanged lines hidden ---

 	/* Move dirty while page refs frozen and newpage not yet exposed */
 	dirty = PageDirty(page);
 	if (dirty) {
 		ClearPageDirty(page);
 		SetPageDirty(newpage);
 	}

-	radix_tree_replace_slot(&mapping->page_tree, pslot, newpage);
+	radix_tree_replace_slot(&mapping->i_pages, pslot, newpage);

 	/*
 	 * Drop cache reference from old page by unfreezing
 	 * to one less reference.
 	 * We know this isn't the last reference.
 	 */
 	page_ref_unfreeze(page, expected_count - 1);

-	spin_unlock(&mapping->tree_lock);
+	xa_unlock(&mapping->i_pages);
 	/* Leave irq disabled to prevent preemption while updating stats */

 	/*
 	 * If moved to a different zone then also account
 	 * the page for that zone. Other VM counters will be
 	 * taken care of when we establish references to the
 	 * new page and drop references to the old page.
 	 *

--- 26 unchanged lines hidden ---

  * of migrate_page_move_mapping().
  */
 int migrate_huge_page_move_mapping(struct address_space *mapping,
 				   struct page *newpage, struct page *page)
 {
 	int expected_count;
 	void **pslot;

-	spin_lock_irq(&mapping->tree_lock);
+	xa_lock_irq(&mapping->i_pages);

-	pslot = radix_tree_lookup_slot(&mapping->page_tree,
-					page_index(page));
+	pslot = radix_tree_lookup_slot(&mapping->i_pages, page_index(page));

 	expected_count = 2 + page_has_private(page);
 	if (page_count(page) != expected_count ||
-		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
-		spin_unlock_irq(&mapping->tree_lock);
+		radix_tree_deref_slot_protected(pslot, &mapping->i_pages.xa_lock) != page) {
+		xa_unlock_irq(&mapping->i_pages);
 		return -EAGAIN;
 	}

 	if (!page_ref_freeze(page, expected_count)) {
-		spin_unlock_irq(&mapping->tree_lock);
+		xa_unlock_irq(&mapping->i_pages);
 		return -EAGAIN;
 	}

 	newpage->index = page->index;
 	newpage->mapping = page->mapping;

 	get_page(newpage);

-	radix_tree_replace_slot(&mapping->page_tree, pslot, newpage);
+	radix_tree_replace_slot(&mapping->i_pages, pslot, newpage);

 	page_ref_unfreeze(page, expected_count - 1);

-	spin_unlock_irq(&mapping->tree_lock);
+	xa_unlock_irq(&mapping->i_pages);

 	return MIGRATEPAGE_SUCCESS;
 }

 /*
  * Gigantic pages are so large that we do not guarantee that page++ pointer
  * arithmetic will work across the entire page. We need something more
  * specialized.

--- 2383 unchanged lines hidden ---
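For context on the hunks above: the new xa_*() calls are thin wrappers around a spinlock that now lives inside the page-cache tree root, which is why radix_tree_deref_slot_protected() can name the lock directly as &mapping->i_pages.xa_lock. The sketch below is a paraphrase, assuming the definitions that accompanied this change in include/linux/xarray.h and include/linux/fs.h; it is illustrative, not a verbatim copy of either header, and omits most members of struct address_space.

/*
 * Sketch only -- paraphrased, not copied verbatim from the kernel headers.
 * The xa_*lock helpers simply forward to the usual spinlock primitives,
 * operating on the lock embedded in the tree root.
 */
#define xa_lock(xa)		spin_lock(&(xa)->xa_lock)
#define xa_unlock(xa)		spin_unlock(&(xa)->xa_lock)
#define xa_lock_irq(xa)		spin_lock_irq(&(xa)->xa_lock)
#define xa_unlock_irq(xa)	spin_unlock_irq(&(xa)->xa_lock)

struct address_space {
	struct inode		*host;
	struct radix_tree_root	i_pages;	/* replaces page_tree + tree_lock */
	/* other members omitted in this sketch */
};

With that in place the conversion is essentially mechanical: every spin_lock_irq(&mapping->tree_lock) becomes xa_lock_irq(&mapping->i_pages) (and likewise for the unlock and non-irq variants), the radix-tree helpers are pointed at mapping->i_pages instead of mapping->page_tree, and the one caller that must pass the lock by name, radix_tree_deref_slot_protected(), receives the embedded &mapping->i_pages.xa_lock instead of the old standalone tree_lock.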