--- rmap.c (fbc90c042cd1dc7258ebfebe6d226017e5b5ac8c)
+++ rmap.c (9651fcedf7b92d3f7f1ab179e8ab55b85ee10fc1)
 /*
  * mm/rmap.c - physical to virtual reverse mappings
  *
  * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
  * Released under the General Public License (GPL).
  *
  * Simple, low overhead reverse mapping scheme.
  * Please try to keep this thing as modular as possible.

[... 1255 unchanged lines hidden ...]

 	 * over the call to folio_add_new_anon_rmap.
 	 */
 	VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root,
 			folio);
 	VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address),
 		       page);
 }

-static void __folio_mod_stat(struct folio *folio, int nr, int nr_pmdmapped)
-{
-	int idx;
-
-	if (nr) {
-		idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED;
-		__lruvec_stat_mod_folio(folio, idx, nr);
-	}
-	if (nr_pmdmapped) {
-		if (folio_test_anon(folio)) {
-			idx = NR_ANON_THPS;
-			__lruvec_stat_mod_folio(folio, idx, nr_pmdmapped);
-		} else {
-			/* NR_*_PMDMAPPED are not maintained per-memcg */
-			idx = folio_test_swapbacked(folio) ?
-				NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED;
-			__mod_node_page_state(folio_pgdat(folio), idx,
-					      nr_pmdmapped);
-		}
-	}
-}
-
 static __always_inline void __folio_add_anon_rmap(struct folio *folio,
 		struct page *page, int nr_pages, struct vm_area_struct *vma,
 		unsigned long address, rmap_t flags, enum rmap_level level)
 {
 	int i, nr, nr_pmdmapped = 0;

-	VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
-
 	nr = __folio_add_rmap(folio, page, nr_pages, level, &nr_pmdmapped);
+	if (nr_pmdmapped)
+		__lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr_pmdmapped);
+	if (nr)
+		__lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);

-	if (likely(!folio_test_ksm(folio)))
+	if (unlikely(!folio_test_anon(folio))) {
+		VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
+		/*
+		 * For a PTE-mapped large folio, we only know that the single
+		 * PTE is exclusive. Further, __folio_set_anon() might not get
+		 * folio->index right when not given the address of the head
+		 * page.
+		 */
+		VM_WARN_ON_FOLIO(folio_test_large(folio) &&
+				 level != RMAP_LEVEL_PMD, folio);
+		__folio_set_anon(folio, vma, address,
+				 !!(flags & RMAP_EXCLUSIVE));
+	} else if (likely(!folio_test_ksm(folio))) {
 		__page_check_anon_rmap(folio, page, vma, address);
+	}

-	__folio_mod_stat(folio, nr, nr_pmdmapped);
-
 	if (flags & RMAP_EXCLUSIVE) {
 		switch (level) {
 		case RMAP_LEVEL_PTE:
 			for (i = 0; i < nr_pages; i++)
 				SetPageAnonExclusive(page + i);
 			break;
 		case RMAP_LEVEL_PMD:
 			SetPageAnonExclusive(page);

[... 68 unchanged lines hidden ...]

 #endif
 }

 /**
  * folio_add_new_anon_rmap - Add mapping to a new anonymous folio.
  * @folio: The folio to add the mapping to.
  * @vma: the vm area in which the mapping is added
  * @address: the user virtual address mapped
- * @flags: The rmap flags
  *
  * Like folio_add_anon_rmap_*() but must only be called on *new* folios.
  * This means the inc-and-test can be bypassed.
- * The folio doesn't necessarily need to be locked while it's exclusive
- * unless two threads map it concurrently. However, the folio must be
- * locked if it's shared.
+ * The folio does not have to be locked.
  *
- * If the folio is pmd-mappable, it is accounted as a THP.
+ * If the folio is pmd-mappable, it is accounted as a THP. As the folio
+ * is new, it's assumed to be mapped exclusively by a single process.
  */
 void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
-		unsigned long address, rmap_t flags)
+		unsigned long address)
 {
-	const int nr = folio_nr_pages(folio);
-	const bool exclusive = flags & RMAP_EXCLUSIVE;
-	int nr_pmdmapped = 0;
+	int nr = folio_nr_pages(folio);

 	VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
-	VM_WARN_ON_FOLIO(!exclusive && !folio_test_locked(folio), folio);
 	VM_BUG_ON_VMA(address < vma->vm_start ||
 			address + (nr << PAGE_SHIFT) > vma->vm_end, vma);
-
-	if (!folio_test_swapbacked(folio))
+	/*
+	 * VM_DROPPABLE mappings don't swap; instead they're just dropped when
+	 * under memory pressure.
+	 */
+	if (!(vma->vm_flags & VM_DROPPABLE))
 		__folio_set_swapbacked(folio);
-	__folio_set_anon(folio, vma, address, exclusive);
+	__folio_set_anon(folio, vma, address, true);

 	if (likely(!folio_test_large(folio))) {
 		/* increment count (starts at -1) */
 		atomic_set(&folio->_mapcount, 0);
-		if (exclusive)
-			SetPageAnonExclusive(&folio->page);
+		SetPageAnonExclusive(&folio->page);
 	} else if (!folio_test_pmd_mappable(folio)) {
 		int i;

 		for (i = 0; i < nr; i++) {
 			struct page *page = folio_page(folio, i);

 			/* increment count (starts at -1) */
 			atomic_set(&page->_mapcount, 0);
-			if (exclusive)
-				SetPageAnonExclusive(page);
+			SetPageAnonExclusive(page);
 		}

 		/* increment count (starts at -1) */
 		atomic_set(&folio->_large_mapcount, nr - 1);
 		atomic_set(&folio->_nr_pages_mapped, nr);
 	} else {
 		/* increment count (starts at -1) */
 		atomic_set(&folio->_entire_mapcount, 0);
 		/* increment count (starts at -1) */
 		atomic_set(&folio->_large_mapcount, 0);
 		atomic_set(&folio->_nr_pages_mapped, ENTIRELY_MAPPED);
-		if (exclusive)
-			SetPageAnonExclusive(&folio->page);
-		nr_pmdmapped = nr;
+		SetPageAnonExclusive(&folio->page);
+		__lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr);
 	}

-	__folio_mod_stat(folio, nr, nr_pmdmapped);
+	__lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
 }

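Both signatures of folio_add_new_anon_rmap() are visible in the hunk above: on the 9651fcedf7b9 side a brand-new anonymous folio is always treated as exclusively mapped, while on the fbc90c042cd1 side the caller passes that decision in through the rmap flags. A minimal caller-side sketch of the difference follows; it is an illustrative fragment only, the fault-handling context around it is elided, and the call sites shown are assumptions rather than lines taken from this diff.

	/*
	 * Illustrative fragment (not from this diff): handing a freshly
	 * allocated anonymous folio to rmap at a fault address.
	 */

	/* rmap.c as of 9651fcedf7b9: a new folio is implicitly exclusive. */
	folio_add_new_anon_rmap(folio, vma, address);

	/* rmap.c as of fbc90c042cd1: the caller states exclusivity via flags. */
	folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE);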
 static __always_inline void __folio_add_file_rmap(struct folio *folio,
 		struct page *page, int nr_pages, struct vm_area_struct *vma,
 		enum rmap_level level)
 {
+	pg_data_t *pgdat = folio_pgdat(folio);
 	int nr, nr_pmdmapped = 0;

 	VM_WARN_ON_FOLIO(folio_test_anon(folio), folio);

 	nr = __folio_add_rmap(folio, page, nr_pages, level, &nr_pmdmapped);
-	__folio_mod_stat(folio, nr, nr_pmdmapped);
+	if (nr_pmdmapped)
+		__mod_node_page_state(pgdat, folio_test_swapbacked(folio) ?
+			NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED, nr_pmdmapped);
+	if (nr)
+		__lruvec_stat_mod_folio(folio, NR_FILE_MAPPED, nr);

 	/* See comments in folio_add_anon_rmap_*() */
 	if (!folio_test_large(folio))
 		mlock_vma_folio(folio, vma);
 }

 /**
  * folio_add_file_rmap_ptes - add PTE mappings to a page range of a folio

[... 32 unchanged lines hidden ...]

 #endif
 }

 static __always_inline void __folio_remove_rmap(struct folio *folio,
 		struct page *page, int nr_pages, struct vm_area_struct *vma,
 		enum rmap_level level)
 {
 	atomic_t *mapped = &folio->_nr_pages_mapped;
+	pg_data_t *pgdat = folio_pgdat(folio);
 	int last, nr = 0, nr_pmdmapped = 0;
 	bool partially_mapped = false;
+	enum node_stat_item idx;

 	__folio_rmap_sanity_checks(folio, page, nr_pages, level);

 	switch (level) {
 	case RMAP_LEVEL_PTE:
 		if (!folio_test_large(folio)) {
 			nr = atomic_add_negative(-1, &page->_mapcount);
 			break;

[... 27 unchanged lines hidden ...]

 				nr = 0;
 			}
 		}

 		partially_mapped = nr < nr_pmdmapped;
 		break;
 	}

+	if (nr_pmdmapped) {
+		/* NR_{FILE/SHMEM}_PMDMAPPED are not maintained per-memcg */
+		if (folio_test_anon(folio))
+			__lruvec_stat_mod_folio(folio, NR_ANON_THPS, -nr_pmdmapped);
+		else
+			__mod_node_page_state(pgdat,
+					folio_test_swapbacked(folio) ?
+					NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED,
+					-nr_pmdmapped);
+	}
 	if (nr) {
+		idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED;
+		__lruvec_stat_mod_folio(folio, idx, -nr);
+
 		/*
 		 * Queue anon large folio for deferred split if at least one
 		 * page of the folio is unmapped and at least one page
 		 * is still mapped.
 		 *
 		 * Check partially_mapped first to ensure it is a large folio.
 		 */
 		if (folio_test_anon(folio) && partially_mapped &&
 		    list_empty(&folio->_deferred_list))
 			deferred_split_folio(folio);
 	}
-	__folio_mod_stat(folio, -nr, -nr_pmdmapped);

 	/*
 	 * It would be tidy to reset folio_test_anon mapping when fully
 	 * unmapped, but that might overwrite a racing folio_add_anon_rmap_*()
 	 * which increments mapcount after us but sets mapping before us:
 	 * so leave the reset to free_pages_prepare, and remember that
 	 * it's only reliable while mapped.
 	 */

[... 58 unchanged lines hidden ...]

 	 * When racing against e.g. zap_pte_range() on another cpu,
 	 * in between its ptep_get_and_clear_full() and folio_remove_rmap_*(),
 	 * try_to_unmap() may return before page_mapped() has become false,
 	 * if page table locking is skipped: use TTU_SYNC to wait for that.
 	 */
 	if (flags & TTU_SYNC)
 		pvmw.flags = PVMW_SYNC;

+	if (flags & TTU_SPLIT_HUGE_PMD)
+		split_huge_pmd_address(vma, address, false, folio);
+
 	/*
 	 * For THP, we have to assume the worse case ie pmd for invalidation.
 	 * For hugetlb, it could be much worse if we need to do pud
 	 * invalidation in the case of pmd sharing.
 	 *
 	 * Note that the folio can not be freed in this function as call of
 	 * try_to_unmap() must hold a reference on the folio.
 	 */

[... 9 unchanged lines hidden ...]

 					&range.end);

 		/* We need the huge page size for set_huge_pte_at() */
 		hsz = huge_page_size(hstate_vma(vma));
 	}
 	mmu_notifier_invalidate_range_start(&range);

 	while (page_vma_mapped_walk(&pvmw)) {
+		/* Unexpected PMD-mapped THP? */
+		VM_BUG_ON_FOLIO(!pvmw.pte, folio);
+
 		/*
 		 * If the folio is in an mlock()d vma, we must not swap it out.
 		 */
 		if (!(flags & TTU_IGNORE_MLOCK) &&
 		    (vma->vm_flags & VM_LOCKED)) {
 			/* Restore the mlock which got missed */
 			if (!folio_test_large(folio))
 				mlock_vma_folio(folio, vma);
-			goto walk_abort;
+			page_vma_mapped_walk_done(&pvmw);
+			ret = false;
+			break;
 		}

-		if (!pvmw.pte) {
-			if (unmap_huge_pmd_locked(vma, pvmw.address, pvmw.pmd,
-						  folio))
-				goto walk_done;
-
-			if (flags & TTU_SPLIT_HUGE_PMD) {
-				/*
-				 * We temporarily have to drop the PTL and
-				 * restart so we can process the PTE-mapped THP.
-				 */
-				split_huge_pmd_locked(vma, pvmw.address,
-						      pvmw.pmd, false, folio);
-				flags &= ~TTU_SPLIT_HUGE_PMD;
-				page_vma_mapped_walk_restart(&pvmw);
-				continue;
-			}
-		}
-
-		/* Unexpected PMD-mapped THP? */
-		VM_BUG_ON_FOLIO(!pvmw.pte, folio);
-
 		pfn = pte_pfn(ptep_get(pvmw.pte));
 		subpage = folio_page(folio, pfn - folio_pfn(folio));
 		address = pvmw.address;
 		anon_exclusive = folio_test_anon(folio) &&
 				 PageAnonExclusive(subpage);

 		if (folio_test_hugetlb(folio)) {
 			bool anon = folio_test_anon(folio);

[... 19 unchanged lines hidden ...]

 			 *
 			 * We also must hold hugetlb vma_lock in write mode.
 			 * Lock order dictates acquiring vma_lock BEFORE
 			 * i_mmap_rwsem. We can only try lock here and fail
 			 * if unsuccessful.
 			 */
 			if (!anon) {
 				VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
-				if (!hugetlb_vma_trylock_write(vma))
-					goto walk_abort;
+				if (!hugetlb_vma_trylock_write(vma)) {
+					page_vma_mapped_walk_done(&pvmw);
+					ret = false;
+					break;
+				}
 				if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
 					hugetlb_vma_unlock_write(vma);
 					flush_tlb_range(vma,
 						range.start, range.end);
 					/*
 					 * The ref count of the PMD page was
 					 * dropped which is part of the way map
 					 * counting is done for shared PMDs.
 					 * Return 'true' here. When there is
 					 * no other sharing, huge_pmd_unshare
 					 * returns false and we will unmap the
 					 * actual page and drop map count
 					 * to zero.
 					 */
-					goto walk_done;
+					page_vma_mapped_walk_done(&pvmw);
+					break;
 				}
 				hugetlb_vma_unlock_write(vma);
 			}
 			pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
 		} else {
 			flush_cache_page(vma, address, pfn);
 			/* Nuke the page table entry. */
 			if (should_defer_flush(mm, flags)) {

[... 55 unchanged lines hidden ...]

 			pte_t swp_pte;
 			/*
 			 * Store the swap location in the pte.
 			 * See handle_pte_fault() ...
 			 */
 			if (unlikely(folio_test_swapbacked(folio) !=
 					folio_test_swapcache(folio))) {
 				WARN_ON_ONCE(1);
-				goto walk_abort;
+				ret = false;
+				page_vma_mapped_walk_done(&pvmw);
+				break;
 			}

 			/* MADV_FREE page check */
 			if (!folio_test_swapbacked(folio)) {
 				int ref_count, map_count;

 				/*
 				 * Synchronize with gup_pte_range():

[... 11 unchanged lines hidden ...]

 				 */
 				smp_rmb();

 				/*
 				 * The only page refs must be one from isolation
 				 * plus the rmap(s) (dropped by discard:).
 				 */
 				if (ref_count == 1 + map_count &&
-				    !folio_test_dirty(folio)) {
+				    (!folio_test_dirty(folio) ||
+				     /*
+				      * Unlike MADV_FREE mappings, VM_DROPPABLE
+				      * ones can be dropped even if they've
+				      * been dirtied.
+				      */
+				     (vma->vm_flags & VM_DROPPABLE))) {
 					dec_mm_counter(mm, MM_ANONPAGES);
 					goto discard;
 				}

 				/*
 				 * If the folio was redirtied, it cannot be
 				 * discarded. Remap the page to page table.
 				 */
 				set_pte_at(mm, address, pvmw.pte, pteval);
-				folio_set_swapbacked(folio);
-				goto walk_abort;
+				/*
+				 * Unlike MADV_FREE mappings, VM_DROPPABLE ones
+				 * never get swap backed on failure to drop.
+				 */
+				if (!(vma->vm_flags & VM_DROPPABLE))
+					folio_set_swapbacked(folio);
+				ret = false;
+				page_vma_mapped_walk_done(&pvmw);
+				break;
 			}

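The widened dirty check above captures the behavioural difference this hunk is about: a clean MADV_FREE page can be discarded, a redirtied one is remapped and re-marked swap-backed, but a VM_DROPPABLE page may be dropped even after it has been written and never becomes swap-backed. Below is a small, self-contained userspace sketch of how the two kinds of lazily freeable mappings are set up, assuming MAP_DROPPABLE is the mmap flag behind the VM_DROPPABLE checks here and that it stands in for MAP_PRIVATE/MAP_SHARED as the mapping type; the program only creates the mappings (it cannot force reclaim) and skips the droppable part when the headers do not define the flag.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	const size_t len = 16 * 4096;

	/*
	 * MADV_FREE: the kernel may drop these pages lazily, but writing to
	 * them again (redirtying) cancels the discard, as in the check above.
	 */
	char *lazy = mmap(NULL, len, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (lazy == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(lazy, 0xaa, len);
	if (madvise(lazy, len, MADV_FREE))
		perror("madvise(MADV_FREE)");

#ifdef MAP_DROPPABLE
	/*
	 * Assumed usage: MAP_DROPPABLE replaces MAP_PRIVATE/MAP_SHARED as the
	 * mapping type.  Contents may vanish under memory pressure even after
	 * being written, and the mapping never swaps.
	 */
	char *droppable = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_DROPPABLE | MAP_ANONYMOUS, -1, 0);
	if (droppable == MAP_FAILED)
		perror("mmap(MAP_DROPPABLE)");	/* e.g. kernel without support */
	else
		memset(droppable, 0x55, len);
#else
	puts("MAP_DROPPABLE not defined by these headers; skipping that part");
#endif
	return 0;
}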
 			if (swap_duplicate(entry) < 0) {
 				set_pte_at(mm, address, pvmw.pte, pteval);
-				goto walk_abort;
+				ret = false;
+				page_vma_mapped_walk_done(&pvmw);
+				break;
 			}
 			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
 				swap_free(entry);
 				set_pte_at(mm, address, pvmw.pte, pteval);
-				goto walk_abort;
+				ret = false;
+				page_vma_mapped_walk_done(&pvmw);
+				break;
 			}

 			/* See folio_try_share_anon_rmap(): clear PTE first. */
 			if (anon_exclusive &&
 			    folio_try_share_anon_rmap_pte(folio, subpage)) {
 				swap_free(entry);
 				set_pte_at(mm, address, pvmw.pte, pteval);
-				goto walk_abort;
+				ret = false;
+				page_vma_mapped_walk_done(&pvmw);
+				break;
 			}
 			if (list_empty(&mm->mmlist)) {
 				spin_lock(&mmlist_lock);
 				if (list_empty(&mm->mmlist))
 					list_add(&mm->mmlist, &init_mm.mmlist);
 				spin_unlock(&mmlist_lock);
 			}
 			dec_mm_counter(mm, MM_ANONPAGES);

[... 23 unchanged lines hidden ...]

 discard:
 		if (unlikely(folio_test_hugetlb(folio)))
 			hugetlb_remove_rmap(folio);
 		else
 			folio_remove_rmap_pte(folio, subpage, vma);
 		if (vma->vm_flags & VM_LOCKED)
 			mlock_drain_local();
 		folio_put(folio);
-		continue;
-walk_abort:
-		ret = false;
-walk_done:
-		page_vma_mapped_walk_done(&pvmw);
-		break;
 	}

 	mmu_notifier_invalidate_range_end(&range);

 	return ret;
 }

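On the fbc90c042cd1 side every early exit from the page walk above funnels through the shared walk_abort/walk_done labels, whereas the 9651fcedf7b9 side repeats the ret = false; page_vma_mapped_walk_done(&pvmw); break; sequence at each exit point. A self-contained sketch of that single-exit pattern in plain userspace C, with hypothetical names rather than kernel code:

#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for the rmap page walk: walk_abort unwinds and
 * reports failure, walk_done unwinds but keeps the success result, and a
 * full pass needs no unwinding at all.
 */
static bool walk(int fail_at, int stop_at, int nsteps)
{
	bool ret = true;
	int pos;

	for (pos = 0; pos < nsteps; pos++) {
		if (pos == fail_at)
			goto walk_abort;	/* error: unwind, report failure */
		if (pos == stop_at)
			goto walk_done;		/* done early: unwind, still a success */
	}
	return ret;

walk_abort:
	ret = false;
walk_done:
	/* plays the role of page_vma_mapped_walk_done() */
	printf("unwinding at step %d\n", pos);
	return ret;
}

int main(void)
{
	printf("%d\n", walk(3, 7, 10));		/* abort path -> 0 */
	printf("%d\n", walk(-1, 7, 10));	/* done path  -> 1 */
	printf("%d\n", walk(-1, -1, 10));	/* full pass  -> 1 */
	return 0;
}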
 static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)

[... 804 unchanged lines hidden ...]