/linux/mm/
folio-compat.c
    15: return folio_unlock(page_folio(page));  in unlock_page()
    21: return folio_end_writeback(page_folio(page));  in end_page_writeback()
    27: return folio_wait_writeback(page_folio(page));  in wait_on_page_writeback()
    33: return folio_wait_stable(page_folio(page));  in wait_for_stable_page()
    39: folio_mark_accessed(page_folio(page));  in mark_page_accessed()
    45: folio_start_writeback(page_folio(page));  in set_page_writeback()
    51: return folio_mark_dirty(page_folio(page));  in set_page_dirty()
    57: return folio_mark_dirty_lock(page_folio(page));  in set_page_dirty_lock()
    63: return folio_clear_dirty_for_io(page_folio(page));  in clear_page_dirty_for_io()
    70: return folio_redirty_for_writepage(wbc, page_folio(page));  in redirty_page_for_writepage()
    …  in add_to_page_cache_lru()  [all …]
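The folio-compat.c matches above all share one shape: a legacy page-based entry point reduced to a one-line wrapper that resolves the page to its owning folio and calls the folio API. Below is a minimal userspace model of that wrapper pattern; struct page, struct folio, page_folio() and the folio_* calls are stand-ins, not the kernel's definitions.

#include <stdio.h>

/* stand-in types: in the kernel a folio owns one or more struct pages */
struct folio { int locked; };
struct page  { struct folio *f; };

/* stand-in for the kernel's page_folio(): page -> owning folio */
static struct folio *page_folio(struct page *page)
{
        return page->f;
}

static void folio_unlock(struct folio *folio)
{
        folio->locked = 0;
}

/* the compat shape: legacy page API delegating to the folio API */
static void unlock_page(struct page *page)
{
        folio_unlock(page_folio(page));
}

int main(void)
{
        struct folio f = { .locked = 1 };
        struct page p = { .f = &f };

        unlock_page(&p);
        printf("locked=%d\n", f.locked);        /* prints locked=0 */
        return 0;
}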
memory-failure.c
    179: ret = dissolve_free_hugetlb_folio(page_folio(page));  in __page_handle_poison()
    230: struct folio *folio = page_folio(p);  in hwpoison_filter_dev()
    398: shake_folio(page_folio(page));  in shake_page()
    464: tk->size_shift = folio_shift(page_folio(p));  in __add_to_kill()
    1024: count -= folio_nr_pages(page_folio(p));  in has_extra_refcount()
    1062: struct folio *folio = page_folio(p);  in me_pagecache_clean()
    1120: struct folio *folio = page_folio(p);  in me_pagecache_dirty()
    1158: struct folio *folio = page_folio(p);  in me_swapcache_dirty()
    1180: struct folio *folio = page_folio(p);  in me_swapcache_clean()
    1202: struct folio *folio = page_folio(p);  in me_huge_page()
    [all …]
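The has_extra_refcount() match at line 1024 shows why page_folio() matters for refcount math: a page-cache pin holds one reference per page of the folio, so the expected count for a poisoned page must be adjusted by folio_nr_pages(). A sketch of that accounting with the same stand-in types as above; the exact set of references the kernel subtracts is more involved than this model.

#include <stdio.h>

struct folio { int nr_pages; int refcount; };
struct page  { struct folio *f; };

static struct folio *page_folio(struct page *page) { return page->f; }
static int folio_nr_pages(struct folio *folio) { return folio->nr_pages; }
static int page_count(struct page *page) { return page_folio(page)->refcount; }

/* any reference beyond the ones we can account for is suspicious */
static int has_extra_refcount(struct page *p, int extra_pins)
{
        int count = page_count(p) - 1;          /* drop our own reference */

        if (extra_pins)                         /* pagecache: one ref per page */
                count -= folio_nr_pages(page_folio(p));
        return count > 0;
}

int main(void)
{
        struct folio f = { .nr_pages = 4, .refcount = 5 };
        struct page p = { .f = &f };

        /* 5 refs = ours + a 4-page pin: nothing unexplained */
        printf("extra=%d\n", has_extra_refcount(&p, 1));
        return 0;
}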
migrate_device.c
    180: folio = page_folio(page);  in migrate_vma_collect_pmd()
    327: struct folio *folio = page_folio(page);  in migrate_vma_check_page()
    382: folio = page_folio(page);  in migrate_device_unmap()
    426: folio = page_folio(page);  in migrate_device_unmap()
    572: struct folio *folio = page_folio(page);  in migrate_vma_insert_page()
    730: newfolio = page_folio(newpage);  in __migrate_device_pages()
    731: folio = page_folio(page);  in __migrate_device_pages()
    823: dst = page_folio(newpage);  in migrate_device_finalize()
    833: src = page_folio(page);  in migrate_device_finalize()
mmu_gather.c
    66: folio_remove_rmap_ptes(page_folio(page), page, nr_pages, …  in tlb_flush_rmap_batch()
    176: VM_WARN_ON_ONCE(page_folio(page) != page_folio(page + nr_pages - 1));  in __tlb_remove_folio_pages_size()
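Both this VM_WARN_ON_ONCE() and the identical check in the s390 tlb.h entry further down guard the same invariant: a page batch handed to the TLB gather code must not cross a folio boundary. A compact model of the check follows, with stand-in types; in the kernel, consecutive struct pages of one folio are adjacent in the memmap, which is what makes the pointer arithmetic valid.

#include <assert.h>
#include <stdio.h>

struct folio { int id; };
struct page  { struct folio *f; };

static struct folio *page_folio(struct page *page) { return page->f; }

/* batch removal is only sound if the whole range shares one folio */
static void remove_folio_pages(struct page *page, int nr_pages)
{
        assert(page_folio(page) == page_folio(page + nr_pages - 1));
        printf("removed %d pages of folio %d\n", nr_pages, page_folio(page)->id);
}

int main(void)
{
        struct folio f = { .id = 7 };
        struct page pages[4] = { {&f}, {&f}, {&f}, {&f} };

        remove_folio_pages(pages, 4);   /* ok: all four map to folio 7 */
        return 0;
}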
page_idle.c
    42: folio = page_folio(page);  in page_idle_get_folio()
    45: if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) {  in page_idle_get_folio()
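page_idle_get_folio() (and damon_get_folio() in the /linux/mm/damon/ entry below, which is line-for-line the same dance) demonstrates the speculative-lookup pattern: snapshot the folio, try to take a reference, then recheck that the page still belongs to that folio, because the folio may have been split or freed in between. A userspace model, with folio_try_get()/folio_put() reduced to a plain counter and get_stable_folio() as a hypothetical name for the shape:

#include <stdio.h>

struct folio { int refs; };
struct page  { struct folio *f; };

static struct folio *page_folio(struct page *page) { return page->f; }

static int folio_try_get(struct folio *folio)
{
        if (folio->refs == 0)
                return 0;               /* already freed: cannot revive */
        folio->refs++;
        return 1;
}

static void folio_put(struct folio *folio) { folio->refs--; }

static struct folio *get_stable_folio(struct page *page)
{
        struct folio *folio = page_folio(page);

        if (!folio_try_get(folio))
                return NULL;
        if (page_folio(page) != folio) {        /* raced with split/free */
                folio_put(folio);
                return NULL;
        }
        return folio;
}

int main(void)
{
        struct folio f = { .refs = 1 };
        struct page p = { .f = &f };

        printf("%s\n", get_stable_folio(&p) ? "stable" : "raced");
        return 0;
}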
gup.c
    60: folio = page_folio(page);  in sanity_check_pinned_pages()
    83: folio = page_folio(page);  in try_get_folio()
    98: if (unlikely(page_folio(page) != folio)) {  in try_get_folio()
    194: gup_put_folio(page_folio(page), 1, FOLL_PIN);  in unpin_user_page()
    242: struct folio *folio = page_folio(next);  in gup_folio_range_next()
    256: struct folio *folio = page_folio(list[i]);  in gup_folio_next()
    260: if (page_folio(list[nr]) != folio)  in gup_folio_next()
    555: return page_folio(page);  in try_grab_folio_fast()
    671: ret = try_grab_folio(page_folio(page), 1, flags);  in follow_huge_pud()
    748: ret = try_grab_folio(page_folio(page), 1, flags);  in follow_huge_pmd()
    [all …]
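The gup_folio_next() matches at lines 256 and 260 are the unpin fast path: when releasing an array of pinned pages, consecutive entries that resolve to the same folio are grouped so the folio's refcount is dropped once per run rather than once per page. A sketch of that grouping loop; unpin_pages() is a hypothetical wrapper, and the kernel's version feeds the run length into gup_put_folio().

#include <stdio.h>

struct folio { int id; };
struct page  { struct folio *f; };

static struct folio *page_folio(struct page *page) { return page->f; }

static void unpin_pages(struct page **list, int npages)
{
        for (int i = 0; i < npages; ) {
                struct folio *folio = page_folio(list[i]);
                int nr = 1;

                /* extend the run while entries share the same folio */
                while (i + nr < npages && page_folio(list[i + nr]) == folio)
                        nr++;
                printf("put folio %d: drop %d refs at once\n", folio->id, nr);
                i += nr;
        }
}

int main(void)
{
        struct folio a = { .id = 1 }, b = { .id = 2 };
        struct page p[4] = { {&a}, {&a}, {&a}, {&b} };
        struct page *list[4] = { &p[0], &p[1], &p[2], &p[3] };

        unpin_pages(list, 4);   /* one put for the run of 3, one for the last */
        return 0;
}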
hwpoison-inject.c
    28: folio = page_folio(p);  in hwpoison_inject()
/linux/arch/csky/abiv1/
cacheflush.c
    40: flush_dcache_folio(page_folio(page));  in flush_dcache_page()
    58: folio = page_folio(pfn_to_page(pfn));  in update_mmu_cache_range()
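This csky entry shows the two conversions that repeat through the remaining arch entries (mips, arm64, openrisc, arm, sh, riscv, nios2, arc): flush_dcache_page() reduced to flush_dcache_folio(page_folio(page)), and the pfn -> page -> folio chain in the MMU-cache hooks. A stand-in model of both; pfn_to_page() here is just an index into a fake memmap array, nothing like the kernel's real memory model.

#include <stdio.h>

struct folio { int dirty; };
struct page  { struct folio *f; };

#define NR_PAGES 8
static struct folio folios[NR_PAGES / 2];
static struct page mem_map[NR_PAGES];

/* stand-ins for the kernel's pfn_to_page() and page_folio() */
static struct page *pfn_to_page(unsigned long pfn) { return &mem_map[pfn]; }
static struct folio *page_folio(struct page *page) { return page->f; }

static void flush_dcache_folio(struct folio *folio) { folio->dirty = 0; }

/* flush_dcache_page() on these arches is just this delegation */
static void flush_dcache_page(struct page *page)
{
        flush_dcache_folio(page_folio(page));
}

/* the shape of the update_mmu_cache_range()/update_cache() hooks */
static void update_cache(unsigned long pfn)
{
        struct folio *folio = page_folio(pfn_to_page(pfn));

        flush_dcache_folio(folio);
}

int main(void)
{
        for (int i = 0; i < NR_PAGES; i++)
                mem_map[i].f = &folios[i / 2];  /* two pages per folio */
        folios[1].dirty = 1;

        update_cache(3);                        /* pfn 3 lives in folio 1 */
        flush_dcache_page(pfn_to_page(0));
        printf("folio 1 dirty=%d\n", folios[1].dirty);
        return 0;
}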
/linux/arch/mips/mm/
cache.c
    104: struct folio *folio = page_folio(page);  in __flush_dcache_pages()
    130: struct folio *folio = page_folio(page);  in __flush_anon_page()
    157: folio = page_folio(pfn_to_page(pfn));  in __update_cache()
init.c
    93: BUG_ON(folio_test_dcache_dirty(page_folio(page)));  in __kmap_pgprot()
    174: struct folio *src = page_folio(from);  in copy_user_highpage()
    200: struct folio *folio = page_folio(page);  in copy_to_user_page()
    220: struct folio *folio = page_folio(page);  in copy_from_user_page()
/linux/arch/arm64/mm/
flush.c
    54: struct folio *folio = page_folio(pte_page(pte));  in __sync_icache_dcache()
    79: flush_dcache_folio(page_folio(page));  in flush_dcache_page()
/linux/mm/damon/
ops-common.c
    30: folio = page_folio(page);  in damon_get_folio()
    33: if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) {  in damon_get_folio()
/linux/arch/s390/include/asm/
tlb.h
    66: VM_WARN_ON_ONCE(page_folio(page) != page_folio(page + nr_pages - 1));  in __tlb_remove_folio_pages()
/linux/include/linux/
page-flags.h
    264: #define page_folio(p) (_Generic((p), \  (macro definition)
    694: unsigned long flags = (unsigned long)page_folio(page)->mapping;  in PageAnonNotKsm()
    701: return folio_test_anon(page_folio(page));  in PageAnon()
    782: return folio_test_uptodate(page_folio(page));  in PageUptodate()
    1070: return folio_test_slab(page_folio(page));  in PAGE_TYPE_OPS()
    1098: return folio_test_hugetlb(page_folio(page));  in FOLIO_TYPE_OPS()
    1112: folio = page_folio(page);  in is_page_hwpoison()
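The line-264 match is the definition itself: page_folio() is a _Generic() macro so that a const struct page * yields a const struct folio * while a plain pointer stays mutable. The model below reproduces that const-preserving dispatch with a stand-in __page_folio() helper in place of the kernel's _compound_head().

#include <stdio.h>

struct folio { int id; };
struct page  { struct folio *f; };

static struct folio *__page_folio(struct page *p) { return p->f; }

/* dispatch on the argument's type; the const branch restores constness */
#define page_folio(p) (_Generic((p),                                    \
        const struct page *: (const struct folio *)__page_folio((struct page *)(p)), \
        struct page *:       __page_folio(p)))

int main(void)
{
        struct folio f = { .id = 42 };
        struct page pg = { .f = &f };
        const struct page *cp = &pg;

        /* non-const and const paths both resolve to folio 42 */
        printf("%d %d\n", page_folio(&pg)->id, page_folio(cp)->id);
        return 0;
}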
/linux/fs/btrfs/tests/
extent-io-tests.c
    183: found = find_lock_delalloc_range(inode, page_folio(locked_page), &start, …  in test_find_delalloc()
    214: found = find_lock_delalloc_range(inode, page_folio(locked_page), &start, …  in test_find_delalloc()
    248: found = find_lock_delalloc_range(inode, page_folio(locked_page), &start, …  in test_find_delalloc()
    269: found = find_lock_delalloc_range(inode, page_folio(locked_page), &start, …  in test_find_delalloc()
    310: found = find_lock_delalloc_range(inode, page_folio(locked_page), &start, …  in test_find_delalloc()
/linux/arch/openrisc/include/asm/
cacheflush.h
    68: flush_dcache_folio(page_folio(page));  in flush_dcache_page()
/linux/arch/openrisc/mm/
cache.c
    46: struct folio *folio = page_folio(pfn_to_page(pfn));  in update_cache()
/linux/arch/arm/mm/
copypage-xscale.c
    87: struct folio *src = page_folio(from);  in xscale_mc_copy_user_highpage()
copypage-v4mc.c
    67: struct folio *src = page_folio(from);  in v4_mc_copy_user_highpage()
flush.c
    298: folio = page_folio(pfn_to_page(pfn));  in __sync_icache_dcache()
    369: flush_dcache_folio(page_folio(page));  in flush_dcache_page()
/linux/arch/sh/mm/
kmap.c
    30: struct folio *folio = page_folio(page);  in kmap_coherent()
/linux/arch/riscv/include/asm/
cacheflush.h
    34: flush_dcache_folio(page_folio(page));  in flush_dcache_page()
/linux/arch/nios2/mm/
cacheflush.c
    205: flush_dcache_folio(page_folio(page));  in flush_dcache_page()
    229: folio = page_folio(pfn_to_page(pfn));  in update_mmu_cache_range()
/linux/arch/csky/abiv2/inc/abi/
cacheflush.h
    31: flush_dcache_folio(page_folio(page));  in flush_dcache_page()
/linux/arch/arc/mm/
cache.c
    714: return flush_dcache_folio(page_folio(page));  in flush_dcache_page()
    885: struct folio *src = page_folio(from);  in copy_user_highpage()
    886: struct folio *dst = page_folio(to);  in copy_user_highpage()
    901: struct folio *folio = page_folio(page);  in clear_user_page()