/linux/mm/

memory-failure.c
  179   ret = dissolve_free_hugetlb_folio(page_folio(page));   in __page_handle_poison()
  230   struct folio *folio = page_folio(p);   in hwpoison_filter_dev()
  398   shake_folio(page_folio(page));   in shake_page()
  464   tk->size_shift = folio_shift(page_folio(p));   in __add_to_kill()
  1024  count -= folio_nr_pages(page_folio(p));   in has_extra_refcount()
  1062  struct folio *folio = page_folio(p);   in me_pagecache_clean()
  1120  struct folio *folio = page_folio(p);   in me_pagecache_dirty()
  1158  struct folio *folio = page_folio(p);   in me_swapcache_dirty()
  1180  struct folio *folio = page_folio(p);   in me_swapcache_clean()
  1202  struct folio *folio = page_folio(p);   in me_huge_page()
  [all …]

migrate_device.c
  180   folio = page_folio(page);   in migrate_vma_collect_pmd()
  327   struct folio *folio = page_folio(page);   in migrate_vma_check_page()
  382   folio = page_folio(page);   in migrate_device_unmap()
  426   folio = page_folio(page);   in migrate_device_unmap()
  572   struct folio *folio = page_folio(page);   in migrate_vma_insert_page()
  730   newfolio = page_folio(newpage);   in __migrate_device_pages()
  731   folio = page_folio(page);   in __migrate_device_pages()
  823   dst = page_folio(newpage);   in migrate_device_finalize()
  833   src = page_folio(page);   in migrate_device_finalize()

mmu_gather.c
  66    folio_remove_rmap_ptes(page_folio(page), page, nr_pages,   in tlb_flush_rmap_batch()
  176   VM_WARN_ON_ONCE(page_folio(page) != page_folio(page + nr_pages - 1));   in __tlb_remove_folio_pages_size()

page_idle.c
  42    folio = page_folio(page);   in page_idle_get_folio()
  45    if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) {   in page_idle_get_folio()

gup.c
  60    folio = page_folio(page);   in sanity_check_pinned_pages()
  83    folio = page_folio(page);   in try_get_folio()
  98    if (unlikely(page_folio(page) != folio)) {   in try_get_folio()
  194   gup_put_folio(page_folio(page), 1, FOLL_PIN);   in unpin_user_page()
  242   struct folio *folio = page_folio(next);   in gup_folio_range_next()
  256   struct folio *folio = page_folio(list[i]);   in gup_folio_next()
  260   if (page_folio(list[nr]) != folio)   in gup_folio_next()
  555   return page_folio(page);   in try_grab_folio_fast()
  711   ret = try_grab_folio(page_folio(page), 1, flags);   in follow_huge_pud()
  768   ret = try_grab_folio(page_folio(page), 1, flags);   in follow_huge_pmd()
  [all …]

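Several of the mm/ hits above (page_idle_get_folio(), try_get_folio(), damon_get_folio()) share one shape: resolve the page to its folio, take a reference, then re-derive the folio from the page and check that nothing moved, since the folio can be split, or freed and reused, between the lookup and the grab. The sketch below illustrates that pattern using only the generic helpers page_folio(), folio_test_lru(), folio_try_get() and folio_put(); the function name speculative_get_folio() is invented for illustration and does not exist in the tree.

    /* Hypothetical helper mirroring the page_idle/gup/damon lookup pattern. */
    static struct folio *speculative_get_folio(struct page *page)
    {
            struct folio *folio = page_folio(page);

            /* Only LRU folios are of interest here; grab a speculative ref. */
            if (!folio_test_lru(folio) || !folio_try_get(folio))
                    return NULL;

            /*
             * The folio may have been split or freed and reused while the
             * reference was taken: re-derive it and re-check the LRU state.
             */
            if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) {
                    folio_put(folio);
                    return NULL;
            }

            return folio;
    }
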
/linux/arch/s390/kvm/

gmap.c
  60    struct folio *folio = page_folio(page);   in __gmap_make_secure()
  79    folio = page_folio(page);   in __gmap_make_secure()
  163   struct folio *folio = page_folio(page);   in __gmap_destroy_page()

/linux/arch/csky/abiv1/

cacheflush.c
  40    flush_dcache_folio(page_folio(page));   in flush_dcache_page()
  58    folio = page_folio(pfn_to_page(pfn));   in update_mmu_cache_range()

/linux/arch/arm64/mm/

copypage.c
  21    struct folio *src = page_folio(from);   in copy_highpage()
  22    struct folio *dst = page_folio(to);   in copy_highpage()

flush.c
  54    struct folio *folio = page_folio(pte_page(pte));   in __sync_icache_dcache()
  79    flush_dcache_folio(page_folio(page));   in flush_dcache_page()

/linux/arch/mips/mm/

cache.c
  104   struct folio *folio = page_folio(page);   in __flush_dcache_pages()
  130   struct folio *folio = page_folio(page);   in __flush_anon_page()
  157   folio = page_folio(pfn_to_page(pfn));   in __update_cache()

init.c
  93    BUG_ON(folio_test_dcache_dirty(page_folio(page)));   in __kmap_pgprot()
  174   struct folio *src = page_folio(from);   in copy_user_highpage()
  200   struct folio *folio = page_folio(page);   in copy_to_user_page()
  220   struct folio *folio = page_folio(page);   in copy_from_user_page()

/linux/mm/damon/

ops-common.c
  30    folio = page_folio(page);   in damon_get_folio()
  33    if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) {   in damon_get_folio()

/linux/include/linux/

page-flags.h
  265   #define page_folio(p) (_Generic((p), \   macro
  699   unsigned long flags = (unsigned long)page_folio(page)->mapping;   in PageAnonNotKsm()
  706   return folio_test_anon(page_folio(page));   in PageAnon()
  787   return folio_test_uptodate(page_folio(page));   in PageUptodate()
  1061  return folio_test_slab(page_folio(page));   in PAGE_TYPE_OPS()
  1089  return folio_test_hugetlb(page_folio(page));   in FOLIO_TYPE_OPS()
  1103  folio = page_folio(page);   in is_page_hwpoison()

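The page-flags.h hits show why page_folio() appears everywhere: it is the _Generic-based macro (line 265) that maps a page, possibly a tail page, to its containing folio, and the legacy PageXxx() predicates are now thin wrappers around the folio_test_xxx() accessors. A sketch of that wrapper shape, paraphrased from the PageAnon()/PageUptodate() hits above rather than copied verbatim from the header:

    /* Legacy per-page predicates expressed via the folio API. */
    static inline bool PageAnon(const struct page *page)
    {
            return folio_test_anon(page_folio(page));
    }

    static inline bool PageUptodate(const struct page *page)
    {
            return folio_test_uptodate(page_folio(page));
    }
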
/linux/fs/btrfs/tests/

extent-io-tests.c
  183   found = find_lock_delalloc_range(inode, page_folio(locked_page), &start,   in test_find_delalloc()
  214   found = find_lock_delalloc_range(inode, page_folio(locked_page), &start,   in test_find_delalloc()
  248   found = find_lock_delalloc_range(inode, page_folio(locked_page), &start,   in test_find_delalloc()
  269   found = find_lock_delalloc_range(inode, page_folio(locked_page), &start,   in test_find_delalloc()
  310   found = find_lock_delalloc_range(inode, page_folio(locked_page), &start,   in test_find_delalloc()

/linux/arch/openrisc/include/asm/

cacheflush.h
  68    flush_dcache_folio(page_folio(page));   in flush_dcache_page()

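The openrisc entry above is typical of the arch code in this listing (riscv, csky, nios2, arm, arm64 and arc all have equivalent hits): flush_dcache_page() has become a one-line shim that converts the page to its folio and defers to flush_dcache_folio(), which is where the real per-architecture work lives. A minimal sketch of that shim, assuming the architecture provides flush_dcache_folio():

    /* Per-page cache flush delegating to the folio-wide implementation. */
    static inline void flush_dcache_page(struct page *page)
    {
            flush_dcache_folio(page_folio(page));
    }
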
/linux/arch/openrisc/mm/

cache.c
  46    struct folio *folio = page_folio(pfn_to_page(pfn));   in update_cache()

/linux/arch/s390/mm/

hugetlbpage.c
  148   folio = page_folio(pud_page(__pud(rste)));   in clear_huge_pte_skeys()
  152   folio = page_folio(pmd_page(__pmd(rste)));   in clear_huge_pte_skeys()

/linux/arch/arm/mm/

copypage-xscale.c
  87    struct folio *src = page_folio(from);   in xscale_mc_copy_user_highpage()

copypage-v4mc.c
  67    struct folio *src = page_folio(from);   in v4_mc_copy_user_highpage()

flush.c
  298   folio = page_folio(pfn_to_page(pfn));   in __sync_icache_dcache()
  369   flush_dcache_folio(page_folio(page));   in flush_dcache_page()

/linux/arch/sh/mm/

kmap.c
  30    struct folio *folio = page_folio(page);   in kmap_coherent()

/linux/arch/nios2/mm/

cacheflush.c
  205   flush_dcache_folio(page_folio(page));   in flush_dcache_page()
  229   folio = page_folio(pfn_to_page(pfn));   in update_mmu_cache_range()

/linux/arch/riscv/include/asm/

cacheflush.h
  34    flush_dcache_folio(page_folio(page));   in flush_dcache_page()

/linux/arch/arc/mm/

cache.c
  714   return flush_dcache_folio(page_folio(page));   in flush_dcache_page()
  885   struct folio *src = page_folio(from);   in copy_user_highpage()
  886   struct folio *dst = page_folio(to);   in copy_user_highpage()
  901   struct folio *folio = page_folio(page);   in clear_user_page()

/linux/arch/csky/abiv2/inc/abi/

cacheflush.h
  31    flush_dcache_folio(page_folio(page));   in flush_dcache_page()