
Searched refs: page_folio (Results 1 – 25 of 134) sorted by relevance


/linux/mm/
folio-compat.c
15 return folio_mapping(page_folio(page)); in page_mapping()
21 return folio_unlock(page_folio(page)); in unlock_page()
27 return folio_end_writeback(page_folio(page)); in end_page_writeback()
33 return folio_wait_writeback(page_folio(page)); in wait_on_page_writeback()
39 return folio_wait_stable(page_folio(page)); in wait_for_stable_page()
45 folio_mark_accessed(page_folio(page)); in mark_page_accessed()
51 folio_start_writeback(page_folio(page)); in set_page_writeback()
57 return folio_mark_dirty(page_folio(page)); in set_page_dirty()
63 return folio_clear_dirty_for_io(page_folio(page)); in __set_page_dirty_nobuffers()
70 return folio_redirty_for_writepage(wbc, page_folio(pag in clear_page_dirty_for_io()
[all...]
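
The mm/folio-compat.c hits above all share one shape: a legacy page-based entry point resolves its page to the containing folio with page_folio() and forwards to the folio-native call. A minimal sketch using two of the wrappers listed above (EXPORT_SYMBOL lines and version-specific return types omitted):

#include <linux/pagemap.h>	/* folio_unlock(), folio_end_writeback() */
#include <linux/page-flags.h>	/* page_folio() */

/* Legacy page API: the real work happens on the folio. */
void unlock_page(struct page *page)
{
	folio_unlock(page_folio(page));
}

void end_page_writeback(struct page *page)
{
	folio_end_writeback(page_folio(page));
}
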
migrate_device.c
180 folio = page_folio(page); in migrate_vma_collect_pmd()
327 struct folio *folio = page_folio(page); in migrate_vma_check_page()
400 folio = page_folio(page); in migrate_device_unmap()
426 folio = page_folio(page); in migrate_device_unmap()
572 struct folio *folio = page_folio(page); in migrate_vma_insert_page()
730 folio = page_folio(page); in __migrate_device_pages()
757 r = migrate_folio_extra(mapping, page_folio(newpage), in __migrate_device_pages()
760 r = migrate_folio(mapping, page_folio(newpage), in __migrate_device_pages()
836 src = page_folio(page); in migrate_device_finalize()
837 dst = page_folio(newpag in migrate_device_finalize()
[all...]
memory-failure.c
168 ret = dissolve_free_hugetlb_folio(page_folio(page)); in __page_handle_poison()
219 struct folio *folio = page_folio(p); in hwpoison_filter_dev()
390 shake_folio(page_folio(page)); in dev_pagemap_mapping_shift()
999 count -= folio_nr_pages(page_folio(p)); in has_extra_refcount()
1036 struct folio *folio = page_folio(p); in me_pagecache_clean()
1094 struct folio *folio = page_folio(p); in me_pagecache_dirty()
1161 struct folio *folio = page_folio(p); in me_swapcache_dirty()
1183 struct folio *folio = page_folio(p); in me_swapcache_clean()
1205 struct folio *folio = page_folio(p); in me_huge_page()
1398 struct folio *folio = page_folio(pag in HWPoisonHandlable()
[all...]
mmu_gather.c
66 folio_remove_rmap_ptes(page_folio(page), page, nr_pages, in tlb_flush_rmap_batch()
176 VM_WARN_ON_ONCE(page_folio(page) != page_folio(page + nr_pages - 1)); in __tlb_remove_folio_pages_size()
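
In mmu_gather.c line 176 above (and in the s390 __tlb_remove_folio_pages() hit further down this page) page_folio() is used for a sanity check rather than a conversion: a batch of nr_pages consecutive pages must not cross a folio boundary, so the folio of the first and the last page has to be the same. Illustrative sketch only; the kernel open-codes this check, and the helper name here is invented:

#include <linux/mmdebug.h>	/* VM_WARN_ON_ONCE() */
#include <linux/page-flags.h>	/* page_folio() */

/* Hypothetical helper: warn once if [page, page + nr_pages) spans more than one folio. */
static inline void assert_pages_in_one_folio(struct page *page, unsigned int nr_pages)
{
	VM_WARN_ON_ONCE(page_folio(page) != page_folio(page + nr_pages - 1));
}
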
page_idle.c
42 folio = page_folio(page); in page_idle_get_folio()
45 if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) { in page_idle_get_folio()
gup.c
53 struct folio *folio = page_folio(page); in sanity_check_pinned_pages()
76 folio = page_folio(page); in try_get_folio()
84 * could be that between calling page_folio() and the refcount in try_get_folio()
91 if (unlikely(page_folio(page) != folio)) { in try_get_folio()
146 return page_folio(page); in try_grab_folio()
227 struct folio *folio = page_folio(page); in try_grab_page()
275 gup_put_folio(page_folio(page), 1, FOLL_PIN); in unpin_user_page()
310 struct folio *folio = page_folio(next); in gup_folio_range_next()
324 struct folio *folio = page_folio(list[i]); in gup_folio_next()
328 if (page_folio(lis in gup_folio_next()
[all...]
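
The gup.c comment fragment at line 84 above explains why several of the listed callers read page_folio() twice: between the first page_folio() lookup and taking a reference, a large folio can be split, so the folio is re-resolved and compared once the reference is held. page_idle_get_folio() and damon_get_folio() (both on this page) follow the same shape; gup's try_get_folio() additionally retries instead of failing. A simplified sketch of the page_idle/damon variant, with a made-up function name and without the caller-specific checks:

#include <linux/mm.h>		/* folio_put() */
#include <linux/page-flags.h>	/* page_folio(), folio_test_lru() */
#include <linux/page_ref.h>	/* folio_try_get() */

/* Hypothetical: pin the folio backing @page, guarding against a concurrent split. */
static struct folio *get_stable_folio(struct page *page)
{
	struct folio *folio = page_folio(page);

	if (!folio_test_lru(folio) || !folio_try_get(folio))
		return NULL;

	/* Re-check: @page may have been freed or moved to another folio meanwhile. */
	if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) {
		folio_put(folio);
		return NULL;
	}
	return folio;
}
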
huge_memory.c
922 struct folio *folio = page_folio(page); in __do_huge_pmd_anonymous_page()
1414 src_folio = page_folio(src_page); in follow_devmap_pud()
1550 folio = page_folio(page); in do_huge_pmd_wp_page()
1848 folio = page_folio(page); in madvise_free_huge_pmd()
2145 src_folio = page_folio(src_page); in change_huge_pmd()
2403 folio = page_folio(page); in __split_huge_zero_page_pmd()
2468 folio = page_folio(page); in __split_huge_pmd_locked()
2827 struct folio *folio = page_folio(page); in __split_huge_page_tail()
2855 struct folio *tail = page_folio(head + i); in __split_huge_page_tail()
2916 struct folio *new_folio = page_folio(subpag in __split_huge_page()
[all...]
/linux/include/linux/
page_idle.h
page-flags.h
267 #define page_folio(p) (_Generic((p), \ macro
587 return folio_test_swapcache(page_folio(page)); in PageSwapCache()
708 return folio_test_anon(page_folio(page)); in PageAnon()
738 return folio_test_ksm(page_folio(page)); in PageKsm()
794 return folio_test_uptodate(page_folio(page)); in PageUptodate()
1064 return folio_test_slab(page_folio(page)); in PAGE_TYPE_OPS()
1083 return folio_test_hugetlb(page_folio(page)); in FOLIO_TYPE_OPS()
1097 folio = page_folio(page); in is_page_hwpoison()
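
The page-flags.h hit at line 267 is the definition of page_folio() itself, truncated by the results view; the remaining hits in that file are the usual wrappers that implement page-based flag tests on top of their folio counterparts. In recent kernels the macro is (roughly) a _Generic dispatch, so that a const struct page * yields a const struct folio *, with _compound_head() resolving a tail page to its compound head. Approximate reconstruction; the exact form varies by kernel version:

#define page_folio(p)		(_Generic((p),				\
	const struct page *:	(const struct folio *)_compound_head(p), \
	struct page *:		(struct folio *)_compound_head(p)))
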
pagemap.h
475 return folio_file_mapping(page_folio(page));
550 folio_attach_private(page_folio(page), data); in detach_page_private()
555 return folio_detach_private(page_folio(page));
1031 return folio_trylock(page_folio(page));
1079 folio = page_folio(page);
1147 folio_wait_locked(page_folio(page));
1555 return i_blocks_per_folio(inode, page_folio(page));
rmap.h
213 VM_WARN_ON_FOLIO(page_folio(page) != folio, folio); in __folio_rmap_sanity_checks()
214 VM_WARN_ON_FOLIO(page_folio(page + nr_pages - 1) != folio, folio); in __folio_rmap_sanity_checks()
792 return folio_mkclean(page_folio(page));
/linux/arch/sh/mm/
cache.c
64 struct folio *folio = page_folio(page); in copy_to_user_page()
85 struct folio *folio = page_folio(page); in copy_from_user_page()
102 struct folio *src = page_folio(from); in copy_user_highpage()
150 struct folio *folio = page_folio(pfn_to_page(pfn)); in __update_cache()
160 struct folio *folio = page_folio(page); in __flush_anon_page()
247 cacheop_on_each_cpu(local_flush_icache_folio, page_folio(page), 1); in flush_icache_pages()
kmap.c
30 struct folio *folio = page_folio(page); in kmap_coherent()
/linux/arch/csky/abiv1/
cacheflush.c
40 flush_dcache_folio(page_folio(page)); in flush_dcache_page()
58 folio = page_folio(pfn_to_page(pfn)); in update_mmu_cache_range()
/linux/arch/arm64/mm/
flush.c
54 struct folio *folio = page_folio(pte_page(pte)); in __sync_icache_dcache()
79 flush_dcache_folio(page_folio(page)); in flush_dcache_page()
/linux/arch/mips/mm/
cache.c
104 struct folio *folio = page_folio(page); in __flush_dcache_pages()
130 struct folio *folio = page_folio(page); in __flush_anon_page()
157 folio = page_folio(pfn_to_page(pfn)); in __update_cache()
init.c
93 BUG_ON(folio_test_dcache_dirty(page_folio(page))); in __kmap_pgprot()
174 struct folio *src = page_folio(from); in copy_user_highpage()
200 struct folio *folio = page_folio(page); in copy_to_user_page()
220 struct folio *folio = page_folio(page); in copy_from_user_page()
/linux/mm/damon/
ops-common.c
30 folio = page_folio(page); in damon_get_folio()
33 if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) { in damon_get_folio()
/linux/arch/s390/include/asm/
tlb.h
66 VM_WARN_ON_ONCE(page_folio(page) != page_folio(page + nr_pages - 1)); in __tlb_remove_folio_pages()
/linux/arch/openrisc/include/asm/
cacheflush.h
68 flush_dcache_folio(page_folio(page)); in flush_dcache_page()
/linux/arch/openrisc/mm/
cache.c
46 struct folio *folio = page_folio(pfn_to_page(pfn)); in update_cache()
/linux/arch/arm/mm/
copypage-xscale.c
87 struct folio *src = page_folio(from); in xscale_mc_copy_user_highpage()
copypage-v4mc.c
67 struct folio *src = page_folio(from); in v4_mc_copy_user_highpage()
/linux/arch/riscv/include/asm/
cacheflush.h
34 flush_dcache_folio(page_folio(page)); in flush_dcache_page()
/linux/arch/csky/abiv2/inc/abi/
cacheflush.h
31 flush_dcache_folio(page_folio(page)); in flush_dcache_page()
