/linux/arch/arm64/mm/
  copypage.c
    35: from != folio_page(src, 0)) in copy_highpage()
    48: kfrom = page_address(folio_page(src, i)); in copy_highpage()
    49: kto = page_address(folio_page(dst, i)); in copy_highpage()
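The arm64 hits show the per-subpage loop used when copying a large folio: folio_page() selects the i-th page and page_address() gives its linear-map address. A minimal sketch of that pattern follows; copy_folio_contents is an invented name, and the real arm64 copy_highpage() additionally handles MTE tags.

    #include <linux/mm.h>
    #include <linux/string.h>

    /*
     * Hypothetical helper, not taken from the tree: copy every subpage of
     * @src into @dst through the kernel's linear mapping. Assumes both
     * folios have the same order and that none of the pages are in highmem
     * (always true on arm64).
     */
    static void copy_folio_contents(struct folio *dst, struct folio *src)
    {
            long i, nr = folio_nr_pages(src);

            for (i = 0; i < nr; i++) {
                    void *kfrom = page_address(folio_page(src, i));
                    void *kto = page_address(folio_page(dst, i));

                    memcpy(kto, kfrom, PAGE_SIZE);
            }
    }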
/linux/mm/
  util.c
    886: copy_highpage(folio_page(dst, i), folio_page(src, i)); in folio_copy()
    900: if (copy_mc_highpage(folio_page(dst, i), folio_page(src, i))) in folio_mc_copy()
    1241: flush_dcache_page(folio_page(folio, i)); in flush_dcache_folio()
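The util.c hits are the generic per-subpage copy loops: folio_copy() uses copy_highpage(), which maps each page as needed, while folio_mc_copy() uses copy_mc_highpage() so a memory error during the copy is reported rather than consumed. A minimal sketch of the machine-check-aware variant, assuming same-order folios; copy_folio_checked is a made-up name, not the kernel's implementation.

    #include <linux/highmem.h>
    #include <linux/mm.h>
    #include <linux/errno.h>
    #include <linux/sched.h>

    /*
     * Hypothetical sketch: copy @src into @dst one subpage at a time.
     * copy_mc_highpage() returns non-zero if reading the source page hit a
     * memory error, in which case we stop instead of using poisoned data.
     */
    static int copy_folio_checked(struct folio *dst, struct folio *src)
    {
            long i, nr = folio_nr_pages(src);

            for (i = 0; i < nr; i++) {
                    if (copy_mc_highpage(folio_page(dst, i), folio_page(src, i)))
                            return -EHWPOISON;
                    cond_resched();
            }
            return 0;
    }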
  khugepaged.c
    815: struct page *page = folio_page(folio, i); in __collapse_huge_page_copy()
    1595: if (folio_page(folio, i) != page) in collapse_pte_mapped_thp()
    1646: if (folio_page(folio, i) != page) in collapse_pte_mapped_thp()
    2093: dst = folio_page(new_folio, 0); in collapse_file()
    2104: if (copy_mc_highpage(dst, folio_page(folio, i)) > 0) { in collapse_file()
  memory.c
    4487: page = folio_page(folio, 0); in do_swap_page()
    6915: clear_user_highpage(folio_page(folio, i), addr + i * PAGE_SIZE); in clear_gigantic_page()
    6923: clear_user_highpage(folio_page(folio, idx), addr); in clear_subpage()
    6953: dst_page = folio_page(dst, i); in copy_user_gigantic_page()
    6954: src_page = folio_page(src, i); in copy_user_gigantic_page()
    6973: struct page *dst = folio_page(copy_arg->dst, idx); in copy_subpage()
    6974: struct page *src = folio_page(copy_arg->src, idx); in copy_subpage()
    7008: subpage = folio_page(dst_folio, i); in copy_folio_from_user()
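The memory.c hits around clear_gigantic_page() and copy_user_gigantic_page() walk a large folio subpage by subpage, passing each subpage's eventual user virtual address so architectures with aliasing caches can pick the right cache colour. A minimal sketch of that clearing pattern; zero_large_folio is an invented name.

    #include <linux/highmem.h>
    #include <linux/mm.h>
    #include <linux/sched.h>

    /*
     * Hypothetical sketch: zero a large folio that will be mapped for
     * userspace starting at @addr. Each clear_user_highpage() call gets
     * the user address of that particular subpage.
     */
    static void zero_large_folio(struct folio *folio, unsigned long addr)
    {
            long i, nr = folio_nr_pages(folio);

            for (i = 0; i < nr; i++) {
                    clear_user_highpage(folio_page(folio, i), addr + i * PAGE_SIZE);
                    cond_resched();
            }
    }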
  migrate.c
    196: struct page *page = folio_page(folio, idx); in try_to_map_unused_to_zeropage()
    256: new = folio_page(folio, idx); in remove_migration_pte()
  zswap.c
    1569: struct page *page = folio_page(folio, index); in zswap_store()
/linux/fs/freevxfs/
  vxfs_immed.c
    38: memcpy_to_page(folio_page(folio, i), 0, src, PAGE_SIZE); in vxfs_immed_read_folio()
/linux/arch/openrisc/mm/
  cache.c
    58: sync_icache_dcache(folio_page(folio, nr)); in update_cache()
/linux/arch/arm/mm/
  flush.c
    220: void *addr = kmap_high_get(folio_page(folio, i)); in __flush_dcache_folio()
    223: kunmap_high(folio_page(folio, i)); in __flush_dcache_folio()
/linux/include/linux/
  highmem-internal.h
    78: struct page *page = folio_page(folio, offset / PAGE_SIZE); in kmap_local_folio()
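The kmap_local_folio() hit shows how a byte offset inside a folio is turned into a subpage: folio_page(folio, offset / PAGE_SIZE) picks the page and the in-page remainder is added to the mapping. A minimal caller-side sketch, assuming the requested range does not cross a page boundary; read_from_folio is an invented name.

    #include <linux/highmem.h>
    #include <linux/string.h>

    /*
     * Hypothetical sketch: copy @len bytes starting at byte @offset of
     * @folio into @buf. kmap_local_folio() returns the address of the
     * requested byte, valid until kunmap_local().
     */
    static void read_from_folio(struct folio *folio, size_t offset,
                                void *buf, size_t len)
    {
            void *kaddr = kmap_local_folio(folio, offset);

            memcpy(buf, kaddr, len);
            kunmap_local(kaddr);
    }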
  nfs_page.h
    196: return folio_page(folio, pgbase >> PAGE_SHIFT); in nfs_page_to_page()
  page-flags.h
    278: #define folio_page(folio, n) nth_page(&(folio)->page, n) (macro definition)
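The page-flags.h hit is the definition itself: folio_page(folio, n) expands to nth_page(&(folio)->page, n), i.e. the page n places past the folio's head page, so folio_page(folio, 0) is the head page and valid indices run from 0 to folio_nr_pages(folio) - 1. A small sketch of a bounds-checked byte-offset lookup built on it; folio_byte_to_page is not a kernel helper.

    #include <linux/mm.h>
    #include <linux/bug.h>

    /*
     * Hypothetical sketch: return the subpage of @folio that backs byte
     * @offset, or NULL if the offset lies beyond the folio.
     */
    static struct page *folio_byte_to_page(struct folio *folio, size_t offset)
    {
            long idx = offset / PAGE_SIZE;

            if (WARN_ON_ONCE(idx >= folio_nr_pages(folio)))
                    return NULL;

            return folio_page(folio, idx);
    }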
/linux/fs/btrfs/
  tree-checker.c
    68: dump_page(folio_page(eb->folios[0], 0), "eb page dump"); in generic_err()
    96: dump_page(folio_page(eb->folios[0], 0), "eb page dump"); in file_extent_err()
    157: dump_page(folio_page(eb->folios[0], 0), "eb page dump"); in dir_item_err()
    672: dump_page(folio_page(eb->folios[0], 0), "eb page dump"); in block_group_err()
    1041: dump_page(folio_page(eb->folios[0], 0), "eb page dump"); in dev_item_err()
    1297: dump_page(folio_page(eb->folios[0], 0), "eb page dump"); in extent_err()
  extent_io.c
    3004: __free_page(folio_page(eb->folios[i], 0)); in attach_eb_folio_to_filemap()
    3019: __free_page(folio_page(eb->folios[i], 0)); in attach_eb_folio_to_filemap()
    3159: if (i && folio_page(eb->folios[i - 1], 0) + 1 != folio_page(folio, 0)) in alloc_extent_buffer()
  subpage.c
    754: dump_page(folio_page(folio, 0), "btrfs subpage dump"); in btrfs_subpage_dump_bitmap()
/linux/virt/kvm/
  guest_memfd.c
    65: clear_highpage(folio_page(folio, i)); in kvm_gmem_prepare_folio()
    368: struct page *page = folio_page(folio, 0);
/linux/fs/crypto/
  crypto.c
    285: struct page *page = folio_page(folio, i >> PAGE_SHIFT); in fscrypt_decrypt_pagecache_blocks()
/linux/fs/smb/client/
  compress.c
    191: p = kmap_local_page(folio_page(folio, j)); in collect_sample()
/linux/lib/
  scatterlist.c
    1297: sg_set_page(sg, folio_page(folio, 0), part, offset); in extract_folioq_to_sg()
    1358: sg_set_page(sg, folio_page(folio, 0), len, offset); in extract_xarray_to_sg()
  alloc_tag.c
    185: if (get_page_tag_ref(folio_page(folio, i), &ref, &handle)) { in pgalloc_tag_split()
  iov_iter.c
    1032: *pages = folio_page(folio, offset / PAGE_SIZE); in iter_folioq_get_pages()
    1618: p[nr++] = folio_page(folio, offset / PAGE_SIZE); in iov_iter_extract_folioq_pages()
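The scatterlist.c and iov_iter.c hits all perform the same conversion: APIs that still traffic in struct page pointers get one out of a folio via folio_page(), either the head page plus a byte offset or the exact subpage at offset / PAGE_SIZE. A minimal sketch of describing part of a folio as one scatterlist entry; folio_range_to_sg is an invented name, and it assumes offset + len stays inside the folio.

    #include <linux/scatterlist.h>
    #include <linux/mm.h>

    /*
     * Hypothetical sketch: point @sg at @len bytes of @folio starting at
     * byte @offset, normalising to the subpage that contains the offset.
     */
    static void folio_range_to_sg(struct scatterlist *sg, struct folio *folio,
                                  size_t offset, unsigned int len)
    {
            struct page *page = folio_page(folio, offset / PAGE_SIZE);

            sg_set_page(sg, page, len, offset % PAGE_SIZE);
    }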
/linux/drivers/iommu/iommufd/
  pages.c
    650: unsigned long pfn = page_to_pfn(folio_page(folio, offset)); in batch_from_folios()
    835: *upages++ = folio_page(folio, offset); in pin_memfd_pages()
    843: *upages++ = folio_page(folio, offset++); in pin_memfd_pages()
/linux/fs/btrfs/tests/
  extent-io-tests.c
    675: struct page *page = folio_page(eb->folios[i >> PAGE_SHIFT], 0); in dump_eb_and_memory_contents()
/linux/fs/erofs/
  zdata.c
    554: folio_page(folio ?: newfolio, 0); in z_erofs_bind_cache()
    1046: .page = folio_page(folio, pgs >> PAGE_SHIFT), in z_erofs_scan_folio()
/linux/fs/ceph/
  addr.c
    1514: snapc = ceph_find_incompatible(folio_page(*foliop, 0)); in ceph_netfs_check_write_begin()
    1936: pages[0] = folio_page(folio, 0); in ceph_uninline_data()