/linux/include/trace/events/
fs_dax.h
    12   pgoff_t max_pgoff, int result),
    20   __field(pgoff_t, pgoff)
    21   __field(pgoff_t, max_pgoff)
    57   pgoff_t max_pgoff, int result), \
    112  __field(pgoff_t, pgoff)
    150  TP_PROTO(struct inode *inode, pgoff_t start_index, pgoff_t end_index),
    154  __field(pgoff_t, start_index)
    155  __field(pgoff_t, end_index)
    175  TP_PROTO(struct inode *inode, pgoff_t start_index, pgoff_t end_index), \
    182  TP_PROTO(struct inode *inode, pgoff_t pgoff, pgoff_t pglen),
    [all …]
filemap.h
    63   pgoff_t index,
    64   pgoff_t last_index
    99   pgoff_t index,
    100  pgoff_t last_index
    108  pgoff_t index,
    109  pgoff_t last_index
    115  TP_PROTO(struct address_space *mapping, pgoff_t index),
/linux/mm/
mapping_dirty_helpers.c
    67   pgoff_t bitmap_pgoff;
    69   pgoff_t start;
    70   pgoff_t end;
    97   pgoff_t pgoff = ((addr - walk->vma->vm_start) >> PAGE_SHIFT) +  in clean_record_pte()
    264  pgoff_t first_index, pgoff_t nr)  in wp_shared_mapping_range()
    314  pgoff_t first_index, pgoff_t nr,  in clean_record_shared_mapping_range()
    315  pgoff_t bitmap_pgoff,  in clean_record_shared_mapping_range()
    317  pgoff_t *start  in clean_record_shared_mapping_range()
    [all …]
swap.h
    49   static inline pgoff_t swap_cache_index(swp_entry_t entry)  in swap_cache_index()
    68   pgoff_t index);
    74   struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
    77   struct mempolicy *mpol, pgoff_t ilx);
    114  pgoff_t offset = swp_offset(entry);  in non_swapcache_batch()
    144  static inline pgoff_t swap_cache_index(swp_entry_t entry)  in swap_cache_index()
    154  gfp_t gfp_mask, struct mempolicy *mpol, pgoff_t ilx)  in swap_cluster_readahead()
    183  pgoff_t index)  in filemap_get_incore_folio()
    241  static inline pgoff_t folio_index(struct folio *folio)  in folio_index()
truncate.c
    61   struct folio_batch *fbatch, pgoff_t *indices)  in truncate_folio_batch_exceptionals()
    344  pgoff_t start; /* inclusive */  in truncate_inode_pages_range()
    345  pgoff_t end; /* exclusive */  in truncate_inode_pages_range()
    347  pgoff_t indices[PAGEVEC_SIZE];  in truncate_inode_pages_range()
    348  pgoff_t index;  in truncate_inode_pages_range()
    366  * to the highest possible pgoff_t and since the type is  in truncate_inode_pages_range()
    510  pgoff_t start, pgoff_t end, unsigned long *nr_failed)  in mapping_try_invalidate()
    512  pgoff_t indices[PAGEVEC_SIZE];  in mapping_try_invalidate()
    514  pgoff_t index  in mapping_try_invalidate()
    [all …]
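The start/end pair in truncate_inode_pages_range() (lines 344-345) is a half-open pgoff_t window: start is the first page lying entirely inside the byte range, end the first page past it, and the partially covered edge pages are zeroed in place rather than dropped. A minimal userspace sketch of that rounding, assuming 4 KiB pages; the helper name and edge handling are illustrative, not the kernel's exact code:

```c
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

typedef unsigned long pgoff_t;

/*
 * Byte range [lstart, lend] (lend inclusive) -> page window
 * [start, end) of pages that lie entirely inside it. Pages only
 * partially covered fall outside the window and would be zeroed
 * in place rather than truncated away.
 */
static void byte_range_to_page_window(long long lstart, long long lend,
                                      pgoff_t *start, pgoff_t *end)
{
    *start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT; /* round up */
    *end   = (lend + 1) >> PAGE_SHIFT;               /* round down */
}

int main(void)
{
    pgoff_t start, end;

    /* bytes 1000..20000: pages 0 and 4 are only partially covered */
    byte_range_to_page_window(1000, 20000, &start, &end);
    printf("fully covered pages: [%lu, %lu)\n", start, end); /* [1, 4) */
    return 0;
}
```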
readahead.c
    315  pgoff_t end_index; /* The last page we want to read */  in do_page_cache_ra()
    437  static inline int ra_alloc_folio(struct readahead_control *ractl, pgoff_t index,  in ra_alloc_folio()
    438  pgoff_t mark, unsigned int order, gfp_t gfp)  in ra_alloc_folio()
    463  pgoff_t start = readahead_index(ractl);  in page_cache_ra_order()
    464  pgoff_t index = start;  in page_cache_ra_order()
    466  pgoff_t limit = (i_size_read(mapping->host) - 1) >> PAGE_SHIFT;  in page_cache_ra_order()
    467  pgoff_t mark = index + ra->size - ra->async_size;  in page_cache_ra_order()
    551  pgoff_t index = readahead_index(ractl);  in page_cache_sync_ra()
    555  pgoff_t prev_index, miss;  in page_cache_sync_ra()
    628  pgoff_t index  in page_cache_async_ra()
    [all …]
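Line 467 shows how page_cache_ra_order() places the readahead mark: mark = index + ra->size - ra->async_size. The folio at that index carries the readahead flag, and touching it kicks off the next asynchronous window before the reader drains the current one. A small sketch of that arithmetic; the struct below is a pared-down stand-in for struct file_ra_state:

```c
#include <stdio.h>

typedef unsigned long pgoff_t;

/* Pared-down stand-in for the fields of struct file_ra_state used here. */
struct file_ra_state {
    pgoff_t start;           /* first page of the current window */
    unsigned int size;       /* pages in the window */
    unsigned int async_size; /* trailing pages that re-arm readahead */
};

/*
 * mark = index + ra->size - ra->async_size (line 467 above): the
 * folio at 'mark' gets the readahead flag, and touching it starts
 * the next asynchronous window early.
 */
static pgoff_t ra_mark(pgoff_t index, const struct file_ra_state *ra)
{
    return index + ra->size - ra->async_size;
}

int main(void)
{
    struct file_ra_state ra = { .start = 100, .size = 32, .async_size = 16 };

    printf("mark at page %lu\n", ra_mark(ra.start, &ra)); /* page 116 */
    return 0;
}
```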
filemap.c
    491   pgoff_t max = end_byte >> PAGE_SHIFT;  in filemap_range_has_page()
    520   pgoff_t index = start_byte >> PAGE_SHIFT;  in __filemap_fdatawait_range()
    521   pgoff_t end = end_byte >> PAGE_SHIFT;  in __filemap_fdatawait_range()
    648   pgoff_t max = end_byte >> PAGE_SHIFT;  in filemap_range_has_writeback()
    824   pgoff_t offset = old->index;  in replace_page_cache_folio()
    858   struct folio *folio, pgoff_t index, gfp_t gfp, void **shadowp)  in __filemap_add_folio()
    959   pgoff_t index, gfp_t gfp)  in filemap_add_folio()
    1777  pgoff_t page_cache_next_miss(struct address_space *mapping,  in page_cache_next_miss()
    1778  pgoff_t index, unsigned long max_scan)  in page_cache_next_miss()
    1814  pgoff_t page_cache_prev_miss(  in page_cache_prev_miss()
    [all …]
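The recurring pattern in these hits (lines 491 and 520-521) converts a byte range into an inclusive pgoff_t range by shifting both endpoints right by PAGE_SHIFT; rounding both down still covers edge pages that only partly overlap the byte range. A compilable userspace sketch, assuming 4 KiB pages:

```c
#include <stdio.h>

#define PAGE_SHIFT 12
typedef unsigned long pgoff_t;

/*
 * The filemap.c idiom: shift both byte endpoints down to page
 * indices. Rounding both down yields an inclusive index range that
 * still covers edge pages which only partially overlap the bytes.
 */
static void byte_range_to_indices(unsigned long long start_byte,
                                  unsigned long long end_byte,
                                  pgoff_t *index, pgoff_t *max)
{
    *index = start_byte >> PAGE_SHIFT;
    *max   = end_byte >> PAGE_SHIFT;
}

int main(void)
{
    pgoff_t index, max;

    byte_range_to_indices(5000, 13000, &index, &max);
    printf("pages %lu..%lu inclusive\n", index, max); /* 1..3 */
    return 0;
}
```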
shmem.c
    107  pgoff_t start; /* start of range currently being fallocated */
    108  pgoff_t next; /* the next page offset to be fallocated */
    109  pgoff_t nr_falloced; /* how many new pages have been fallocated */
    110  pgoff_t nr_unswapped; /* how often writeout refused to swap out */
    161  static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
    499  pgoff_t index, void *expected, void *replacement)  in shmem_replace_entry()
    521  static int shmem_confirm_swap(struct address_space *mapping, pgoff_t index,  in shmem_confirm_swap()
    590  shmem_mapping_size_orders(struct address_space *mapping, pgoff_t index, loff_t write_end)  in shmem_mapping_size_orders()
    613  unsigned long within_size_orders, pgoff_t index,  in shmem_get_orders_within_size()
    616  pgoff_t aligned_index;  in shmem_get_orders_within_size()
    [all …]
swap_state.c
    75   pgoff_t idx = swap_cache_index(entry);  in get_shadow_from_swap_cache()
    92   pgoff_t idx = swap_cache_index(entry);  in add_to_swap_cache()
    147  pgoff_t idx = swap_cache_index(entry);  in __delete_from_swap_cache()
    337  pgoff_t index)  in filemap_get_incore_folio()
    365  struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,  in __read_swap_cache_async()
    487  pgoff_t ilx;  in read_swap_cache_async()
    586  struct mempolicy *mpol, pgoff_t ilx)  in swap_cluster_readahead()
    729  struct mempolicy *mpol, pgoff_t targ_ilx, struct vm_fault *vmf)  in swap_vma_readahead()
    738  pgoff_t ilx;  in swap_vma_readahead()
    805  pgoff_t ilx;  in swapin_readahead()
internal.h
    464   struct file *file, pgoff_t index, unsigned long nr_to_read)  in force_page_cache_readahead()
    470   unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
    471   pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
    472   unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
    473   pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
    480   pgoff_t start, pgoff_t end, unsigned long *nr_failed);
    986   pgoff_t pgoff, addr;  in folio_within_range()
    1075  pgoff_t pgoff, unsigned long nr_pages)  in vma_address()
    1101  pgoff_t pgoff;  in vma_address_end()
    1526  pgoff_t pgoff)  in vma_set_range()
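vma_address() (line 1075) maps a file page offset to the user virtual address that would hold it: subtract the VMA's vm_pgoff, scale to bytes, add vm_start. A self-contained sketch of that formula, with a toy three-field VMA; the out-of-range handling here is illustrative, as the kernel helper signals that case differently:

```c
#include <stdio.h>

#define PAGE_SHIFT 12
typedef unsigned long pgoff_t;

/* Only the three fields the calculation needs (toy definition). */
struct vm_area_struct {
    unsigned long vm_start; /* first mapped virtual address */
    unsigned long vm_end;   /* first address past the mapping */
    pgoff_t vm_pgoff;       /* file page index mapped at vm_start */
};

/*
 * The formula behind vma_address(): offset the file index from the
 * index mapped at vm_start, scale to bytes, add vm_start. Returns 0
 * for out-of-range indices in this sketch.
 */
static unsigned long vma_address_of(const struct vm_area_struct *vma,
                                    pgoff_t pgoff)
{
    unsigned long addr;

    if (pgoff < vma->vm_pgoff)
        return 0;
    addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
    return addr < vma->vm_end ? addr : 0;
}

int main(void)
{
    struct vm_area_struct vma = {
        .vm_start = 0x7f0000000000UL,
        .vm_end   = 0x7f0000100000UL,
        .vm_pgoff = 10, /* vm_start maps file page 10 */
    };

    printf("file page 12 -> %#lx\n", vma_address_of(&vma, 12));
    return 0;
}
```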
folio-compat.c
    69  pgoff_t index, gfp_t gfp)  in add_to_page_cache_lru()
    76  struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,  in pagecache_get_page()
/linux/include/linux/
dax.h
    28   long (*direct_access)(struct dax_device *, pgoff_t, long,
    31   int (*zero_page_range)(struct dax_device *, pgoff_t, size_t);
    36   size_t (*recovery_write)(struct dax_device *dax_dev, pgoff_t pgoff,
    63   size_t dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
    121  pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)  in dax_recovery_write()
    171  static inline struct page *dax_layout_busy_page_range(struct address_space *mapping, pgoff_t start, pgoff_t nr_pages)  in dax_layout_busy_page_range()
    245  long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
    247  size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
    249  size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff
    [all …]
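dax_direct_access() (line 245) takes a pgoff_t into the DAX device plus a page count and reports how many pages from that offset are directly addressable, along with their mapped address. A userspace analogue of that contract over a malloc-backed "device"; toy_dax_device and toy_direct_access are invented names, and the real call additionally takes an access mode and returns a pfn:

```c
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

typedef unsigned long pgoff_t;

/* Toy stand-in for a DAX device: a flat buffer of whole pages. */
struct toy_dax_device {
    void *base;
    pgoff_t nr_pages;
};

/*
 * Mimics the ->direct_access() contract: translate pgoff to a
 * directly usable address and report how many pages from pgoff
 * onward exist, which may be fewer than requested. Negative return
 * means the offset is out of range.
 */
static long toy_direct_access(struct toy_dax_device *dev, pgoff_t pgoff,
                              long nr_pages, void **kaddr)
{
    long avail;

    if (pgoff >= dev->nr_pages)
        return -1;
    avail = (long)(dev->nr_pages - pgoff);
    if (kaddr)
        *kaddr = (char *)dev->base + (pgoff << PAGE_SHIFT);
    return avail < nr_pages ? avail : nr_pages;
}

int main(void)
{
    struct toy_dax_device dev = { malloc(16 * PAGE_SIZE), 16 };
    void *kaddr;
    long got = toy_direct_access(&dev, 14, 8, &kaddr);

    printf("asked for 8 pages at pgoff 14, got %ld\n", got); /* got 2 */
    free(dev.base);
    return 0;
}
```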
mempolicy.h
    118  pgoff_t start, end;
    128  pgoff_t idx);
    132  unsigned long addr, pgoff_t *ilx);
    134  unsigned long addr, int order, pgoff_t *ilx);
    223  mpol_shared_policy_lookup(struct shared_policy *sp, pgoff_t idx)  in mpol_shared_policy_lookup()
    229  unsigned long addr, int order, pgoff_t *ilx)  in get_vma_policy()
memfd.h
    9   struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx);
    23  static inline struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx)  in memfd_alloc_folio()
/linux/virt/kvm/
guest_memfd.c
    23   static inline kvm_pfn_t folio_file_pfn(struct folio *folio, pgoff_t index)  in folio_file_pfn()
    29   pgoff_t index, struct folio *folio)  in __kvm_gmem_prepare_folio()
    60   pgoff_t index;  in kvm_gmem_prepare_folio()
    99   static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)  in kvm_gmem_get_folio()
    105  static void kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start,  in kvm_gmem_invalidate_begin()
    106  pgoff_t end)  in kvm_gmem_invalidate_begin()
    114  pgoff_t pgoff = slot->gmem.pgoff;  in kvm_gmem_invalidate_begin()
    142  static void kvm_gmem_invalidate_end(struct kvm_gmem *gmem, pgoff_t start,  in kvm_gmem_invalidate_end()
    143  pgoff_t end)  in kvm_gmem_invalidate_end()
    157  pgoff_t start = offset >> PAGE_SHIFT;  in kvm_gmem_punch_hole()
    [all …]
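folio_file_pfn() (line 23) relies on large folios being physically contiguous: the pfn backing file index 'index' is the folio's base pfn plus that index's offset within the folio. A sketch of the arithmetic with a toy folio holding just the two relevant fields:

```c
#include <stdio.h>

typedef unsigned long pgoff_t;
typedef unsigned long kvm_pfn_t;

/* Toy folio: just the two values folio_file_pfn() relates. */
struct toy_folio {
    kvm_pfn_t pfn; /* pfn of the folio's first page */
    pgoff_t index; /* file index of the folio's first page */
};

/*
 * Inside a physically contiguous folio, the page at file index
 * 'index' sits (index - folio->index) pages past the base pfn.
 */
static kvm_pfn_t toy_folio_file_pfn(const struct toy_folio *folio,
                                    pgoff_t index)
{
    return folio->pfn + (index - folio->index);
}

int main(void)
{
    struct toy_folio folio = { .pfn = 0x100, .index = 64 };

    printf("file index 67 -> pfn %#lx\n",
           toy_folio_file_pfn(&folio, 67)); /* 0x103 */
    return 0;
}
```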
/linux/drivers/dma-buf/
udmabuf.c
    28   pgoff_t pagecount;
    39   pgoff_t nr_pinned;
    44   pgoff_t *offsets;
    51   pgoff_t pgoff = vmf->pgoff;  in udmabuf_vm_fault()
    114  pgoff_t pg;  in vmap_udmabuf()
    200  pgoff_t i;  in unpin_all_folios()
    208  static __always_inline int init_udmabuf(struct udmabuf *ubuf, pgoff_t pgcnt)  in init_udmabuf()
    328  pgoff_t nr_pinned = ubuf->nr_pinned;  in udmabuf_pin_folios()
    329  pgoff_t upgcnt = ubuf->pagecount;  in udmabuf_pin_folios()
    331  pgoff_t pgoff, pgcnt;  in udmabuf_pin_folios()
    [all …]
/linux/include/drm/ttm/
ttm_backup.h
    56  void ttm_backup_drop(struct file *backup, pgoff_t handle);
    59  pgoff_t handle, bool intr);
    63  bool writeback, pgoff_t idx, gfp_t page_gfp,
/linux/fs/erofs/
dir.c
    54  pgoff_t ra_pages = DIV_ROUND_UP_POW2(  in erofs_readdir()
    56  pgoff_t nr_pages = DIV_ROUND_UP_POW2(dir->i_size, PAGE_SIZE);  in erofs_readdir()
    73  pgoff_t idx = DIV_ROUND_UP_POW2(ctx->pos, PAGE_SIZE);  in erofs_readdir()
    74  pgoff_t pages = min(nr_pages - idx, ra_pages);  in erofs_readdir()
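All four erofs_readdir() hits size things in pages by rounding a byte count up to the next page boundary, e.g. nr_pages = DIV_ROUND_UP_POW2(dir->i_size, PAGE_SIZE). A sketch of that rounding as an add-then-shift; the function below is an assumed reimplementation of the kernel macro's rounding behavior, not its actual definition:

```c
#include <stdio.h>

#define PAGE_SIZE 4096UL

/*
 * Round-up division by a power of two, done as add-then-shift:
 * any remainder bumps the quotient by one. Assumed equivalent in
 * behavior to the kernel's DIV_ROUND_UP_POW2().
 */
static unsigned long div_round_up_pow2(unsigned long n, unsigned long d_pow2)
{
    return (n + d_pow2 - 1) >> __builtin_ctzl(d_pow2);
}

int main(void)
{
    /* erofs_readdir() sizes readahead from i_size this way. */
    unsigned long i_size = 10000;

    printf("%lu bytes -> %lu pages\n", i_size,
           div_round_up_pow2(i_size, PAGE_SIZE)); /* 3 pages */
    return 0;
}
```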
/linux/drivers/gpu/drm/xe/
xe_pt_walk.h
    63   typedef int (*xe_pt_entry_fn)(struct xe_ptw *parent, pgoff_t offset,
    124  static inline pgoff_t
    143  static inline pgoff_t
xe_pt_walk.c
    36  static bool xe_pt_next(pgoff_t *offset, u64 *addr, u64 next, u64 end,  in xe_pt_next()
    39  pgoff_t step = 1;  in xe_pt_next()
    76  pgoff_t offset = xe_pt_offset(addr, level, walk);  in xe_pt_walk_range()
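xe_pt_offset() (used at line 76) turns a virtual address into a pgoff_t entry index within one page-table level. The general shape of such a helper is shift-then-mask; the sketch below uses the classic 4 KiB-page, 512-entry-table parameters purely as assumptions, whereas xe reads its per-level shifts from the walk descriptor:

```c
#include <stdio.h>

typedef unsigned long pgoff_t;

/*
 * "Which entry of the level-N table covers addr": shift out the
 * bits translated by lower levels, then mask to the table's entry
 * count. Shift/entry values here are assumptions for the demo.
 */
static pgoff_t pt_offset(unsigned long long addr, unsigned int level)
{
    unsigned int shift = 12 + 9 * level; /* 4 KiB pages, 512-entry tables */

    return (addr >> shift) & 0x1ff;
}

int main(void)
{
    unsigned long long addr = 0x7f1234567000ULL;
    unsigned int level;

    for (level = 0; level < 4; level++)
        printf("level %u entry: %lu\n", level, pt_offset(addr, level));
    return 0;
}
```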
/linux/fs/f2fs/
file.c
    415   struct dnode_of_data *dn, pgoff_t index, int whence)  in __found_offset()
    453   pgoff_t pgofs, end_offset;  in f2fs_seek_block()
    475   pgofs = (pgoff_t)(offset >> PAGE_SHIFT);  in f2fs_seek_block()
    709   pgoff_t fofs;  in f2fs_truncate_data_blocks_range()
    731   pgoff_t index = from >> PAGE_SHIFT;  in truncate_partial_data_page()
    767   pgoff_t free_from;  in f2fs_do_truncate_blocks()
    779   free_from = (pgoff_t)F2FS_BLK_ALIGN(from);  in f2fs_do_truncate_blocks()
    1196  static int fill_zero(struct inode *inode, pgoff_t index,  in fill_zero()
    1221  int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)  in f2fs_truncate_hole()
    1227  pgoff_t end_offset, count;  in f2fs_truncate_hole()
    [all …]
node.h
    203  static inline pgoff_t current_nat_addr(struct f2fs_sb_info *sbi, nid_t start)  in current_nat_addr()
    206  pgoff_t block_off;  in current_nat_addr()
    207  pgoff_t block_addr;  in current_nat_addr()
    216  block_addr = (pgoff_t)(nm_i->nat_blkaddr +  in current_nat_addr()
    226  static inline pgoff_t next_nat_addr(struct f2fs_sb_info *sbi,  in next_nat_addr()
    227  pgoff_t block_addr)  in next_nat_addr()
f2fs.h
    736   pgoff_t *m_next_pgofs; /* point next possible non-hole pgofs */
    737   pgoff_t *m_next_extent; /* point to next possible extent */
    878   pgoff_t donate_start, donate_end; /* inclusive */
    1515  pgoff_t cluster_idx; /* cluster index number */
    1545  pgoff_t cluster_idx; /* cluster index number */
    1624  pgoff_t page_eio_ofs[NR_PAGE_TYPE]; /* EIO page offset */
    2037  pgoff_t index)  in F2FS_SUPER_BLOCK()
    2039  pgoff_t idx_in_folio = index % (1 << folio_order(folio));  in F2FS_SUPER_BLOCK()
    2860  pgoff_t index, bool for_write)  in f2fs_grab_cache_folio()
    2892  struct address_space *mapping, pgoff_t index,  in f2fs_filemap_get_folio()
    [all …]
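The line 2039 hit shows the intra-folio index calculation: because folios are naturally aligned to their order, index % (1 << folio_order(folio)) picks out which page of a multi-page folio a global page index lands in. A standalone sketch, assuming 4 KiB pages:

```c
#include <stdio.h>

#define PAGE_SHIFT 12
typedef unsigned long pgoff_t;

/*
 * Folios are naturally aligned to their order, so the page for
 * global index 'index' sits at index % (pages per folio) within
 * the folio.
 */
static pgoff_t idx_in_folio(pgoff_t index, unsigned int folio_order)
{
    return index % (1UL << folio_order);
}

int main(void)
{
    /* order-2 folio = 4 pages; global index 11 is page 3 of its folio */
    pgoff_t idx = idx_in_folio(11, 2);

    printf("intra-folio page %lu, byte offset %#lx\n",
           idx, idx << PAGE_SHIFT);
    return 0;
}
```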
/linux/drivers/md/
dm-linear.c
    162  static struct dax_device *linear_dax_pgoff(struct dm_target *ti, pgoff_t *pgoff)  in linear_dax_pgoff()
    171  static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,  in linear_dax_direct_access()
    180  static int linear_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,  in linear_dax_zero_page_range()
    188  static size_t linear_dax_recovery_write(struct dm_target *ti, pgoff_t pgoff,  in linear_dax_recovery_write()
/linux/fs/
dax.c
    129  pgoff_t entry_start;
    567  dax_entry_t dax_lock_mapping_entry(struct address_space *mapping, pgoff_t index,  in dax_lock_mapping_entry()
    610  void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index,  in dax_unlock_mapping_entry()
    760  pgoff_t start_idx = start >> PAGE_SHIFT;  in dax_layout_busy_page_range()
    761  pgoff_t end_idx;  in dax_layout_busy_page_range()
    816  pgoff_t index, bool trunc)  in __dax_invalidate_entry()
    841  pgoff_t start, pgoff_t end)  in __dax_clear_dirty_range()
    873  int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)  in dax_delete_mapping_entry()
    892  pgoff_t start_idx = start >> PAGE_SHIFT;  in dax_delete_mapping_range()
    893  pgoff_t end_idx;  in dax_delete_mapping_range()
    [all …]