/linux/include/linux/
pagevec.h
    41  static inline void folio_batch_init(struct folio_batch *fbatch)
    43          fbatch->nr = 0;
    44          fbatch->i = 0;
    45          fbatch->percpu_pvec_drained = false;
    48  static inline void folio_batch_reinit(struct folio_batch *fbatch)
    50          fbatch->nr = 0;
    51          fbatch->i = 0;
    54  static inline unsigned int folio_batch_count(const struct folio_batch *fbatch)
    56          return fbatch->nr;
    59  static inline unsigned int folio_batch_space(const struct folio_batch *fbatch)
    [all …]
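These helpers cover the whole lifecycle of a batch: initialize, fill, query, reset. A minimal sketch of the fill-until-full pattern they support follows; folio_batch_add() and folio_batch_release() live in the same header but are truncated out of the excerpt above, and collect_one_folio()/consume_batch() are hypothetical stand-ins for the producer and consumer.

#include <linux/pagevec.h>

struct folio *collect_one_folio(void);          /* hypothetical producer */
void consume_batch(struct folio_batch *fbatch); /* hypothetical: process + put refs */

/*
 * Fill a stack-allocated batch until folio_batch_add() reports no space
 * left, then hand off the whole batch at once. folio_batch_add() returns
 * the number of free slots remaining after the add, so zero means full.
 */
static void drain_in_batches(void)
{
        struct folio_batch fbatch;
        struct folio *folio;

        folio_batch_init(&fbatch);
        while ((folio = collect_one_folio()) != NULL) {
                if (!folio_batch_add(&fbatch, folio)) {
                        consume_batch(&fbatch);
                        folio_batch_reinit(&fbatch);
                }
        }
        if (folio_batch_count(&fbatch))
                consume_batch(&fbatch);         /* flush the partial tail */
}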
|
pagemap.h
   991                  pgoff_t end, struct folio_batch *fbatch);
   993                  pgoff_t *start, pgoff_t end, struct folio_batch *fbatch);
   995                  pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch);
   997                  pgoff_t *start, pgoff_t end, struct folio_batch *fbatch);
  1293                  struct folio_batch *fbatch);
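These are the tail lines of the page-cache batch-lookup prototypes (the filemap_get_folios() family); only the lines mentioning fbatch survive the excerpt. The canonical consumer loop, repeated throughout the filesystem entries below, looks roughly like this; process_folio() is a hypothetical callback:

#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>

void process_folio(struct folio *folio);        /* hypothetical per-folio work */

/*
 * Gather/process/release: filemap_get_folios() refills the batch and
 * advances index past the last folio it returned, so looping until it
 * returns zero walks the whole file while pinning one batch at a time.
 */
static void walk_mapping(struct address_space *mapping)
{
        struct folio_batch fbatch;
        pgoff_t index = 0;
        unsigned int i;

        folio_batch_init(&fbatch);
        while (filemap_get_folios(mapping, &index, ULONG_MAX, &fbatch)) {
                for (i = 0; i < folio_batch_count(&fbatch); i++)
                        process_folio(fbatch.folios[i]);
                folio_batch_release(&fbatch);   /* drop the batch's refs */
                cond_resched();
        }
}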
|
writeback.h
    75          struct folio_batch fbatch;
|
/linux/mm/
swap.c
   158  static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
   164          for (i = 0; i < folio_batch_count(fbatch); i++) {
   165                  struct folio *folio = fbatch->folios[i];
   179          folios_put(fbatch);
   182  static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch,
   194          if (!folio_batch_add(this_cpu_ptr(fbatch), folio) ||
   196                  folio_batch_move_lru(this_cpu_ptr(fbatch), move_fn);
   323          struct folio_batch *fbatch = &per_cpu(cpu_fbatches.lru_activate, cpu);
   325          if (folio_batch_count(fbatch))
   326                  folio_batch_move_lru(fbatch, lru_activate);
    [all …]
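swap.c is the per-CPU variant of the pattern: each CPU accumulates folios in its own batch and drains it through a move function once it fills (or when a drain is forced). A simplified sketch of that shape; demo_fbatch and drain_batch() are hypothetical, and the real __folio_batch_add_and_move() additionally guards the per-CPU access and honours a force-drain condition, both elided here:

#include <linux/pagevec.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct folio_batch, demo_fbatch); /* hypothetical */

void drain_batch(struct folio_batch *fbatch);   /* hypothetical flush step */

/*
 * Add a folio to this CPU's private batch; flush only when the batch
 * reports no space left after the add.
 */
static void add_and_maybe_flush(struct folio *folio)
{
        struct folio_batch *fbatch = this_cpu_ptr(&demo_fbatch);

        if (!folio_batch_add(fbatch, folio))
                drain_batch(fbatch);
}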
|
truncate.c
    61                  struct folio_batch *fbatch, pgoff_t *indices)
    64          int nr = folio_batch_count(fbatch);
    73                  if (xa_is_value(fbatch->folios[j]))
    81                  if (xa_is_value(fbatch->folios[i])) {
   117          folio_batch_remove_exceptionals(fbatch);
   371          struct folio_batch fbatch;
   398          folio_batch_init(&fbatch);
   401                          &fbatch, indices)) {
   402                  truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
   403                  for (i = 0; i < folio_batch_count(&fbatch); i++)
    [all …]
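Truncation uses the entry-returning lookups, so a batch slot can hold a value ("exceptional") entry such as a shadow or swap entry instead of a folio pointer; xa_is_value() tells them apart, and the parallel indices[] array records each slot's file offset, which a value entry cannot supply itself. A sketch of the split, with hypothetical handlers:

#include <linux/pagevec.h>
#include <linux/xarray.h>

void handle_value_entry(pgoff_t index, void *entry);    /* hypothetical */
void handle_folio(struct folio *folio);                 /* hypothetical */

/*
 * Walk a batch that may mix real folios with value entries. indices[i]
 * is the file offset of slot i, the only way to locate a value entry.
 */
static void handle_entries(struct folio_batch *fbatch, pgoff_t *indices)
{
        unsigned int i;

        for (i = 0; i < folio_batch_count(fbatch); i++) {
                struct folio *folio = fbatch->folios[i];

                if (xa_is_value(folio)) {
                        handle_value_entry(indices[i], folio);
                        continue;
                }
                handle_folio(folio);
        }
        /* strip value entries so a later release sees only real folios */
        folio_batch_remove_exceptionals(fbatch);
}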
|
mlock.c
    33          struct folio_batch fbatch;
   186  static void mlock_folio_batch(struct folio_batch *fbatch)
   193          for (i = 0; i < folio_batch_count(fbatch); i++) {
   194                  folio = fbatch->folios[i];
   197                  fbatch->folios[i] = folio;
   209          folios_put(fbatch);
   214          struct folio_batch *fbatch;
   217          fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
   218          if (folio_batch_count(fbatch))
   219                  mlock_folio_batch(fbatch);
    [all …]
|
filemap.c
   280                  struct folio_batch *fbatch)
   282          XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index);
   289                  if (i >= folio_batch_count(fbatch))
   302                  if (folio != fbatch->folios[i]) {
   304                          fbatch->folios[i]->index, folio);
   321                  struct folio_batch *fbatch)
   325          if (!folio_batch_count(fbatch))
   330          for (i = 0; i < folio_batch_count(fbatch); i++) {
   331                  struct folio *folio = fbatch->folios[i];
   336          page_cache_delete_batch(mapping, fbatch);
    [all …]
|
shmem.c
  1049          struct folio_batch fbatch;
  1052          folio_batch_init(&fbatch);
  1057                 filemap_get_folios(mapping, &index, ~0UL, &fbatch)) {
  1058                  check_move_unevictable_folios(&fbatch);
  1059                  folio_batch_release(&fbatch);
  1103          struct folio_batch fbatch;
  1117          folio_batch_init(&fbatch);
  1120                          &fbatch, indices)) {
  1121                  for (i = 0; i < folio_batch_count(&fbatch); i++) {
  1122                          folio = fbatch.folios[i];
  1411  shmem_find_swap_entries(struct address_space *mapping, pgoff_t start,
                struct folio_batch *fbatch, pgoff_t *indices, unsigned int type)
  1453  shmem_unuse_swap_entries(struct inode *inode, struct folio_batch *fbatch,
                pgoff_t *indices)
  1484          struct folio_batch fbatch;
    [all …]
page-writeback.c
  2451          folio = folio_batch_next(&wbc->fbatch);
  2453                  folio_batch_release(&wbc->fbatch);
  2456                                  wbc_to_tag(wbc), &wbc->fbatch);
  2457                  folio = folio_batch_next(&wbc->fbatch);
  2502          folio_batch_init(&wbc->fbatch);
  2582          folio_batch_release(&wbc->fbatch);
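Here the batch lives inside struct writeback_control (the writeback.h member above) and is refilled transparently from a tagged page-cache lookup, so a filesystem consumes one folio at a time. The exported loop shape, roughly; write_out() is a hypothetical stand-in for writing and unlocking one folio:

#include <linux/writeback.h>
#include <linux/pagemap.h>

int write_out(struct folio *folio);     /* hypothetical: writes and unlocks */

/*
 * writeback_iter() hands back one locked dirty folio per call; the
 * previous folio and last error are passed back in so it can finish
 * the bookkeeping. A NULL return ends the walk.
 */
static int write_all_dirty(struct address_space *mapping,
                           struct writeback_control *wbc)
{
        struct folio *folio = NULL;
        int error = 0;

        while ((folio = writeback_iter(mapping, wbc, folio, &error)))
                error = write_out(folio);
        return error;
}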
|
gup.c
  3455          struct folio_batch fbatch;
  3486          folio_batch_init(&fbatch);
  3497                                  &fbatch);
  3504                          folio = fbatch.folios[i];
  3507                          folio_batch_release(&fbatch);
  3521          folio_batch_release(&fbatch);
|
/linux/fs/nilfs2/
page.c
   246          struct folio_batch fbatch;
   251          folio_batch_init(&fbatch);
   254                          PAGECACHE_TAG_DIRTY, &fbatch))
   257          for (i = 0; i < folio_batch_count(&fbatch); i++) {
   258                  struct folio *folio = fbatch.folios[i], *dfolio;
   282          folio_batch_release(&fbatch);
   301          struct folio_batch fbatch;
   305          folio_batch_init(&fbatch);
   307          n = filemap_get_folios(smap, &start, ~0UL, &fbatch);
   311          for (i = 0; i < folio_batch_count(&fbatch); i++) {
    [all …]
|
segment.c
   704          struct folio_batch fbatch;
   718          folio_batch_init(&fbatch);
   722                          PAGECACHE_TAG_DIRTY, &fbatch))
   725          for (i = 0; i < folio_batch_count(&fbatch); i++) {
   727                  struct folio *folio = fbatch.folios[i];
   749          folio_batch_release(&fbatch);
   757          folio_batch_release(&fbatch);
   767          struct folio_batch fbatch;
   774          folio_batch_init(&fbatch);
   777                          (pgoff_t)-1, PAGECACHE_TAG_DIRTY, &fbatch)) {
    [all …]
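Both nilfs2 call sites use the tag-filtered lookup: filemap_get_folios_tag() returns only folios whose page-cache slots carry the requested mark, here PACECACHE_TAG_DIRTY's dirty tag, so the walk skips clean folios entirely. The shape in isolation, with a hypothetical handle_dirty():

#include <linux/pagemap.h>
#include <linux/pagevec.h>

void handle_dirty(struct folio *folio);         /* hypothetical */

/* Visit only the dirty folios in [0, end]; the lookup advances index. */
static void for_each_dirty_folio(struct address_space *mapping, pgoff_t end)
{
        struct folio_batch fbatch;
        pgoff_t index = 0;
        unsigned int i;

        folio_batch_init(&fbatch);
        while (filemap_get_folios_tag(mapping, &index, end,
                                      PAGECACHE_TAG_DIRTY, &fbatch)) {
                for (i = 0; i < folio_batch_count(&fbatch); i++)
                        handle_dirty(fbatch.folios[i]);
                folio_batch_release(&fbatch);
        }
}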
|
/linux/fs/ramfs/
file-nommu.c
   208          struct folio_batch fbatch;
   224          folio_batch_init(&fbatch);
   228                          ULONG_MAX, &fbatch);
   235          ret = (unsigned long) folio_address(fbatch.folios[0]);
   236          pfn = folio_pfn(fbatch.folios[0]);
   240                  if (pfn + nr_pages != folio_pfn(fbatch.folios[loop])) {
   244                  nr_pages += folio_nr_pages(fbatch.folios[loop]);
   250          folio_batch_release(&fbatch);
   256          folio_batch_release(&fbatch);
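On no-MMU, the mapping is only usable if the backing folios are physically contiguous, which is what the pfn arithmetic above verifies: each folio's first pfn must equal folio 0's pfn plus the pages accumulated so far. The check isolated into a helper (a sketch; the in-tree code inlines it in the lookup loop):

#include <linux/mm.h>
#include <linux/pagevec.h>

/*
 * True if the batch's folios are physically contiguous starting from
 * the pfn of folios[0], accumulating each folio's page count.
 */
static bool batch_is_contiguous(struct folio_batch *fbatch)
{
        unsigned long pfn = folio_pfn(fbatch->folios[0]);
        unsigned long nr_pages = folio_nr_pages(fbatch->folios[0]);
        unsigned int i;

        for (i = 1; i < folio_batch_count(fbatch); i++) {
                if (pfn + nr_pages != folio_pfn(fbatch->folios[i]))
                        return false;
                nr_pages += folio_nr_pages(fbatch->folios[i]);
        }
        return true;
}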
|
/linux/fs/iomap/
iter.c
    11          if (iter->fbatch) {
    12                  folio_batch_release(iter->fbatch);
    13                  kfree(iter->fbatch);
    14                  iter->fbatch = NULL;
|
buffered-io.c
   835          if (iter->fbatch) {
   836                  struct folio *folio = folio_batch_next(iter->fbatch);
   932          WARN_ON_ONCE(!iter->fbatch);
  1557          iter->fbatch = kmalloc(sizeof(struct folio_batch), GFP_KERNEL);
  1558          if (!iter->fbatch)
  1560          folio_batch_init(iter->fbatch);
  1562          filemap_get_folios_dirty(mapping, &start, end, iter->fbatch);
  1593          if (WARN_ON_ONCE(iter.fbatch &&
  1597          if (!iter.fbatch &&
|
/linux/fs/netfs/
rolling_buffer.c
   199          struct folio_batch fbatch;
   202          folio_batch_init(&fbatch);
   213                  if (!folio_batch_add(&fbatch, folio))
   214                          folio_batch_release(&fbatch);
   221          folio_batch_release(&fbatch);
|
misc.c
    81          struct folio_batch fbatch;
    83          folio_batch_init(&fbatch);
    94                  if (!folio_batch_add(&fbatch, folio))
    95                          folio_batch_release(&fbatch);
   103          folio_batch_release(&fbatch);
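Both netfs excerpts use the batch purely as a deferred folio_put() accumulator: folios are stuffed into the batch and folio_batch_release() drops a whole batch of references at once, flushing early whenever the batch fills and once more at the end for the partial tail. The idiom in isolation:

#include <linux/pagevec.h>

/*
 * Deferred-release accumulator: folio_batch_add() returns the free
 * slots remaining after the add, so a zero return means "full, flush
 * now". The final release catches whatever is left in a partial batch.
 */
static void put_folios_batched(struct folio **folios, unsigned int nr)
{
        struct folio_batch fbatch;
        unsigned int i;

        folio_batch_init(&fbatch);
        for (i = 0; i < nr; i++) {
                if (!folio_batch_add(&fbatch, folios[i]))
                        folio_batch_release(&fbatch);
        }
        folio_batch_release(&fbatch);
}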
|
/linux/fs/ceph/
addr.c
   609          struct folio_batch fbatch;
  1014          folio_batch_init(&ceph_wbc->fbatch);
  1021          folio_batch_release(&ceph_wbc->fbatch);
  1298                  folio = ceph_wbc->fbatch.folios[i];
  1327                  ceph_wbc->fbatch.folios[i] = NULL;
  1332                  ceph_wbc->fbatch.folios[i] = NULL;
  1339                  ceph_wbc->fbatch.folios[i] = NULL;
  1377                  ceph_wbc->fbatch.folios[i] = NULL;
  1387  void ceph_shift_unused_folios_left(struct folio_batch *fbatch)
  1392          for (j = 0; j < folio_batch_count(fbatch); j++) {
    [all …]
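ceph consumes some folios out of a batch by NULLing their slots, then compacts the survivors to the front so the batch can keep being used as a dense array. A plausible reconstruction of that compaction given the loop header shown (the body is truncated above, so treat this as a sketch, not the in-tree code):

#include <linux/pagevec.h>

/*
 * Slide non-NULL entries to the front of the batch and shrink ->nr so
 * folio_batch_count() reflects only the folios still owned.
 */
static void shift_unused_folios_left(struct folio_batch *fbatch)
{
        unsigned int i = 0, j;

        for (j = 0; j < folio_batch_count(fbatch); j++) {
                if (fbatch->folios[j])
                        fbatch->folios[i++] = fbatch->folios[j];
        }
        fbatch->nr = i;
}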
|
/linux/fs/f2fs/
node.c
  1657          struct folio_batch fbatch;
  1661          folio_batch_init(&fbatch);
  1666                          &fbatch))) {
  1670                          struct folio *folio = fbatch.folios[i];
  1674                          folio_batch_release(&fbatch);
  1705          folio_batch_release(&fbatch);
  1854          struct folio_batch fbatch;
  1868          folio_batch_init(&fbatch);
  1873                          &fbatch))) {
  1877                          struct folio *folio = fbatch.folios[i];
    [all …]
|
checkpoint.c
   415          struct folio_batch fbatch;
   421          folio_batch_init(&fbatch);
   427                          PAGECACHE_TAG_DIRTY, &fbatch))) {
   431                  struct folio *folio = fbatch.folios[i];
   435                                  folio_nr_pages(fbatch.folios[i-1])) {
   436                          folio_batch_release(&fbatch);
   467          folio_batch_release(&fbatch);
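The comparison at line 435 checks index contiguity, not physical contiguity: a large folio occupies folio_nr_pages() consecutive page-cache indices, so the folio in slot i directly follows its predecessor only if its ->index equals the predecessor's ->index plus the predecessor's page count. Isolated into a predicate:

#include <linux/mm.h>
#include <linux/pagevec.h>

/*
 * True if the folio at slot i immediately follows the folio at slot
 * i - 1 in the page-cache index space.
 */
static bool follows_previous(struct folio_batch *fbatch, unsigned int i)
{
        struct folio *prev = fbatch->folios[i - 1];

        return fbatch->folios[i]->index == prev->index + folio_nr_pages(prev);
}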
|
/linux/fs/btrfs/
extent_io.c
   272          struct folio_batch fbatch;
   275          folio_batch_init(&fbatch);
   280                          end_index, &fbatch);
   282                  struct folio *folio = fbatch.folios[i];
   287                  folio_batch_release(&fbatch);
   311          struct folio_batch fbatch;
   313          folio_batch_init(&fbatch);
   318                          end_index, &fbatch);
   323                  struct folio *folio = fbatch.folios[i];
   341                  folio_batch_release(&fbatch);
  2435          struct folio_batch fbatch;
    [all …]
compression.c
   293          struct folio_batch fbatch;
   301          folio_batch_init(&fbatch);
   304                          &fbatch);
   310                  struct folio *folio = fbatch.folios[i];
   315                  folio_batch_release(&fbatch);
|
/linux/fs/hugetlbfs/
inode.c
   583          struct folio_batch fbatch;
   588          folio_batch_init(&fbatch);
   590          while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) {
   591                  for (i = 0; i < folio_batch_count(&fbatch); ++i) {
   592                          struct folio *folio = fbatch.folios[i];
   608                  folio_batch_release(&fbatch);
|
/linux/fs/ext4/
inode.c
  1717          struct folio_batch fbatch;
  1740          folio_batch_init(&fbatch);
  1744          nr = filemap_get_folios(mapping, &index, end - 1, &fbatch);
  1748                  struct folio *folio = fbatch.folios[i];
  1765          folio_batch_release(&fbatch);
  2298          struct folio_batch fbatch;
  2311          folio_batch_init(&fbatch);
  2313          nr = filemap_get_folios(inode->i_mapping, &start, end, &fbatch);
  2317                  struct folio *folio = fbatch.folios[i];
  2335          folio_batch_release(&fbatch);
    [all …]
|
/linux/fs/
buffer.c
  1742          struct folio_batch fbatch;
  1750          folio_batch_init(&fbatch);
  1751          while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) {
  1752                  count = folio_batch_count(&fbatch);
  1754                  struct folio *folio = fbatch.folios[i];
  1783          folio_batch_release(&fbatch);
|