Searched refs:fbatch (Results 1 – 25 of 28) sorted by relevance

/linux/include/linux/
pagevec.h
41 static inline void folio_batch_init(struct folio_batch *fbatch) in folio_batch_init() argument
43 fbatch->nr = 0; in folio_batch_init()
44 fbatch->i = 0; in folio_batch_init()
45 fbatch->percpu_pvec_drained = false; in folio_batch_init()
48 static inline void folio_batch_reinit(struct folio_batch *fbatch) in folio_batch_reinit() argument
50 fbatch->nr = 0; in folio_batch_reinit()
51 fbatch->i = 0; in folio_batch_reinit()
54 static inline unsigned int folio_batch_count(struct folio_batch *fbatch) in folio_batch_count() argument
56 return fbatch->nr; in folio_batch_count()
59 static inline unsigned int folio_batch_space(struct folio_batch *fbatch) in folio_batch_space() argument
[all …]
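
The pagevec.h hits above are the accessors for the batch structure itself. As a minimal sketch (not the verbatim header), the structure they manipulate and the add helper look roughly like this; the PAGEVEC_SIZE value of 31 is the capacity in recent kernels and should be checked against the tree being browsed:

	/* Sketch of the structure behind the pagevec.h helpers above. */
	#define PAGEVEC_SIZE	31	/* assumed capacity; verify in the target tree */

	struct folio_batch {
		unsigned char nr;		/* folios currently stored */
		unsigned char i;		/* cursor used by folio_batch_next() */
		bool percpu_pvec_drained;	/* only folio_batch_init() clears this */
		struct folio *folios[PAGEVEC_SIZE];
	};

	/*
	 * folio_batch_add() stores a folio and returns the space left afterwards,
	 * so a zero return means "the batch is full, flush it before adding more".
	 */
	static inline unsigned folio_batch_add(struct folio_batch *fbatch,
					       struct folio *folio)
	{
		fbatch->folios[fbatch->nr++] = folio;
		return folio_batch_space(fbatch);
	}
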
writeback.h
86 struct folio_batch fbatch; member
/linux/mm/
swap.c
158 static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn) in folio_batch_move_lru()
164 for (i = 0; i < folio_batch_count(fbatch); i++) { in folio_batch_move_lru()
165 struct folio *folio = fbatch->folios[i]; in folio_batch_move_lru()
175 folios_put(fbatch); in folio_batch_move_lru()
178 static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch, in __folio_batch_add_and_move()
194 if (!folio_batch_add(this_cpu_ptr(fbatch), folio) || folio_test_large(folio) || in __folio_batch_add_and_move()
196 folio_batch_move_lru(this_cpu_ptr(fbatch), move_fn); in __folio_batch_add_and_move()
318 struct folio_batch *fbatch = &per_cpu(cpu_fbatches.lru_activate, cpu); in folio_activate_drain()
320 if (folio_batch_count(fbatch)) in folio_activate_drain()
321 folio_batch_move_lru(fbatch, lru_activate); in folio_activate_drain()
124 struct folio_batch fbatch; put_pages_list() local
189 folio_batch_move_lru(struct folio_batch * fbatch,move_fn_t move_fn) folio_batch_move_lru() argument
209 __folio_batch_add_and_move(struct folio_batch __percpu * fbatch,struct folio * folio,move_fn_t move_fn,bool on_lru,bool disable_irq) __folio_batch_add_and_move() argument
349 struct folio_batch *fbatch = &per_cpu(cpu_fbatches.lru_activate, cpu); folio_activate_drain() local
384 struct folio_batch *fbatch; __lru_cache_activate_folio() local
644 struct folio_batch *fbatch = &fbatches->lru_add; lru_add_drain_cpu() local
1011 struct folio_batch fbatch; release_pages() local
1047 __folio_batch_release(struct folio_batch * fbatch) __folio_batch_release() argument
1066 folio_batch_remove_exceptionals(struct folio_batch * fbatch) folio_batch_remove_exceptionals() argument
[all …]
truncate.c
61 struct folio_batch *fbatch, pgoff_t *indices) in clear_shadow_entries()
64 int nr = folio_batch_count(fbatch); in clear_shadow_entries()
73 if (xa_is_value(fbatch->folios[j])) in truncate_folio_batch_exceptionals()
81 if (xa_is_value(fbatch->folios[i])) in truncate_folio_batch_exceptionals()
103 folio_batch_remove_exceptionals(fbatch); in truncate_folio_batch_exceptionals()
293 struct folio_batch fbatch;
320 folio_batch_init(&fbatch); in truncate_inode_pages_range()
323 &fbatch, indices)) { in truncate_inode_pages_range()
324 truncate_folio_batch_exceptionals(mapping, &fbatch, indices); in truncate_inode_pages_range()
325 for (i = 0; i < folio_batch_count(&fbatch); i++) in truncate_inode_pages_range()
43 clear_shadow_entries(struct address_space * mapping,struct folio_batch * fbatch,pgoff_t * indices) clear_shadow_entries() argument
308 struct folio_batch fbatch; truncate_inode_pages_range() local
475 struct folio_batch fbatch; mapping_try_invalidate() local
601 struct folio_batch fbatch; invalidate_inode_pages2_range() local
[all …]
mlock.c
33 struct folio_batch fbatch; member
186 static void mlock_folio_batch(struct folio_batch *fbatch) in mlock_folio_batch() argument
193 for (i = 0; i < folio_batch_count(fbatch); i++) { in mlock_folio_batch()
194 folio = fbatch->folios[i]; in mlock_folio_batch()
197 fbatch->folios[i] = folio; in mlock_folio_batch()
209 folios_put(fbatch); in mlock_folio_batch()
214 struct folio_batch *fbatch; in mlock_drain_local() local
217 fbatch = this_cpu_ptr(&mlock_fbatch.fbatch); in mlock_drain_local()
218 if (folio_batch_count(fbatch)) in mlock_drain_local()
219 mlock_folio_batch(fbatch); in mlock_drain_local()
[all …]
filemap.c
278 struct folio_batch *fbatch) in page_cache_delete_batch() argument
280 XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index); in page_cache_delete_batch()
287 if (i >= folio_batch_count(fbatch)) in page_cache_delete_batch()
300 if (folio != fbatch->folios[i]) { in page_cache_delete_batch()
302 fbatch->folios[i]->index, folio); in page_cache_delete_batch()
319 struct folio_batch *fbatch) in delete_from_page_cache_batch() argument
323 if (!folio_batch_count(fbatch)) in delete_from_page_cache_batch()
328 for (i = 0; i < folio_batch_count(fbatch); i++) { in delete_from_page_cache_batch()
329 struct folio *folio = fbatch->folios[i]; in delete_from_page_cache_batch()
334 page_cache_delete_batch(mapping, fbatch); in delete_from_page_cache_batch()
[all …]
shmem.c
956 struct folio_batch fbatch; in shmem_unlock_mapping() local
959 folio_batch_init(&fbatch); in shmem_unlock_mapping()
964 filemap_get_folios(mapping, &index, ~0UL, &fbatch)) { in shmem_unlock_mapping()
965 check_move_unevictable_folios(&fbatch); in shmem_unlock_mapping()
966 folio_batch_release(&fbatch); in shmem_unlock_mapping()
1010 struct folio_batch fbatch; in shmem_undo_range() local
1024 folio_batch_init(&fbatch); in shmem_undo_range()
1027 &fbatch, indices)) { in shmem_undo_range()
1028 for (i = 0; i < folio_batch_count(&fbatch); i++) { in shmem_undo_range()
1029 folio = fbatch.folios[i]; in shmem_undo_range()
[all …]
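
The shmem.c hits above, like the hugetlbfs, ext4 and f2fs hits further down, all follow the same gather-and-release idiom. A minimal sketch of that pattern, where walk_mapping() and process_one_folio() are hypothetical names introduced only for illustration:

	/* Hypothetical helper: visit every folio cached in 'mapping', batch by batch. */
	static void walk_mapping(struct address_space *mapping,
				 void (*process_one_folio)(struct folio *))
	{
		struct folio_batch fbatch;
		pgoff_t index = 0;
		unsigned int i;

		folio_batch_init(&fbatch);
		while (filemap_get_folios(mapping, &index, ULONG_MAX, &fbatch)) {
			for (i = 0; i < folio_batch_count(&fbatch); i++)
				process_one_folio(fbatch.folios[i]);
			/* Drop the references filemap_get_folios() took and reset nr. */
			folio_batch_release(&fbatch);
			cond_resched();
		}
	}
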
page-writeback.c
2504 folio = folio_batch_next(&wbc->fbatch); in writeback_get_folio()
2506 folio_batch_release(&wbc->fbatch);
2509 wbc_to_tag(wbc), &wbc->fbatch);
2510 folio = folio_batch_next(&wbc->fbatch);
2555 folio_batch_init(&wbc->fbatch); in writeback_iter()
2635 folio_batch_release(&wbc->fbatch); in write_cache_pages()
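
In page-writeback.c the batch is not a local at all: it is the fbatch member of struct writeback_control noted in the writeback.h hit above, and writeback_iter() initialises, refills and releases it internally. A minimal sketch of a consumer loop, with write_one_folio() as a hypothetical callback (writeback_iter() hands back each folio locked, so the callback is expected to unlock it):

	/* Sketch of a ->writepages-style loop; wbc->fbatch is managed by the iterator. */
	static int example_writepages(struct address_space *mapping,
				      struct writeback_control *wbc)
	{
		struct folio *folio = NULL;
		int error = 0;

		while ((folio = writeback_iter(mapping, wbc, folio, &error)))
			error = write_one_folio(folio, wbc);	/* hypothetical; must unlock folio */

		return error;
	}
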
gup.c
3669 struct folio_batch fbatch; in memfd_pin_folios() local
3700 folio_batch_init(&fbatch); in memfd_pin_folios()
3711 &fbatch); in memfd_pin_folios()
3727 next_idx != folio_index(fbatch.folios[i])) in memfd_pin_folios()
3730 folio = page_folio(&fbatch.folios[i]->page); in memfd_pin_folios()
3733 folio_batch_release(&fbatch); in memfd_pin_folios()
3748 folio_batch_release(&fbatch); in memfd_pin_folios()
internal.h
407 pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
409 pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
745 void free_unref_folios(struct folio_batch *fbatch);
/linux/fs/nilfs2/
page.c
247 struct folio_batch fbatch; in nilfs_copy_dirty_pages() local
252 folio_batch_init(&fbatch); in nilfs_copy_dirty_pages()
255 PAGECACHE_TAG_DIRTY, &fbatch)) in nilfs_copy_dirty_pages()
258 for (i = 0; i < folio_batch_count(&fbatch); i++) { in nilfs_copy_dirty_pages()
259 struct folio *folio = fbatch.folios[i], *dfolio; in nilfs_copy_dirty_pages()
283 folio_batch_release(&fbatch); in nilfs_copy_dirty_pages()
302 struct folio_batch fbatch; in nilfs_copy_back_pages() local
306 folio_batch_init(&fbatch); in nilfs_copy_back_pages()
308 n = filemap_get_folios(smap, &start, ~0UL, &fbatch); in nilfs_copy_back_pages()
312 for (i = 0; i < folio_batch_count(&fbatch); i++) { in nilfs_copy_back_pages()
[all …]
segment.c
702 struct folio_batch fbatch; in nilfs_lookup_dirty_data_buffers() local
716 folio_batch_init(&fbatch); in nilfs_lookup_dirty_data_buffers()
720 PAGECACHE_TAG_DIRTY, &fbatch)) in nilfs_lookup_dirty_data_buffers()
723 for (i = 0; i < folio_batch_count(&fbatch); i++) { in nilfs_lookup_dirty_data_buffers()
725 struct folio *folio = fbatch.folios[i]; in nilfs_lookup_dirty_data_buffers()
747 folio_batch_release(&fbatch); in nilfs_lookup_dirty_data_buffers()
753 folio_batch_release(&fbatch); in nilfs_lookup_dirty_data_buffers()
763 struct folio_batch fbatch; in nilfs_lookup_dirty_node_buffers() local
770 folio_batch_init(&fbatch); in nilfs_lookup_dirty_node_buffers()
773 (pgoff_t)-1, PAGECACHE_TAG_DIRTY, &fbatch)) { in nilfs_lookup_dirty_node_buffers()
[all …]
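
Both nilfs2 functions above (and the gfs2 and ceph writeback paths below) restrict the lookup to dirty folios by passing a page-cache tag. A minimal sketch of that variant, with walk_dirty_range() and process_dirty() as hypothetical names:

	/* Visit only the folios tagged dirty within [start, end]. */
	static void walk_dirty_range(struct address_space *mapping,
				     pgoff_t start, pgoff_t end,
				     void (*process_dirty)(struct folio *))
	{
		struct folio_batch fbatch;
		unsigned int i;

		folio_batch_init(&fbatch);
		while (filemap_get_folios_tag(mapping, &start, end,
					      PAGECACHE_TAG_DIRTY, &fbatch)) {
			for (i = 0; i < folio_batch_count(&fbatch); i++)
				process_dirty(fbatch.folios[i]);
			folio_batch_release(&fbatch);
			cond_resched();
		}
	}
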
/linux/fs/ramfs/
file-nommu.c
208 struct folio_batch fbatch; in ramfs_nommu_get_unmapped_area() local
224 folio_batch_init(&fbatch); in ramfs_nommu_get_unmapped_area()
228 ULONG_MAX, &fbatch); in ramfs_nommu_get_unmapped_area()
235 ret = (unsigned long) folio_address(fbatch.folios[0]); in ramfs_nommu_get_unmapped_area()
236 pfn = folio_pfn(fbatch.folios[0]); in ramfs_nommu_get_unmapped_area()
240 if (pfn + nr_pages != folio_pfn(fbatch.folios[loop])) { in ramfs_nommu_get_unmapped_area()
244 nr_pages += folio_nr_pages(fbatch.folios[loop]); in ramfs_nommu_get_unmapped_area()
250 folio_batch_release(&fbatch); in ramfs_nommu_get_unmapped_area()
256 folio_batch_release(&fbatch); in ramfs_nommu_get_unmapped_area()
/linux/fs/bcachefs/
fs-io-pagecache.c
255 struct folio_batch fbatch; in bch2_mark_pagecache_unallocated() local
261 folio_batch_init(&fbatch); in bch2_mark_pagecache_unallocated()
264 &index, end_index, &fbatch)) { in bch2_mark_pagecache_unallocated()
265 for (i = 0; i < folio_batch_count(&fbatch); i++) { in bch2_mark_pagecache_unallocated()
266 struct folio *folio = fbatch.folios[i]; in bch2_mark_pagecache_unallocated()
287 folio_batch_release(&fbatch); in bch2_mark_pagecache_unallocated()
299 struct folio_batch fbatch; in bch2_mark_pagecache_reserved() local
306 folio_batch_init(&fbatch); in bch2_mark_pagecache_reserved()
309 &index, end_index, &fbatch)) { in bch2_mark_pagecache_reserved()
310 for (unsigned i = 0; i < folio_batch_count(&fbatch); i++) { in bch2_mark_pagecache_reserved()
[all …]
/linux/drivers/gpu/drm/
drm_gem.c
531 static void drm_gem_check_release_batch(struct folio_batch *fbatch) in drm_gem_check_release_batch() argument
533 check_move_unevictable_folios(fbatch); in drm_gem_check_release_batch()
534 __folio_batch_release(fbatch); in drm_gem_check_release_batch()
568 struct folio_batch fbatch; in drm_gem_get_pages() local
615 folio_batch_init(&fbatch); in drm_gem_get_pages()
619 if (!folio_batch_add(&fbatch, f)) in drm_gem_get_pages()
620 drm_gem_check_release_batch(&fbatch); in drm_gem_get_pages()
623 if (fbatch.nr) in drm_gem_get_pages()
624 drm_gem_check_release_batch(&fbatch); in drm_gem_get_pages()
643 struct folio_batch fbatch; in drm_gem_put_pages() local
[all …]
/linux/drivers/gpu/drm/i915/gem/
i915_gem_shmem.c
25 static void check_release_folio_batch(struct folio_batch *fbatch) in check_release_folio_batch() argument
27 check_move_unevictable_folios(fbatch); in check_release_folio_batch()
28 __folio_batch_release(fbatch); in check_release_folio_batch()
36 struct folio_batch fbatch; in shmem_sg_free_table() local
42 folio_batch_init(&fbatch); in shmem_sg_free_table()
54 if (!folio_batch_add(&fbatch, folio)) in shmem_sg_free_table()
55 check_release_folio_batch(&fbatch); in shmem_sg_free_table()
57 if (fbatch.nr) in shmem_sg_free_table()
58 check_release_folio_batch(&fbatch); in shmem_sg_free_table()
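
The drm_gem.c and i915_gem_shmem.c hits above use the batch in the producer direction: folios are appended with folio_batch_add() until the batch is full, then flushed through check_move_unevictable_folios() and __folio_batch_release(). A minimal sketch of that pattern, with put_folio_array() as a hypothetical wrapper:

	/*
	 * Flush step mirroring the drm/i915 helpers above: move folios off the
	 * unevictable list if needed, then put the references and reset the batch.
	 */
	static void flush_release_batch(struct folio_batch *fbatch)
	{
		check_move_unevictable_folios(fbatch);
		__folio_batch_release(fbatch);
	}

	/* Hypothetical wrapper: release an array of folios in batches. */
	static void put_folio_array(struct folio **folios, unsigned long count)
	{
		struct folio_batch fbatch;
		unsigned long i;

		folio_batch_init(&fbatch);
		for (i = 0; i < count; i++) {
			/* folio_batch_add() returns the space left; zero means full. */
			if (!folio_batch_add(&fbatch, folios[i]))
				flush_release_batch(&fbatch);
		}
		if (folio_batch_count(&fbatch))
			flush_release_batch(&fbatch);
	}
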
/linux/fs/gfs2/
aops.c
179 struct folio_batch *fbatch, in gfs2_write_jdata_batch() argument
188 int nr_folios = folio_batch_count(fbatch); in gfs2_write_jdata_batch()
191 size += folio_size(fbatch->folios[i]); in gfs2_write_jdata_batch()
199 struct folio *folio = fbatch->folios[i]; in gfs2_write_jdata_batch()
282 struct folio_batch fbatch; in gfs2_write_cache_jdata() local
292 folio_batch_init(&fbatch); in gfs2_write_cache_jdata()
319 tag, &fbatch); in gfs2_write_cache_jdata()
323 ret = gfs2_write_jdata_batch(mapping, wbc, &fbatch, in gfs2_write_cache_jdata()
329 folio_batch_release(&fbatch); in gfs2_write_cache_jdata()
/linux/fs/ceph/
addr.c
955 struct folio_batch fbatch; in ceph_writepages_start() local
985 folio_batch_init(&fbatch); in ceph_writepages_start()
1052 end, tag, &fbatch); in ceph_writepages_start()
1057 struct folio *folio = fbatch.folios[i]; in ceph_writepages_start()
1199 fbatch.folios[i] = NULL; in ceph_writepages_start()
1210 if (!fbatch.folios[j]) in ceph_writepages_start()
1213 fbatch.folios[n] = fbatch.folios[j]; in ceph_writepages_start()
1216 fbatch.nr = n; in ceph_writepages_start()
1221 folio_batch_release(&fbatch); in ceph_writepages_start()
1374 (int)fbatch.nr, fbatch.nr ? fbatch.folios[0] : NULL); in ceph_writepages_start()
[all …]
/linux/fs/btrfs/tests/
extent-io-tests.c
25 struct folio_batch fbatch; in process_page_range() local
32 folio_batch_init(&fbatch); in process_page_range()
36 end_index, &fbatch); in process_page_range()
38 struct folio *folio = fbatch.folios[i]; in process_page_range()
48 folio_batch_release(&fbatch); in process_page_range()
/linux/drivers/gpu/drm/i915/
i915_gpu_error.c
191 static void pool_fini(struct folio_batch *fbatch) in pool_fini() argument
193 folio_batch_release(fbatch); in pool_fini()
196 static int pool_refill(struct folio_batch *fbatch, gfp_t gfp) in pool_refill() argument
198 while (folio_batch_space(fbatch)) { in pool_refill()
205 folio_batch_add(fbatch, folio); in pool_refill()
211 static int pool_init(struct folio_batch *fbatch, gfp_t gfp) in pool_init() argument
215 folio_batch_init(fbatch); in pool_init()
217 err = pool_refill(fbatch, gfp); in pool_init()
219 pool_fini(fbatch); in pool_init()
224 static void *pool_alloc(struct folio_batch *fbatch, gfp_t gfp) in pool_alloc() argument
[all …]
/linux/fs/btrfs/
extent_io.c
204 struct folio_batch fbatch; in __process_folios_contig() local
207 folio_batch_init(&fbatch); in __process_folios_contig()
212 end_index, &fbatch); in __process_folios_contig()
214 struct folio *folio = fbatch.folios[i]; in __process_folios_contig()
219 folio_batch_release(&fbatch); in __process_folios_contig()
249 struct folio_batch fbatch; in lock_delalloc_folios() local
254 folio_batch_init(&fbatch); in lock_delalloc_folios()
259 end_index, &fbatch); in lock_delalloc_folios()
264 struct folio *folio = fbatch.folios[i]; in lock_delalloc_folios()
283 folio_batch_release(&fbatch); in lock_delalloc_folios()
[all …]
compression.c
287 struct folio_batch fbatch; in end_compressed_writeback() local
295 folio_batch_init(&fbatch); in end_compressed_writeback()
298 &fbatch); in end_compressed_writeback()
304 struct folio *folio = fbatch.folios[i]; in end_compressed_writeback()
309 folio_batch_release(&fbatch); in end_compressed_writeback()
/linux/fs/hugetlbfs/
inode.c
580 struct folio_batch fbatch; in remove_inode_hugepages() local
585 folio_batch_init(&fbatch); in remove_inode_hugepages()
587 while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) { in remove_inode_hugepages()
588 for (i = 0; i < folio_batch_count(&fbatch); ++i) { in remove_inode_hugepages()
589 struct folio *folio = fbatch.folios[i]; in remove_inode_hugepages()
605 folio_batch_release(&fbatch); in remove_inode_hugepages()
/linux/fs/ext4/
inode.c
1567 struct folio_batch fbatch; in mpage_release_unused_pages() local
1592 folio_batch_init(&fbatch); in mpage_release_unused_pages()
1594 nr = filemap_get_folios(mapping, &index, end, &fbatch); in mpage_release_unused_pages()
1598 struct folio *folio = fbatch.folios[i]; in mpage_release_unused_pages()
1615 folio_batch_release(&fbatch); in mpage_release_unused_pages()
2145 struct folio_batch fbatch; in mpage_map_and_submit_buffers() local
2160 folio_batch_init(&fbatch); in mpage_map_and_submit_buffers()
2162 nr = filemap_get_folios(inode->i_mapping, &start, end, &fbatch); in mpage_map_and_submit_buffers()
2166 struct folio *folio = fbatch.folios[i]; in mpage_map_and_submit_buffers()
2183 folio_batch_release(&fbatch); in mpage_map_and_submit_buffers()
[all …]
/linux/fs/f2fs/
compress.c
1979 struct folio_batch fbatch; in f2fs_invalidate_compress_pages() local
1986 folio_batch_init(&fbatch); in f2fs_invalidate_compress_pages()
1991 nr = filemap_get_folios(mapping, &index, end - 1, &fbatch); in f2fs_invalidate_compress_pages()
1996 struct folio *folio = fbatch.folios[i]; in f2fs_invalidate_compress_pages()
2012 folio_batch_release(&fbatch); in f2fs_invalidate_compress_pages()
