
Searched full:folios (Results 1 – 25 of 145) sorted by relevance


/linux/include/linux/
pagevec.h
6 * folios. A folio_batch is a container which is used for that.
20 * struct folio_batch - A collection of folios.
23 * operating on a set of folios. The order of folios in the batch may be
32 struct folio *folios[PAGEVEC_SIZE]; member
36 * folio_batch_init() - Initialise a batch of folios
39 * A freshly initialised folio_batch contains zero folios.
77 fbatch->folios[fbatch->nr++] = folio; in folio_batch_add()
85 * Use this function to implement a queue of folios.
93 return fbatch->folios[fbatch->i++]; in folio_batch_next()
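The pagevec.h hits above document the folio_batch API (folio_batch_init(), folio_batch_add(), folio_batch_next()). A minimal sketch of the fill-and-drain pattern they describe, assuming kernel context and with the per-folio work elided:

        #include <linux/pagevec.h>

        /*
         * Sketch: gather folios into a batch, then drain it as a queue.
         * folio_batch_add() returns the number of slots still free, so a
         * return of 0 means the batch is full and should be processed.
         */
        static void process_folios(struct folio **src, unsigned int nr)
        {
                struct folio_batch fbatch;
                struct folio *folio;
                unsigned int i;

                folio_batch_init(&fbatch);      /* starts with zero folios */
                for (i = 0; i < nr; i++) {
                        if (folio_batch_add(&fbatch, src[i]) == 0) {
                                /* batch full: consume it in order */
                                while ((folio = folio_batch_next(&fbatch))) {
                                        /* ... operate on each folio ... */
                                }
                                folio_batch_reinit(&fbatch);
                        }
                }
                while ((folio = folio_batch_next(&fbatch))) {
                        /* ... operate on the remaining folios ... */
                }
        }

When the batch owns references on its folios, folio_batch_release() would replace the reinit, since it also drops those references.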
rmap.h
258 * fairly large folios), turning it negative. In that case, just in folio_add_return_large_mapcount()
341 * See __folio_rmap_sanity_checks(), we might map large folios even without
410 /* hugetlb folios are handled separately. */ in __folio_rmap_sanity_checks()
417 * TODO: we get driver-allocated folios that have nothing to do with in __folio_rmap_sanity_checks()
419 * folio_test_large_rmappable() holds for large folios. We should in __folio_rmap_sanity_checks()
420 * handle any desired mapcount+stats accounting for these folios in in __folio_rmap_sanity_checks()
422 * we really only get rmappable folios. in __folio_rmap_sanity_checks()
434 * We don't support folios larger than a single PMD yet. So in __folio_rmap_sanity_checks()
454 * Anon folios must have an associated live anon_vma as long as they're in __folio_rmap_sanity_checks()
719 * private folios cannot get pinned and consequently this function cannot fail
[all …]
/linux/drivers/dma-buf/
udmabuf.c
29 struct folio **folios; member
32 * Unlike folios, pinned_folios is only used for unpin.
58 pfn = folio_pfn(ubuf->folios[pgoff]); in udmabuf_vm_fault()
76 pfn = folio_pfn(ubuf->folios[pgoff]); in udmabuf_vm_fault()
123 pages[pg] = folio_page(ubuf->folios[pg], in vmap_udmabuf()
162 sg_set_folio(sgl, ubuf->folios[i], PAGE_SIZE, in get_sg_table()
210 ubuf->folios = kvmalloc_array(pgcnt, sizeof(*ubuf->folios), GFP_KERNEL); in init_udmabuf()
211 if (!ubuf->folios) in init_udmabuf()
231 kvfree(ubuf->folios); in deinit_udmabuf()
326 loff_t start, loff_t size, struct folio **folios) in udmabuf_pin_folios() argument
[all …]
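The udmabuf hits show the common pattern of keeping folios in a dynamically sized pointer array: kvmalloc_array() at init time, kvfree() at teardown. A rough sketch of that pattern (the folio_holder struct and function names below are hypothetical, not udmabuf's code):

        #include <linux/mm.h>
        #include <linux/slab.h>

        /* Hypothetical container, mirroring the "struct folio **folios; member" hit above. */
        struct folio_holder {
                struct folio **folios;
                pgoff_t pgcnt;
        };

        static int folio_holder_init(struct folio_holder *h, pgoff_t pgcnt)
        {
                /* kvmalloc_array() falls back to vmalloc for large counts. */
                h->folios = kvmalloc_array(pgcnt, sizeof(*h->folios), GFP_KERNEL);
                if (!h->folios)
                        return -ENOMEM;
                h->pgcnt = pgcnt;
                return 0;
        }

        static void folio_holder_free(struct folio_holder *h)
        {
                kvfree(h->folios);      /* works for both kmalloc and vmalloc backing */
                h->folios = NULL;
        }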
/linux/lib/
test_kho.c
34 struct folio **folios; member
79 struct folio *folio = state->folios[i]; in kho_test_save_data()
155 state->folios[state->nr_folios++] = folio; in kho_test_generate_data()
167 folio_put(state->folios[i]); in kho_test_generate_data()
174 struct folio **folios __free(kvfree) = NULL; in kho_test_save()
181 folios = kvmalloc_array(max_nr, sizeof(*state->folios), GFP_KERNEL); in kho_test_save()
182 if (!folios) in kho_test_save()
184 state->folios = folios; in kho_test_save()
291 folio_put(kho_test_state.folios[i]); in kho_test_cleanup()
293 kvfree(kho_test_state.folios); in kho_test_cleanup()
/linux/mm/
swap.c
165 struct folio *folio = fbatch->folios[i]; in folio_batch_move_lru()
375 struct folio *batch_folio = fbatch->folios[i]; in __lru_cache_activate_folio()
625 * Lazyfree folios are clean anonymous folios. They have in lru_lazyfree()
627 * anonymous folios in lru_lazyfree()
901 * a list of folios to be migrated using folio_isolate_lru().
902 * It drains folios on LRU cache and then disable on all cpus until in lru_cache_disable()
932 * folios_put_refs - Reduce the reference count on a batch of folios.
933 * @folios: The folios
941 folios_put_refs(struct folio_batch *folios, unsigned int *refs) folios_put_refs() argument
[all …]
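folios_put_refs(), whose kernel-doc appears above, drops a caller-supplied number of references from each folio in a batch. A minimal sketch of the batching pattern, mirroring how free_pages_and_swap_cache() in mm/swap_state.c (further down in these results) uses it; drop_folio_refs() is a hypothetical wrapper:

        #include <linux/mm.h>
        #include <linux/pagevec.h>

        /*
         * Sketch: batch folios together with a per-folio reference count
         * and release them in one call.
         */
        static void drop_folio_refs(struct folio **src, unsigned int *nr_refs,
                                    unsigned int count)
        {
                struct folio_batch fbatch;
                unsigned int refs[PAGEVEC_SIZE];
                unsigned int i;

                folio_batch_init(&fbatch);
                for (i = 0; i < count; i++) {
                        refs[folio_batch_count(&fbatch)] = nr_refs[i];
                        if (folio_batch_add(&fbatch, src[i]) == 0)
                                folios_put_refs(&fbatch, refs);
                }
                if (folio_batch_count(&fbatch))
                        folios_put_refs(&fbatch, refs);
        }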
migrate.c
116 * TODO: these pages will not be folios in the future. All in isolate_movable_ops_page()
196 * TODO: these pages will not be folios in the future. All in putback_movable_ops_page()
229 * TODO: migration core will treat both pages as folios and lock them before
232 * folios in the future, so that must be reworked.
559 * 1 for anonymous folios without a mapping
560 * 2 for folios with a mapping
561 * 3 for folios with a mapping and the private flag set.
653 * Note that anonymous folios are accounted for in __folio_migrate_mapping()
874 * folios that do not have private data.
876 * Folios are locked upon entry and exit.
[all …]
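The migrate.c comment quoted above states the expected reference counts during migration: 1 for an anonymous folio without a mapping, 2 with a mapping, 3 with a mapping plus the private flag. A hypothetical helper, expected_migrate_refs(), illustrating just that rule (simplified to order-0 folios; not the kernel's own accounting function):

        #include <linux/pagemap.h>
        #include <linux/page-flags.h>

        /*
         * Hypothetical illustration of the rule quoted from mm/migrate.c:
         *   1 ref for anonymous folios without a mapping,
         *   2 refs for folios with a mapping,
         *   3 refs for folios with a mapping and the private flag set.
         */
        static int expected_migrate_refs(struct address_space *mapping,
                                         struct folio *folio)
        {
                int refs = 1;                           /* reference held by the migration caller */

                if (mapping) {
                        refs++;                         /* the page cache reference */
                        if (folio_test_private(folio))
                                refs++;                 /* fs private data, e.g. buffers */
                }
                return refs;
        }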
gup.c
201 * Folios that were pinned via memfd_pin_folios() or other similar routines
440 * unpin_folios() - release an array of gup-pinned folios.
441 * @folios: array of folios to be marked dirty and released.
442 * @nfolios: number of folios in the @folios array.
444 * For each folio in the @folios array, release the folio using gup_put_folio.
448 void unpin_folios(struct folio **folios, unsigned long nfolios) in unpin_folios() argument
453 * If this WARN_ON() fires, then the system *might* be leaking folios in unpin_folios()
463 if (folios[i] != folios[j]) in unpin_folios()
466 if (folios[i]) in unpin_folios()
467 gup_put_folio(folios[i], j - i, FOLL_PIN); in unpin_folios()
[all …]
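unpin_folios(), documented above, undoes FOLL_PIN references for a whole array, skipping NULL slots and collapsing consecutive duplicate entries into a single gup_put_folio() call. A small sketch of a teardown path for an array filled by a pinning API such as memfd_pin_folios() (release_pinned_folios() is a hypothetical helper):

        #include <linux/mm.h>
        #include <linux/slab.h>

        /*
         * Sketch: tear down an array of pinned folios. unpin_folios() drops
         * the FOLL_PIN references; the pointer array itself is freed
         * separately.
         */
        static void release_pinned_folios(struct folio **folios, unsigned long nfolios)
        {
                unpin_folios(folios, nfolios);  /* drop the pin references */
                kvfree(folios);                 /* free the pointer array */
        }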
truncate.c
73 if (xa_is_value(fbatch->folios[j])) in truncate_folio_batch_exceptionals()
81 if (xa_is_value(fbatch->folios[i])) { in truncate_folio_batch_exceptionals()
181 * Handle partial folios. The folio may be entirely within the
229 * try to split at offset + length to make sure folios within in truncate_inode_partial_folio()
295 * It only drops clean, unused folios.
379 truncate_cleanup_folio(fbatch.folios[i]); in truncate_inode_pages_range()
382 folio_unlock(fbatch.folios[i]); in truncate_inode_pages_range()
426 struct folio *folio = fbatch.folios[i]; in truncate_inode_pages_range()
500 * mapping_try_invalidate - Invalidate all the evictable folios of one inode
501 * @mapping: the address_space which holds the folios t
[all …]
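The truncate.c hits check xa_is_value() because a folio_batch filled by an entry-returning lookup can hold xarray value entries (shadow or swap entries) rather than folios. A short sketch of the required check (walk_entries() is hypothetical; find_get_entries() is mentioned only as an example of such a lookup):

        #include <linux/pagevec.h>
        #include <linux/xarray.h>

        /*
         * Sketch: entries returned by a find_get_entries()-style lookup must
         * be tested with xa_is_value() before being treated as folios, as
         * truncate_folio_batch_exceptionals() does above.
         */
        static void walk_entries(struct folio_batch *fbatch)
        {
                unsigned int i;

                for (i = 0; i < folio_batch_count(fbatch); i++) {
                        struct folio *folio = fbatch->folios[i];

                        if (xa_is_value(folio)) {
                                /* exceptional entry: not a folio, handle separately */
                                continue;
                        }
                        /* ... operate on the real folio ... */
                }
        }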
filemap.c
178 /* hugetlb folios do not participate in page cache accounting. */ in filemap_unaccount_folio()
242 * This must be called only on folios that are locked and have been
263 * page_cache_delete_batch - delete several folios from page cache
264 * @mapping: the mapping to which folios belong
265 * @fbatch: batch of folios to delete
267 * The function walks over mapping->i_pages and removes folios passed in
278 XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index); in page_cache_delete_batch()
298 if (folio != fbatch->folios[i]) { in page_cache_delete_batch()
300 fbatch->folios[i]->index, folio); in page_cache_delete_batch()
327 struct folio *folio = fbatch->folios[ in delete_from_page_cache_batch()
[all …]
vmscan.c
100 /* Can active folios be deactivated as part of reclaim? */
110 /* Can mapped folios be reclaimed? */
113 /* Can folios be swapped as part of reclaim? */
147 /* The file folios on the current node are dangerously low */
159 /* The highest zone to isolate folios for reclaim from */
388 * This misses isolated folios which are not accounted for to save counters.
390 * not expected that isolated folios will be a dominating factor.
532 * If there are a lot of dirty/writeback folios then do not in skip_throttle_noprogress()
533 * throttle as throttling will occur when the folios cycle in skip_throttle_noprogress()
571 * writeback to a slow device to excessive referenced folios at the tail in reclaim_throttle()
[all …]
swap_state.c
138 * This must be called only on folios that have
169 * This must be called only on folios that have
250 struct folio_batch folios; in free_pages_and_swap_cache() local
253 folio_batch_init(&folios); in free_pages_and_swap_cache()
258 refs[folios.nr] = 1; in free_pages_and_swap_cache()
261 refs[folios.nr] = encoded_nr_pages(pages[++i]); in free_pages_and_swap_cache()
263 if (folio_batch_add(&folios, folio) == 0) in free_pages_and_swap_cache()
264 folios_put_refs(&folios, refs); in free_pages_and_swap_cache()
266 if (folios.nr) in free_pages_and_swap_cache()
267 folios_put_refs(&folios, refs); in free_pages_and_swap_cache()
/linux/fs/btrfs/
accessors.c
46 * The extent buffer pages stored in the array folios may not form a contiguous
59 char *kaddr = folio_address(eb->folios[idx]) + oif; \
73 kaddr = folio_address(eb->folios[idx + 1]); \
77 folio_address(eb->folios[idx + 1]), \
89 char *kaddr = folio_address(eb->folios[idx]) + oif; \
105 kaddr = folio_address(eb->folios[idx + 1]); \
109 kaddr = folio_address(eb->folios[idx + 1]); \
extent_io.c
216 struct folio *folio = fbatch.folios[i]; in __process_folios_contig()
257 struct folio *folio = fbatch.folios[i]; in lock_delalloc_folios()
343 * folios in order, so we can't process delalloc bytes before in find_lock_delalloc_range()
350 * make sure to limit the number of folios we try to lock down in find_lock_delalloc_range()
355 /* step two, lock all the folios after the folios that has start */ in find_lock_delalloc_range()
360 /* some of the folios are gone, lets avoid looping by in find_lock_delalloc_range()
552 * Populate every free slot in a provided array with folios using GFP_NOFS.
554 * @nr_folios: number of folios to allocate
555 * @folio_array: the array to fill with folios; any existing non-NULL entries in
558 * Return: 0 if all folios were able to be allocated;
[all …]
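The extent_io.c kernel-doc above describes filling every free slot of a folio array with GFP_NOFS allocations while leaving existing entries alone. A rough sketch of that described behaviour, not the btrfs implementation itself (fill_folio_array() is hypothetical):

        #include <linux/gfp.h>
        #include <linux/mm.h>

        /*
         * Sketch: fill every NULL slot of a folio array with a freshly
         * allocated order-0 folio using GFP_NOFS, keeping existing entries.
         */
        static int fill_folio_array(unsigned int nr_folios, struct folio **folio_array)
        {
                unsigned int i;

                for (i = 0; i < nr_folios; i++) {
                        if (folio_array[i])
                                continue;       /* keep existing non-NULL entries */
                        folio_array[i] = folio_alloc(GFP_NOFS, 0);
                        if (!folio_array[i])
                                return -ENOMEM; /* caller decides whether to unwind */
                }
                return 0;
        }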
compression.h
47 /* Number of compressed folios in the array. */
50 /* The folios with the compressed data on them. */
92 u64 start, struct folio **folios, unsigned long *out_folios,
159 u64 start, struct folio **folios, unsigned long *out_folios,
170 u64 start, struct folio **folios, unsigned long *out_folios,
180 u64 start, struct folio **folios, unsigned long *out_folios,
extent_io.h
111 * Pointers to all the folios of the extent buffer.
115 struct folio *folios[INLINE_EXTENT_BUFFER_PAGES]; member
152 * 1.2) Several page sized folios in get_eb_offset_in_folio()
162 return offset_in_folio(eb->folios[0], offset + eb->start); in get_eb_offset_in_folio()
173 * 1.2) Several page sized folios in get_eb_folio_index()
286 * This can only be determined at runtime by checking eb::folios[0].
290 * single-paged folios.
296 if (!eb->folios[0]) in num_extent_folios()
298 if (folio_order(eb->folios[0])) in num_extent_folios()
defrag.c
858 /* TODO: Add order fgp order flags when large folios are fully enabled. */ in defrag_prepare_one_folio()
868 * The IO for such large folios is not fully tested, thus return in defrag_prepare_one_folio()
869 * an error to reject such folios unless it's an experimental build. in defrag_prepare_one_folio()
1154 struct folio **folios, int nr_pages, in defrag_one_locked_target() argument
1174 * Due to possible large folios, we have to check all folios one by one. in defrag_one_locked_target()
1176 for (int i = 0; i < nr_pages && folios[i]; i++) { in defrag_one_locked_target()
1177 struct folio *folio = folios[i]; in defrag_one_locked_target()
1200 struct folio **folios; in defrag_one_range() local
1210 folios = kcalloc(nr_pages, sizeof(struct folio *), GFP_NOFS); in defrag_one_range()
1211 if (!folios) in defrag_one_range()
[all …]
/linux/Documentation/mm/
unevictable-lru.rst
13 folios.
28 folios and to hide these folios from vmscan. This mechanism is based on a patch
72 The Unevictable LRU infrastructure maintains unevictable folios as if they were
75 (1) We get to "treat unevictable folios just like we treat other folios in the
80 (2) We want to be able to migrate unevictable folios between nodes for memory
82 can only migrate folios that it can successfully isolate from the LRU
83 lists (or "Movable" folios: outside of consideration here). If we were to
84 maintain folios elsewhere than on an LRU-like list, where they can be
88 anonymous, swap-backed folios. This differentiation is only important
89 while the folios are, in fact, evictable.
[all …]
multigen_lru.rst
92 truncated generation number is an index to ``lrugen->folios[]``. The
96 ``lrugen->folios[]``; otherwise it stores zero.
100 generations, tiers do not have dedicated ``lrugen->folios[]``. In
131 increments ``min_seq`` when ``lrugen->folios[]`` indexed by
226 since each node and memcg combination has an LRU of folios (see
232 the active/inactive LRU (of folios):
255 The multi-gen LRU (of folios) can be disassembled into the following
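The multigen_lru.rst lines explain that the "truncated generation number" indexing ``lrugen->folios[]`` is the generation sequence number reduced modulo the number of generations. A one-line sketch of that mapping (gen_index() is hypothetical; the in-kernel helper for this is lru_gen_from_seq()):

        #include <linux/mmzone.h>       /* MAX_NR_GENS, with CONFIG_LRU_GEN */

        /*
         * Sketch: truncate a generation sequence number into an index for
         * lrugen->folios[].
         */
        static inline int gen_index(unsigned long seq)
        {
                return seq % MAX_NR_GENS;
        }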
/linux/Documentation/ABI/testing/
sysfs-fs-erofs
25 compressed folios:
27 - 1 : invalidate cached compressed folios
29 - 3 : drop in-memory pclusters and cached compressed folios
/linux/tools/mm/
thp_swap_allocator_test.c
9 * 64KB THP and the other area for small folios. The second memory
63 * currently don't support large folios swap-in.
134 fprintf(stderr, "Failed to allocate large folios memory\n"); in main()
147 fprintf(stderr, "Failed to allocate small folios memory\n"); in main()
182 * The following setup creates a 1:1 ratio of mTHP to small folios in main()
thpmaps
252 folios = indexes[index_next:index_end][heads[index_next:index_end]]
256 nr = (int(folios[0]) if len(folios) else index_end) - index_next
261 if len(folios):
264 nr = index_end - int(folios[-1])
265 folios = folios[:-1]
270 if len(folios):
271 folio_nrs = np.append(np.diff(folios), np.uint64(index_end - folios[-1]))
273 for index, order in zip(folios, folio_orders):
/linux/fs/fuse/
ioctl.c
254 ap.folios = fuse_folios_alloc(fm->fc->max_pages, GFP_KERNEL, &ap.descs); in fuse_do_ioctl()
256 if (!ap.folios || !iov_page) in fuse_do_ioctl()
310 ap.folios[ap.num_folios] = folio_alloc(GFP_KERNEL | __GFP_HIGHMEM, 0); in fuse_do_ioctl()
311 if (!ap.folios[ap.num_folios]) in fuse_do_ioctl()
330 c = copy_folio_from_iter(ap.folios[i], 0, PAGE_SIZE, &ii); in fuse_do_ioctl()
368 vaddr = kmap_local_folio(ap.folios[0], 0); in fuse_do_ioctl()
397 c = copy_folio_to_iter(ap.folios[i], 0, PAGE_SIZE, &ii); in fuse_do_ioctl()
405 folio_put(ap.folios[--ap.num_folios]); in fuse_do_ioctl()
406 kfree(ap.folios); in fuse_do_ioctl()
file.c
589 folio_mark_dirty_lock(ap->folios[i]); in fuse_release_user_pages()
591 unpin_folio(ap->folios[i]); in fuse_release_user_pages()
672 ia->ap.folios = fuse_folios_alloc(nfolios, GFP_KERNEL, in fuse_io_alloc()
674 if (!ia->ap.folios) { in fuse_io_alloc()
684 kfree(ia->ap.folios); in fuse_io_free()
787 loff_t pos = folio_pos(ap->folios[0]) + num_read; in fuse_short_read()
806 .ap.folios = &folio, in fuse_do_readfolio()
871 mapping = ap->folios[i]->mapping; in fuse_readpages_end()
886 folio_end_read(ap->folios[i], !err); in fuse_readpages_end()
887 folio_put(ap->folios[i]); in fuse_readpages_end()
[all …]
/linux/fs/orangefs/
inode.c
74 struct folio **folios; member
91 start = offset_in_folio(ow->folios[0], ow->off); in orangefs_writepages_work()
93 folio_start_writeback(ow->folios[i]); in orangefs_writepages_work()
94 bvec_set_folio(&ow->bv[i], ow->folios[i], in orangefs_writepages_work()
95 folio_size(ow->folios[i]) - start, start); in orangefs_writepages_work()
115 wrp = folio_detach_private(ow->folios[i]); in orangefs_writepages_work()
117 folio_end_writeback(ow->folios[i]); in orangefs_writepages_work()
118 folio_unlock(ow->folios[i]); in orangefs_writepages_work()
144 ow->folios[ow->nfolios++] = folio; in orangefs_writepages_callback()
156 ow->folios[ow->nfolios++] = folio; in orangefs_writepages_callback()
[all …]
/linux/fs/gfs2/
aops.c
121 * gfs2_jdata_writeback - Write jdata folios to the log
182 * gfs2_write_jdata_batch - Write back a folio batch's worth of folios
185 * @fbatch: The batch of folios
205 size += folio_size(fbatch->folios[i]); in gfs2_write_jdata_batch()
213 struct folio *folio = fbatch->folios[i]; in gfs2_write_jdata_batch()
663 * mm accommodates an old ext3 case where clean folios might in gfs2_release_folio()
665 * dirty folios to ->release_folio() via shrink_active_list(). in gfs2_release_folio()
667 * As a workaround, we skip folios that contain dirty buffers in gfs2_release_folio()
668 * below. Once ->release_folio isn't called on dirty folios in gfs2_release_folio()
