/linux/include/linux/

pagevec.h
    6  * folios. A folio_batch is a container which is used for that.
   20  * struct folio_batch - A collection of folios.
   23  * operating on a set of folios. The order of folios in the batch may be
   32  struct folio *folios[PAGEVEC_SIZE];  [member]
   36  * folio_batch_init() - Initialise a batch of folios
   39  * A freshly initialised folio_batch contains zero folios.
   77  fbatch->folios[fbatch->nr++] = folio;  [in folio_batch_add()]
   85  * Use this function to implement a queue of folios.
   93  return fbatch->folios[fbatch->i++];  [in folio_batch_next()]
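The folio_batch API in the hits above is small enough that a short usage sketch helps. This is a minimal sketch, not kernel code: collect_next_folio() and process_folio() are hypothetical stand-ins for whatever produces and consumes the folios.

    #include <linux/pagevec.h>

    /* Hypothetical producer/consumer, not part of the kernel API. */
    struct folio *collect_next_folio(void);
    void process_folio(struct folio *folio);

    static void folio_batch_example(void)
    {
            struct folio_batch fbatch;
            struct folio *folio;

            folio_batch_init(&fbatch);      /* freshly initialised: zero folios */

            while ((folio = collect_next_folio()) != NULL) {
                    /* folio_batch_add() returns the number of slots still free */
                    if (folio_batch_add(&fbatch, folio) == 0)
                            break;          /* batch is full, stop collecting */
            }

            /* Drain the batch as a queue, in the order folios were added. */
            while ((folio = folio_batch_next(&fbatch)) != NULL)
                    process_folio(folio);

            folio_batch_release(&fbatch);   /* drop the references held by the batch */
    }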
memcontrol.h
  392  * against some type of folios, e.g. slab folios or ex-slab folios or
  393  * kmem folios.
  413  * against some type of folios, e.g. slab folios or ex-slab folios or
  414  * LRU folios.
  434  * against some type of folios, e.g. slab folios or ex-slab folios.
  532  * this function against some types of folios, e.g. slab folios.
  653  * Do not use this for folios allocated for swapin.
  685  void __mem_cgroup_uncharge_folios(struct folio_batch *folios);
  686  static inline void mem_cgroup_uncharge_folios(struct folio_batch *folios)  [in mem_cgroup_uncharge_folios(), argument]
  690  __mem_cgroup_uncharge_folios(folios);  [in mem_cgroup_uncharge_folios()]
  [all …]
rmap.h
  258  * fairly large folios), turning it negative. In that case, just  [in folio_add_return_large_mapcount()]
  341  * See __folio_rmap_sanity_checks(), we might map large folios even without
  400  /* hugetlb folios are handled separately. */  [in __folio_rmap_sanity_checks()]
  407  * TODO: we get driver-allocated folios that have nothing to do with  [in __folio_rmap_sanity_checks()]
  409  * folio_test_large_rmappable() holds for large folios. We should  [in __folio_rmap_sanity_checks()]
  410  * handle any desired mapcount+stats accounting for these folios in  [in __folio_rmap_sanity_checks()]
  412  * we really only get rmappable folios.  [in __folio_rmap_sanity_checks()]
  424  * We don't support folios larger than a single PMD yet. So  [in __folio_rmap_sanity_checks()]
  444  * Anon folios must have an associated live anon_vma as long as they're  [in __folio_rmap_sanity_checks()]
  713  * private folios cannot get pinned and consequently this function cannot fail
  [all …]
/linux/drivers/dma-buf/

udmabuf.c
   29  struct folio **folios;  [member]
   32  * Unlike folios, pinned_folios is only used for unpin.
   58  pfn = folio_pfn(ubuf->folios[pgoff]);  [in udmabuf_vm_fault()]
   76  pfn = folio_pfn(ubuf->folios[pgoff]);  [in udmabuf_vm_fault()]
  123  pages[pg] = folio_page(ubuf->folios[pg],  [in vmap_udmabuf()]
  162  sg_set_folio(sgl, ubuf->folios[i], PAGE_SIZE,  [in get_sg_table()]
  210  ubuf->folios = kvmalloc_array(pgcnt, sizeof(*ubuf->folios), GFP_KERNEL);  [in init_udmabuf()]
  211  if (!ubuf->folios)  [in init_udmabuf()]
  231  kvfree(ubuf->folios);  [in deinit_udmabuf()]
  326  loff_t start, loff_t size, struct folio **folios)  [in udmabuf_pin_folios(), argument]
  [all …]
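A rough sketch of the pattern visible in these hits: a driver keeps an array of folio pointers, sized with kvmalloc_array(), and later consults it to turn a page offset into a PFN. The struct and function names below (my_buf and friends) are illustrative, not the udmabuf types.

    #include <linux/mm.h>
    #include <linux/slab.h>

    struct my_buf {                         /* illustrative, not the udmabuf struct */
            pgoff_t pagecount;
            struct folio **folios;
    };

    static int my_buf_alloc_folio_array(struct my_buf *buf, pgoff_t pgcnt)
    {
            buf->folios = kvmalloc_array(pgcnt, sizeof(*buf->folios), GFP_KERNEL);
            if (!buf->folios)
                    return -ENOMEM;
            buf->pagecount = pgcnt;
            return 0;
    }

    static unsigned long my_buf_pfn(struct my_buf *buf, pgoff_t pgoff)
    {
            /* A fault handler would feed this PFN to vmf_insert_pfn(). */
            return folio_pfn(buf->folios[pgoff]);
    }

    static void my_buf_free_folio_array(struct my_buf *buf)
    {
            kvfree(buf->folios);            /* frees the pointer array, not the folios */
    }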
/linux/lib/

test_kho.c
   34  struct folio **folios;  [member]
   84  struct folio *folio = state->folios[i];  [in kho_test_save_data()]
  166  state->folios[state->nr_folios++] = folio;  [in kho_test_generate_data()]
  178  folio_put(state->folios[i]);  [in kho_test_generate_data()]
  186  struct folio **folios;  [in kho_test_save(), local]
  193  folios = kvmalloc_array(max_nr, sizeof(*state->folios), GFP_KERNEL);  [in kho_test_save()]
  194  if (!folios)  [in kho_test_save()]
  196  state->folios = folios;  [in kho_test_save()]
  215  kvfree(folios);  [in kho_test_save()]
  320  folio_put(kho_test_state.folios[i]);  [in kho_test_cleanup()]
  [all …]
/linux/mm/

swap.c
  165  struct folio *folio = fbatch->folios[i];  [in folio_batch_move_lru()]
  376  struct folio *batch_folio = fbatch->folios[i];  [in __lru_cache_activate_folio()]
  626  * Lazyfree folios are clean anonymous folios. They have  [in lru_lazyfree()]
  628  * anonymous folios  [in lru_lazyfree()]
  906  * a list of folios to be migrated using folio_isolate_lru().
  907  * It drains folios on LRU cache and then disable on all cpus until
  937  * folios_put_refs - Reduce the reference count on a batch of folios.
  938  * @folios: The folios.
  941  * Like folio_put(), but for a batch of folios. This is more efficient
  943  * to be taken if the folios are freed. The folios batch is returned
  [all …]
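A hedged usage sketch for the interface documented above: folios_put_refs() drops a per-folio number of references in one call, and passing NULL for the refs array drops exactly one per folio. The gathering of refs is assumed to be done by the caller; put_batch() below is illustrative.

    #include <linux/mm.h>
    #include <linux/pagevec.h>

    static void put_batch(struct folio_batch *fbatch, unsigned int *refs)
    {
            /* refs[i] references are dropped from fbatch->folios[i]; NULL = one each */
            folios_put_refs(fbatch, refs);
            /* Per the kerneldoc above, the batch is handed back ready for reuse. */
    }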
migrate.c
  116  * TODO: these pages will not be folios in the future. All  [in isolate_movable_ops_page()]
  196  * TODO: these pages will not be folios in the future. All  [in putback_movable_ops_page()]
  229  * TODO: migration core will treat both pages as folios and lock them before
  232  * folios in the future, so that must be reworked.
  557  * 1 for anonymous folios without a mapping
  558  * 2 for folios with a mapping
  559  * 3 for folios with a mapping and the private flag set.
  657  * Note that anonymous folios are accounted for  [in __folio_migrate_mapping()]
  878  * folios that do not have private data.
  880  * Folios are locked upon entry and exit.
  [all …]
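The three-line refcount rule quoted above can be written out as a sketch. This encodes only the rule as stated; it is not the migration code's internal helper, and larger folios may have additional references on top of these baselines.

    #include <linux/pagemap.h>

    /* Sketch of the quoted rule only; not the helper used by the migration code. */
    static int expected_folio_refs_rule(struct address_space *mapping,
                                        struct folio *folio)
    {
            if (!mapping)
                    return 1;               /* anonymous, no mapping */
            if (folio_test_private(folio))
                    return 3;               /* mapping plus private data */
            return 2;                       /* mapping only */
    }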
gup.c
  196  * Folios that were pinned via memfd_pin_folios() or other similar routines
  441  * unpin_folios() - release an array of gup-pinned folios.
  442  * @folios: array of folios to be marked dirty and released.
  443  * @nfolios: number of folios in the @folios array.
  445  * For each folio in the @folios array, release the folio using gup_put_folio.
  449  void unpin_folios(struct folio **folios, unsigned long nfolios)  [in unpin_folios(), argument]
  454  * If this WARN_ON() fires, then the system *might* be leaking folios  [in unpin_folios()]
  464  if (folios[i] != folios[j])  [in unpin_folios()]
  467  if (folios[i])  [in unpin_folios()]
  468  gup_put_folio(folios[i], j - i, FOLL_PIN);  [in unpin_folios()]
  [all …]
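A minimal release path matching the kerneldoc above. The my_folios/my_nr arguments are assumed to come from a prior pin such as memfd_pin_folios(); the wrapper itself is illustrative.

    #include <linux/mm.h>
    #include <linux/slab.h>

    static void release_pinned_folios(struct folio **my_folios, unsigned long my_nr)
    {
            unpin_folios(my_folios, my_nr);         /* drops one pin per array entry */
            kvfree(my_folios);                      /* free the pointer array itself */
    }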
truncate.c
   73  if (xa_is_value(fbatch->folios[j]))  [in truncate_folio_batch_exceptionals()]
   81  if (xa_is_value(fbatch->folios[i])) {  [in truncate_folio_batch_exceptionals()]
  181  * Handle partial folios. The folio may be entirely within the
  229  * try to split at offset + length to make sure folios within  [in truncate_inode_partial_folio()]
  295  * It only drops clean, unused folios.
  379  truncate_cleanup_folio(fbatch.folios[i]);  [in truncate_inode_pages_range()]
  382  folio_unlock(fbatch.folios[i]);  [in truncate_inode_pages_range()]
  426  struct folio *folio = fbatch.folios[i];  [in truncate_inode_pages_range()]
  500  * mapping_try_invalidate - Invalidate all the evictable folios of one inode
  501  * @mapping: the address_space which holds the folios t
  [all …]
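The xa_is_value() checks in the hits above are the usual guard when a batch was filled from the page cache: some slots may hold shadow/value entries rather than folio pointers. A hedged sketch of that guard; handle_real_folio() is a hypothetical consumer.

    #include <linux/pagevec.h>
    #include <linux/xarray.h>

    void handle_real_folio(struct folio *folio);        /* hypothetical consumer */

    static void skip_value_entries(struct folio_batch *fbatch)
    {
            for (unsigned int i = 0; i < folio_batch_count(fbatch); i++) {
                    if (xa_is_value(fbatch->folios[i]))
                            continue;       /* shadow/value entry, not a folio */
                    handle_real_folio(fbatch->folios[i]);
            }
    }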
filemap.c
  178  /* hugetlb folios do not participate in page cache accounting. */  [in filemap_unaccount_folio()]
  245  * This must be called only on folios that are locked and have been
  266  * page_cache_delete_batch - delete several folios from page cache
  267  * @mapping: the mapping to which folios belong
  268  * @fbatch: batch of folios to delete
  270  * The function walks over mapping->i_pages and removes folios passed in
  281  XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index);  [in page_cache_delete_batch()]
  301  if (folio != fbatch->folios[i]) {  [in page_cache_delete_batch()]
  303  fbatch->folios[i]->index, folio);  [in page_cache_delete_batch()]
  330  struct folio *folio = fbatch->folios[i];  [in delete_from_page_cache_batch()]
  [all …]
vmscan.c
  100  /* Can active folios be deactivated as part of reclaim? */
  110  /* Can mapped folios be reclaimed? */
  113  /* Can folios be swapped as part of reclaim? */
  147  /* The file folios on the current node are dangerously low */
  159  /* The highest zone to isolate folios for reclaim from */
  388  * This misses isolated folios which are not accounted for to save counters.
  390  * not expected that isolated folios will be a dominating factor.
  525  * If there are a lot of dirty/writeback folios then do not  [in skip_throttle_noprogress()]
  526  * throttle as throttling will occur when the folios cycle  [in skip_throttle_noprogress()]
  564  * writeback to a slow device to excessive referenced folios at the tail  [in reclaim_throttle()]
  [all …]
/linux/fs/btrfs/

accessors.c
   46  * The extent buffer pages stored in the array folios may not form a contiguous
   59  char *kaddr = folio_address(eb->folios[idx]) + oif; \
   73  kaddr = folio_address(eb->folios[idx + 1]); \
   77  folio_address(eb->folios[idx + 1]), \
   89  char *kaddr = folio_address(eb->folios[idx]) + oif; \
  105  kaddr = folio_address(eb->folios[idx + 1]); \
  109  kaddr = folio_address(eb->folios[idx + 1]); \
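A sketch of the two-part copy those macros perform when a value straddles two entries of eb->folios[]. The function name and byte math here are illustrative; only the folio_address()/memcpy pattern is taken from the hits above.

    #include <linux/minmax.h>
    #include <linux/mm.h>
    #include <linux/string.h>

    /* Read "size" bytes starting at offset "oif" in eb_folios[idx]; the tail
     * may spill over into eb_folios[idx + 1]. */
    static void read_across_folios(struct folio **eb_folios, unsigned long idx,
                                   size_t oif, void *dst, size_t size)
    {
            size_t first = min(size, folio_size(eb_folios[idx]) - oif);
            char *kaddr = folio_address(eb_folios[idx]) + oif;

            memcpy(dst, kaddr, first);
            if (first < size) {
                    kaddr = folio_address(eb_folios[idx + 1]);
                    memcpy((char *)dst + first, kaddr, size - first);
            }
    }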
extent_io.c
  282  struct folio *folio = fbatch.folios[i];  [in __process_folios_contig()]
  323  struct folio *folio = fbatch.folios[i];  [in lock_delalloc_folios()]
  416  * folios in order, so we can't process delalloc bytes before  [in find_lock_delalloc_range()]
  423  * make sure to limit the number of folios we try to lock down  [in find_lock_delalloc_range()]
  428  /* step two, lock all the folios after the folios that has start */  [in find_lock_delalloc_range()]
  434  * Some of the folios are gone, lets avoid looping by  [in find_lock_delalloc_range()]
  626  * Populate every free slot in a provided array with folios using GFP_NOFS.
  628  * @nr_folios: number of folios to allocate
  629  * @order: the order of the folios to be allocated
  630  * @folio_array: the array to fill with folios; any existing non-NULL entries in
  [all …]
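The allocation helper documented above (nr_folios/order/folio_array) presumably follows the usual fill-the-empty-slots pattern; here is a hedged sketch of that pattern, not btrfs's actual function or its error handling.

    #include <linux/gfp.h>
    #include <linux/mm.h>

    static int fill_folio_array_sketch(unsigned int nr_folios, unsigned int order,
                                       struct folio **folio_array)
    {
            for (unsigned int i = 0; i < nr_folios; i++) {
                    if (folio_array[i])
                            continue;       /* existing non-NULL entries are kept */
                    folio_array[i] = folio_alloc(GFP_NOFS, order);
                    if (!folio_array[i])
                            return -ENOMEM; /* caller is assumed to clean up */
            }
            return 0;
    }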
extent_io.h
  111  * Pointers to all the folios of the extent buffer.
  115  struct folio *folios[INLINE_EXTENT_BUFFER_PAGES];  [member]
  152  * 1.2) Several page sized folios  [in get_eb_offset_in_folio()]
  162  return offset_in_folio(eb->folios[0], offset + eb->start);  [in get_eb_offset_in_folio()]
  173  * 1.2) Several page sized folios  [in get_eb_folio_index()]
  286  * This can only be determined at runtime by checking eb::folios[0].
  290  * single-paged folios.
  296  if (!eb->folios[0])  [in num_extent_folios()]
  298  if (folio_order(eb->folios[0]))  [in num_extent_folios()]
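Usage-wise, the helper above lets callers loop over however many folios back the buffer, whether that is one large folio or several single-page ones. A trivial hedged example, assuming the btrfs-internal extent_io.h is in scope:

    static void dump_eb_folio_sizes(struct extent_buffer *eb)
    {
            for (int i = 0; i < num_extent_folios(eb); i++)
                    pr_debug("eb folio %d: %zu bytes\n", i,
                             folio_size(eb->folios[i]));
    }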
compression.h
   47  /* Number of compressed folios in the array. */
   50  /* The folios with the compressed data on them. */
  100  u64 start, struct folio **folios, unsigned long *out_folios,
  156  u64 start, struct folio **folios, unsigned long *out_folios,
  167  u64 start, struct folio **folios, unsigned long *out_folios,
  177  u64 start, struct folio **folios, unsigned long *out_folios,
defrag.c
  858  /* TODO: Add order fgp order flags when large folios are fully enabled. */  [in defrag_prepare_one_folio()]
  868  * The IO for such large folios is not fully tested, thus return  [in defrag_prepare_one_folio()]
  869  * an error to reject such folios unless it's an experimental build.  [in defrag_prepare_one_folio()]
 1154  struct folio **folios, int nr_pages,  [in defrag_one_locked_target(), argument]
 1174  * Due to possible large folios, we have to check all folios one by one.  [in defrag_one_locked_target()]
 1176  for (int i = 0; i < nr_pages && folios[i]; i++) {  [in defrag_one_locked_target()]
 1177  struct folio *folio = folios[i];  [in defrag_one_locked_target()]
 1200  struct folio **folios;  [in defrag_one_range(), local]
 1210  folios = kcalloc(nr_pages, sizeof(struct folio *), GFP_NOFS);  [in defrag_one_range()]
 1211  if (!folios)  [in defrag_one_range()]
  [all …]
/linux/Documentation/mm/

unevictable-lru.rst
   13  folios.
   28  folios and to hide these folios from vmscan. This mechanism is based on a patch
   72  The Unevictable LRU infrastructure maintains unevictable folios as if they were
   75  (1) We get to "treat unevictable folios just like we treat other folios in the
   80  (2) We want to be able to migrate unevictable folios between nodes for memory
   82  can only migrate folios that it can successfully isolate from the LRU
   83  lists (or "Movable" folios: outside of consideration here). If we were to
   84  maintain folios elsewhere than on an LRU-like list, where they can be
   88  anonymous, swap-backed folios. This differentiation is only important
   89  while the folios are, in fact, evictable.
  [all …]
multigen_lru.rst
   92  truncated generation number is an index to ``lrugen->folios[]``. The
   96  ``lrugen->folios[]``; otherwise it stores zero.
  100  generations, tiers do not have dedicated ``lrugen->folios[]``. In
  131  increments ``min_seq`` when ``lrugen->folios[]`` indexed by
  226  since each node and memcg combination has an LRU of folios (see
  232  the active/inactive LRU (of folios):
  255  The multi-gen LRU (of folios) can be disassembled into the following
/linux/Documentation/ABI/testing/

sysfs-fs-erofs
   25  compressed folios:
   27  - 1 : invalidate cached compressed folios
   29  - 3 : drop in-memory pclusters and cached compressed folios
/linux/tools/mm/

thp_swap_allocator_test.c
    9  * 64KB THP and the other area for small folios. The second memory
   63  * currently don't support large folios swap-in.
  134  fprintf(stderr, "Failed to allocate large folios memory\n");  [in main()]
  147  fprintf(stderr, "Failed to allocate small folios memory\n");  [in main()]
  182  * The following setup creates a 1:1 ratio of mTHP to small folios  [in main()]
thpmaps
  252  folios = indexes[index_next:index_end][heads[index_next:index_end]]
  256  nr = (int(folios[0]) if len(folios) else index_end) - index_next
  261  if len(folios):
  264  nr = index_end - int(folios[-1])
  265  folios = folios[:-1]
  270  if len(folios):
  271  folio_nrs = np.append(np.diff(folios), np.uint64(index_end - folios[-1]))
  273  for index, order in zip(folios, folio_orders):
/linux/fs/fuse/

ioctl.c
  254  ap.folios = fuse_folios_alloc(fm->fc->max_pages, GFP_KERNEL, &ap.descs);  [in fuse_do_ioctl()]
  256  if (!ap.folios || !iov_page)  [in fuse_do_ioctl()]
  310  ap.folios[ap.num_folios] = folio_alloc(GFP_KERNEL | __GFP_HIGHMEM, 0);  [in fuse_do_ioctl()]
  311  if (!ap.folios[ap.num_folios])  [in fuse_do_ioctl()]
  330  c = copy_folio_from_iter(ap.folios[i], 0, PAGE_SIZE, &ii);  [in fuse_do_ioctl()]
  368  vaddr = kmap_local_folio(ap.folios[0], 0);  [in fuse_do_ioctl()]
  397  c = copy_folio_to_iter(ap.folios[i], 0, PAGE_SIZE, &ii);  [in fuse_do_ioctl()]
  405  folio_put(ap.folios[--ap.num_folios]);  [in fuse_do_ioctl()]
  406  kfree(ap.folios);  [in fuse_do_ioctl()]
file.c
  595  folio_mark_dirty_lock(ap->folios[i]);  [in fuse_release_user_pages()]
  597  unpin_folio(ap->folios[i]);  [in fuse_release_user_pages()]
  678  ia->ap.folios = fuse_folios_alloc(nfolios, GFP_KERNEL,  [in fuse_io_alloc()]
  680  if (!ia->ap.folios) {  [in fuse_io_alloc()]
  690  kfree(ia->ap.folios);  [in fuse_io_free()]
  793  loff_t pos = folio_pos(ap->folios[0]) + num_read;  [in fuse_short_read()]
  812  .ap.folios = &folio,  [in fuse_do_readfolio()]
  878  mapping = ap->folios[0]->mapping;  [in fuse_readpages_end()]
  890  folio_end_read(ap->folios[i], !err);  [in fuse_readpages_end()]
  891  folio_put(ap->folios[i]);  [in fuse_readpages_end()]
  [all …]
/linux/fs/orangefs/

inode.c
   74  struct folio **folios;  [member]
   91  start = offset_in_folio(ow->folios[0], ow->off);  [in orangefs_writepages_work()]
   93  folio_start_writeback(ow->folios[i]);  [in orangefs_writepages_work()]
   94  bvec_set_folio(&ow->bv[i], ow->folios[i],  [in orangefs_writepages_work()]
   95  folio_size(ow->folios[i]) - start, start);  [in orangefs_writepages_work()]
  115  wrp = folio_detach_private(ow->folios[i]);  [in orangefs_writepages_work()]
  117  folio_end_writeback(ow->folios[i]);  [in orangefs_writepages_work()]
  118  folio_unlock(ow->folios[i]);  [in orangefs_writepages_work()]
  144  ow->folios[ow->nfolios++] = folio;  [in orangefs_writepages_callback()]
  156  ow->folios[ow->nfolios++] = folio;  [in orangefs_writepages_callback()]
  [all …]
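The writeback setup in those hits maps each gathered folio onto one bio_vec, with only the first folio carrying a leading offset. A sketch with an illustrative stand-in struct (my_writepages), not the orangefs type:

    #include <linux/bvec.h>
    #include <linux/pagemap.h>

    struct my_writepages {                  /* illustrative, not the orangefs struct */
            loff_t off;
            unsigned int nfolios;
            struct folio **folios;
            struct bio_vec *bv;
    };

    static void setup_writeback_bvecs(struct my_writepages *ow)
    {
            size_t start = offset_in_folio(ow->folios[0], ow->off);

            for (unsigned int i = 0; i < ow->nfolios; i++) {
                    folio_start_writeback(ow->folios[i]);
                    bvec_set_folio(&ow->bv[i], ow->folios[i],
                                   folio_size(ow->folios[i]) - start, start);
                    start = 0;      /* folios after the first start at offset 0 */
            }
    }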
/linux/fs/gfs2/

aops.c
  121  * gfs2_jdata_writeback - Write jdata folios to the log
  182  * gfs2_write_jdata_batch - Write back a folio batch's worth of folios
  185  * @fbatch: The batch of folios
  205  size += folio_size(fbatch->folios[i]);  [in gfs2_write_jdata_batch()]
  213  struct folio *folio = fbatch->folios[i];  [in gfs2_write_jdata_batch()]
  663  * mm accommodates an old ext3 case where clean folios might  [in gfs2_release_folio()]
  665  * dirty folios to ->release_folio() via shrink_active_list().  [in gfs2_release_folio()]
  667  * As a workaround, we skip folios that contain dirty buffers  [in gfs2_release_folio()]
  668  * below. Once ->release_folio isn't called on dirty folios  [in gfs2_release_folio()]
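The size accounting visible in the gfs2 hits is just a sum of folio_size() over the batch; as a standalone hedged helper (the function name is illustrative):

    #include <linux/mm.h>
    #include <linux/pagevec.h>

    static size_t folio_batch_bytes(struct folio_batch *fbatch)
    {
            size_t size = 0;

            for (unsigned int i = 0; i < folio_batch_count(fbatch); i++)
                    size += folio_size(fbatch->folios[i]);
            return size;
    }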