Searched refs:folios (Results 1 – 25 of 60) sorted by relevance

/linux/drivers/dma-buf/
udmabuf.c
29 struct folio **folios; member
58 pfn = folio_pfn(ubuf->folios[pgoff]); in udmabuf_vm_fault()
76 pfn = folio_pfn(ubuf->folios[pgoff]); in udmabuf_vm_fault()
127 unsigned long pfn = folio_pfn(ubuf->folios[pg]); in vmap_udmabuf()
169 sg_set_folio(sgl, ubuf->folios[i], PAGE_SIZE, in get_sg_table()
217 ubuf->folios = kvmalloc_array(pgcnt, sizeof(*ubuf->folios), GFP_KERNEL); in init_udmabuf()
218 if (!ubuf->folios) in init_udmabuf()
238 kvfree(ubuf->folios); in deinit_udmabuf()
335 loff_t start, loff_t size, struct folio **folios) in udmabuf_pin_folios() argument
346 nr_folios = memfd_pin_folios(memfd, start, end, folios, pgcnt, &pgoff); in udmabuf_pin_folios()
[all …]
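
Note: the udmabuf hits above show the core pattern, size a folio pointer array with kvmalloc_array(), then pin the backing memfd's folios into it with memfd_pin_folios(). A minimal sketch of that shape (illustrative only; the in-tree code also handles partial pins and per-folio offsets, and pin_memfd_range() is a hypothetical helper):

    /* Sketch: pin a memfd range's folios into a freshly allocated array. */
    static long pin_memfd_range(struct file *memfd, loff_t start, loff_t size,
                                struct folio ***foliosp)
    {
            pgoff_t pgcnt = size >> PAGE_SHIFT;
            struct folio **folios;
            pgoff_t pgoff;
            long nr_folios;

            folios = kvmalloc_array(pgcnt, sizeof(*folios), GFP_KERNEL);
            if (!folios)
                    return -ENOMEM;

            /* end is inclusive; returns folios pinned or a -errno */
            nr_folios = memfd_pin_folios(memfd, start, start + size - 1,
                                         folios, pgcnt, &pgoff);
            if (nr_folios < 0) {
                    kvfree(folios);
                    return nr_folios;
            }

            *foliosp = folios;
            return nr_folios;
    }
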
/linux/fs/btrfs/
accessors.c
31 token->kaddr = folio_address(eb->folios[0]); in btrfs_init_map_token()
80 token->kaddr = folio_address(token->eb->folios[idx]); \
86 token->kaddr = folio_address(token->eb->folios[idx + 1]); \
99 char *kaddr = folio_address(eb->folios[idx]); \
109 kaddr = folio_address(eb->folios[idx + 1]); \
135 token->kaddr = folio_address(token->eb->folios[idx]); \
144 token->kaddr = folio_address(token->eb->folios[idx + 1]); \
156 char *kaddr = folio_address(eb->folios[idx]); \
170 kaddr = folio_address(eb->folios[idx + 1]); \
extent_io.c
213 struct folio *folio = fbatch.folios[i]; in __process_folios_contig()
262 struct folio *folio = fbatch.folios[i]; in lock_delalloc_folios()
660 eb->folios[i] = page_folio(page_array[i]); in alloc_eb_folio_array()
1796 struct folio *folio = eb->folios[0]; in write_one_eb()
1815 struct folio *folio = eb->folios[i]; in write_one_eb()
2033 struct folio *folio = fbatch.folios[i]; in btree_write_cache_pages()
2190 struct folio *folio = fbatch.folios[i]; in extent_write_cache_pages()
2643 struct folio *folio = eb->folios[i]; in btrfs_release_extent_buffer_folios()
2711 struct folio *folio = new->folios[i]; in btrfs_clone_extent_buffer()
2743 ret = attach_extent_buffer_folio(eb, eb->folios[i], NULL); in __alloc_dummy_extent_buffer()
[all …]
compression.h
97 u64 start, struct folio **folios, unsigned long *out_folios,
161 u64 start, struct folio **folios, unsigned long *out_folios,
172 u64 start, struct folio **folios, unsigned long *out_folios,
182 u64 start, struct folio **folios, unsigned long *out_folios,
defrag.c
1158 struct folio **folios, int nr_pages, in defrag_one_locked_target() argument
1167 unsigned long first_index = folios[0]->index; in defrag_one_locked_target()
1184 folio_clear_checked(folios[i]); in defrag_one_locked_target()
1185 btrfs_folio_clamp_set_dirty(fs_info, folios[i], start, len); in defrag_one_locked_target()
1201 struct folio **folios; in defrag_one_range() local
1212 folios = kcalloc(nr_pages, sizeof(struct folio *), GFP_NOFS); in defrag_one_range()
1213 if (!folios) in defrag_one_range()
1218 folios[i] = defrag_prepare_one_folio(inode, start_index + i); in defrag_one_range()
1219 if (IS_ERR(folios[i])) { in defrag_one_range()
1220 ret = PTR_ERR(folios[i]); in defrag_one_range()
[all …]
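
Note: defrag_one_range() above fills its folio array one slot at a time and has to unwind everything already prepared when a slot fails. A hedged sketch of that prepare-and-unwind shape (prepare_one_folio() is a hypothetical stand-in for defrag_prepare_one_folio()):

    /* Hypothetical: lock and return the folio at @index, or an ERR_PTR. */
    static struct folio *prepare_one_folio(struct inode *inode, pgoff_t index);

    static int prepare_folio_array(struct inode *inode, pgoff_t start_index,
                                   int nr_pages, struct folio ***foliosp)
    {
            struct folio **folios;
            int i;

            folios = kcalloc(nr_pages, sizeof(struct folio *), GFP_NOFS);
            if (!folios)
                    return -ENOMEM;

            for (i = 0; i < nr_pages; i++) {
                    folios[i] = prepare_one_folio(inode, start_index + i);
                    if (IS_ERR(folios[i])) {
                            int ret = PTR_ERR(folios[i]);

                            while (i--) {   /* unwind what we prepared */
                                    folio_unlock(folios[i]);
                                    folio_put(folios[i]);
                            }
                            kfree(folios);
                            return ret;
                    }
            }

            *foliosp = folios;
            return 0;
    }
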
extent_io.h
121 struct folio *folios[INLINE_EXTENT_BUFFER_PAGES]; member
168 return offset_in_folio(eb->folios[0], offset + eb->start); in get_eb_offset_in_folio()
300 if (folio_order(eb->folios[0])) in num_extent_folios()
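Note: an extent buffer is backed by up to INLINE_EXTENT_BUFFER_PAGES folios, but when folios[0] is large-order the whole buffer lives in that single folio, which is what the folio_order() test in num_extent_folios() above distinguishes. The per-folio walk used by write_one_eb() and friends then reduces to this sketch (assuming btrfs's extent_io.h is in scope):

    /* Sketch: visit each folio backing an extent buffer. */
    static void walk_eb_folios(struct extent_buffer *eb)
    {
            int i;

            for (i = 0; i < num_extent_folios(eb); i++) {
                    struct folio *folio = eb->folios[i];

                    /* ... e.g. folio_address(folio) for this chunk ... */
            }
    }
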
zlib.c
98 u64 start, struct folio **folios, unsigned long *out_folios, in zlib_compress_folios() argument
139 folios[0] = out_folio; in zlib_compress_folios()
238 folios[nr_folios] = out_folio; in zlib_compress_folios()
274 folios[nr_folios] = out_folio; in zlib_compress_folios()
lzo.c
213 u64 start, struct folio **folios, unsigned long *out_folios, in lzo_compress_folios() argument
268 folios, max_nr_folio, in lzo_compress_folios()
292 sizes_ptr = kmap_local_folio(folios[0], 0); in lzo_compress_folios()
compression.c
94 struct folio **folios, unsigned long *out_folios, in compression_compress_pages() argument
99 return zlib_compress_folios(ws, mapping, start, folios, in compression_compress_pages()
102 return lzo_compress_folios(ws, mapping, start, folios, in compression_compress_pages()
105 return zstd_compress_folios(ws, mapping, start, folios, in compression_compress_pages()
304 struct folio *folio = fbatch.folios[i]; in end_compressed_writeback()
1027 u64 start, struct folio **folios, unsigned long *out_folios, in btrfs_compress_folios() argument
1038 ret = compression_compress_pages(type, workspace, mapping, start, folios, in btrfs_compress_folios()
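
Note: all three btrfs compressors export the same folio-based signature (the compression.h hits above), so compression_compress_pages() is a bare switch on the algorithm type. Roughly, as a sketch (the in-tree default case falls back to storing the data uncompressed):

    /* Sketch of the dispatch in compression_compress_pages() above. */
    static int compress_folios_dispatch(int type, struct list_head *ws,
                                        struct address_space *mapping,
                                        u64 start, struct folio **folios,
                                        unsigned long *out_folios,
                                        unsigned long *total_in,
                                        unsigned long *total_out)
    {
            switch (type) {
            case BTRFS_COMPRESS_ZLIB:
                    return zlib_compress_folios(ws, mapping, start, folios,
                                                out_folios, total_in, total_out);
            case BTRFS_COMPRESS_LZO:
                    return lzo_compress_folios(ws, mapping, start, folios,
                                               out_folios, total_in, total_out);
            case BTRFS_COMPRESS_ZSTD:
                    return zstd_compress_folios(ws, mapping, start, folios,
                                                out_folios, total_in, total_out);
            default:
                    return -E2BIG;  /* caller treats this as "don't compress" */
            }
    }
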
/linux/include/linux/
pagevec.h
32 struct folio *folios[PAGEVEC_SIZE]; member
77 fbatch->folios[fbatch->nr++] = folio; in folio_batch_add()
93 return fbatch->folios[fbatch->i++]; in folio_batch_next()
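
Note: struct folio_batch is the fixed-size carrier (PAGEVEC_SIZE entries) behind most of the fbatch.folios[i] hits on this page: a filemap lookup fills it, the caller indexes folios[] directly, releases, and loops. A minimal consumer, as a sketch:

    #include <linux/pagevec.h>
    #include <linux/pagemap.h>

    /* Sketch: visit a mapping's folios in PAGEVEC_SIZE-sized batches,
     * the same loop shape as the truncate.c and extent_io.c hits. */
    static void walk_mapping(struct address_space *mapping, pgoff_t start,
                             pgoff_t end)
    {
            struct folio_batch fbatch;
            unsigned int i;

            folio_batch_init(&fbatch);
            while (filemap_get_folios(mapping, &start, end, &fbatch)) {
                    for (i = 0; i < folio_batch_count(&fbatch); i++) {
                            struct folio *folio = fbatch.folios[i];

                            /* ... inspect folio ... */
                    }
                    folio_batch_release(&fbatch);   /* drop the lookup refs */
            }
    }
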
folio_queue.h
243 folioq->vec.folios[slot] = folio; in folioq_append()
265 folioq->vec.folios[slot] = folio; in folioq_append_mark()
283 return folioq->vec.folios[slot]; in folioq_folio()
324 folioq->vec.folios[slot] = NULL; in folioq_clear()
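
Note: folio_queue stores folios in fixed-size segments; the accessors above write and read vec.folios[slot]. A hedged sketch of single-segment use (accessor names per include/linux/folio_queue.h, though signatures have shifted between kernel versions):

    /* Sketch: append folios to one (already initialised) segment and
     * read them back by slot. */
    static void fill_one_segment(struct folio_queue *fq,
                                 struct folio **folios, unsigned int n)
    {
            unsigned int i;

            for (i = 0; i < n && !folioq_full(fq); i++)
                    folioq_append(fq, folios[i]);   /* returns the slot used */

            for (i = 0; i < folioq_count(fq); i++) {
                    struct folio *folio = folioq_folio(fq, i);

                    /* ... consume folio ... */
            }
    }
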
/linux/tools/mm/
thpmaps
252 folios = indexes[index_next:index_end][heads[index_next:index_end]]
256 nr = (int(folios[0]) if len(folios) else index_end) - index_next
261 if len(folios):
264 nr = index_end - int(folios[-1])
265 folios = folios[:-1]
270 if len(folios):
271 folio_nrs = np.append(np.diff(folios), np.uint64(index_end - folios[-1]))
273 for index, order in zip(folios, folio_orders):
/linux/fs/ramfs/
file-nommu.c
235 ret = (unsigned long) folio_address(fbatch.folios[0]); in ramfs_nommu_get_unmapped_area()
236 pfn = folio_pfn(fbatch.folios[0]); in ramfs_nommu_get_unmapped_area()
240 if (pfn + nr_pages != folio_pfn(fbatch.folios[loop])) { in ramfs_nommu_get_unmapped_area()
244 nr_pages += folio_nr_pages(fbatch.folios[loop]); in ramfs_nommu_get_unmapped_area()
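
Note: for nommu, a mapping is only usable if its folios are physically contiguous; the hits above check that by comparing each folio's first pfn against a running page count. As a sketch:

    /* Sketch: the contiguity test from ramfs_nommu_get_unmapped_area()
     * above, over one batch of folios. */
    static bool batch_is_contiguous(struct folio_batch *fbatch)
    {
            unsigned long pfn = folio_pfn(fbatch->folios[0]);
            unsigned long nr_pages = 0;
            unsigned int i;

            for (i = 0; i < folio_batch_count(fbatch); i++) {
                    if (pfn + nr_pages != folio_pfn(fbatch->folios[i]))
                            return false;   /* physical gap */
                    nr_pages += folio_nr_pages(fbatch->folios[i]);
            }
            return true;
    }
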
/linux/fs/smb/client/
compress.c
164 struct folio *folios[16], *folio; in collect_sample() local
174 nr = xa_extract(iter->xarray, (void **)folios, index, last, ARRAY_SIZE(folios), in collect_sample()
180 folio = folios[i]; in collect_sample()
209 } while (nr == ARRAY_SIZE(folios)); in collect_sample()
/linux/mm/
truncate.c
73 if (xa_is_value(fbatch->folios[j])) in truncate_folio_batch_exceptionals()
81 if (xa_is_value(fbatch->folios[i])) in truncate_folio_batch_exceptionals()
326 truncate_cleanup_folio(fbatch.folios[i]); in truncate_inode_pages_range()
329 folio_unlock(fbatch.folios[i]); in truncate_inode_pages_range()
373 struct folio *folio = fbatch.folios[i]; in truncate_inode_pages_range()
472 struct folio *folio = fbatch.folios[i]; in mapping_try_invalidate()
615 struct folio *folio = fbatch.folios[i]; in invalidate_inode_pages2_range()
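
Note: batches filled by the find_get_entries() family can mix real folios with xarray value entries (shadow, swap or DAX markers), which is why truncate_folio_batch_exceptionals() above tests xa_is_value() before touching an entry. The consumer-side rule, sketched:

    /* Sketch: only dereference batch entries that are real folios;
     * value entries are metadata, as in the truncate.c hits above. */
    static void process_mixed_batch(struct folio_batch *fbatch)
    {
            unsigned int i;

            for (i = 0; i < folio_batch_count(fbatch); i++) {
                    struct folio *folio = fbatch->folios[i];

                    if (xa_is_value(folio))
                            continue;       /* shadow/swap/DAX entry */

                    /* ... safe to lock and inspect folio here ... */
            }
    }
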
gup.c
449 void unpin_folios(struct folio **folios, unsigned long nfolios) in unpin_folios() argument
464 if (folios[i] != folios[j]) in unpin_folios()
467 if (folios[i]) in unpin_folios()
468 gup_put_folio(folios[i], j - i, FOLL_PIN); in unpin_folios()
2293 struct folio **folios; member
2303 return pofs->folios[i]; in pofs_get_folio()
2315 unpin_folios(pofs->folios, pofs->nr_entries); in pofs_unpin()
2468 struct folio **folios) in check_and_migrate_movable_folios() argument
2471 .folios = folios, in check_and_migrate_movable_folios()
2502 struct folio **folios) in check_and_migrate_movable_folios() argument
[all …]
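
Note: unpin_folios() above scans for runs of identical pointers so one reference drop covers a whole run, and skips NULL slots. A simplified sketch of that scan (the in-tree version drops a run with a single internal gup_put_folio() call; here the exported unpin_folio() is used one pin at a time):

    /* Sketch: run-coalescing unpin over folios[0..nfolios). */
    static void unpin_folio_array(struct folio **folios, unsigned long nfolios)
    {
            unsigned long i = 0, j;

            while (i < nfolios) {
                    struct folio *folio = folios[i];

                    for (j = i + 1; j < nfolios; j++)
                            if (folios[j] != folio)
                                    break;

                    if (folio) {
                            unsigned long npins = j - i;

                            while (npins--)         /* one pin per array slot */
                                    unpin_folio(folio);
                    }
                    i = j;
            }
    }
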
migrate.c
1984 LIST_HEAD(folios); in migrate_pages_sync()
1990 reason, &folios, split_folios, &astats, in migrate_pages_sync()
1999 list_splice_tail(&folios, ret_folios); in migrate_pages_sync()
2013 list_splice_tail_init(&folios, from); in migrate_pages_sync()
2015 list_move(from->next, &folios); in migrate_pages_sync()
2016 rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio, in migrate_pages_sync()
2019 list_splice_tail_init(&folios, ret_folios); in migrate_pages_sync()
2061 LIST_HEAD(folios); in migrate_pages()
2089 list_cut_before(&folios, from, &folio2->lru); in migrate_pages()
2091 list_splice_init(from, &folios); in migrate_pages()
[all …]
/linux/fs/fuse/
file.c
670 folio_mark_dirty_lock(ap->folios[i]); in fuse_release_user_pages()
672 unpin_folio(ap->folios[i]); in fuse_release_user_pages()
753 ia->ap.folios = fuse_folios_alloc(nfolios, GFP_KERNEL, in fuse_io_alloc()
755 if (!ia->ap.folios) { in fuse_io_alloc()
765 kfree(ia->ap.folios); in fuse_io_free()
868 loff_t pos = folio_pos(ap->folios[0]) + num_read; in fuse_short_read()
883 .ap.folios = &folio, in fuse_do_readfolio()
944 mapping = ap->folios[i]->mapping; in fuse_readpages_end()
959 folio_end_read(ap->folios[i], !err); in fuse_readpages_end()
960 folio_put(ap->folios[i]); in fuse_readpages_end()
[all …]
/linux/Documentation/core-api/
folio_queue.rst
13 * Adding and removing folios
24 The folio_queue struct forms a single segment in a segmented list of folios
68 the number of folios added.
71 Adding and removing folios
134 of folios added to a segment and the third is a shorthand to indicate if the
137 Note that the count and fullness are not affected by clearing folios from the
197 last segment is reached and the folios it refers to are entirely consumed by
pin_user_pages.rst
58 For large folios, the GUP_PIN_COUNTING_BIAS scheme is not used. Instead,
62 This approach for large folios avoids the counting upper limit problems
68 This also means that huge pages and large folios do not suffer
202 The whole point of marking folios as "DMA-pinned" or "gup-pinned" is to be able
275 fields, and to better report on large folios in general. Specifically,
276 for large folios, the exact pincount is reported.
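
Note: both accounting schemes described above sit behind folio_maybe_dma_pinned(); simplified, the check is (a sketch of the logic, not a verbatim copy):

    /* Sketch: large folios carry an exact pincount; small folios
     * overload the refcount in GUP_PIN_COUNTING_BIAS (1024) steps, so
     * the small-folio answer can false-positive under heavy refs. */
    static bool folio_probably_pinned(struct folio *folio)
    {
            if (folio_test_large(folio))
                    return atomic_read(&folio->_pincount) > 0;

            return folio_ref_count(folio) >= GUP_PIN_COUNTING_BIAS;
    }
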
/linux/Documentation/mm/
unevictable-lru.rst
13 folios.
28 folios and to hide these folios from vmscan. This mechanism is based on a patch
72 The Unevictable LRU infrastructure maintains unevictable folios as if they were
75 (1) We get to "treat unevictable folios just like we treat other folios in the
80 (2) We want to be able to migrate unevictable folios between nodes for memory
82 can only migrate folios that it can successfully isolate from the LRU
83 lists (or "Movable" folios: outside of consideration here). If we were to
84 maintain folios elsewhere than on an LRU-like list, where they can be
88 anonymous, swap-backed folios. This differentiation is only important
89 while the folios are, in fact, evictable.
[all …]
multigen_lru.rst
92 truncated generation number is an index to ``lrugen->folios[]``. The
96 ``lrugen->folios[]``; otherwise it stores zero.
100 generations, tiers do not have dedicated ``lrugen->folios[]``. In
131 increments ``min_seq`` when ``lrugen->folios[]`` indexed by
226 since each node and memcg combination has an LRU of folios (see
232 the active/inactive LRU (of folios):
255 The multi-gen LRU (of folios) can be disassembled into the following
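
Note: the indexing rule the docs above describe, a generation's sequence number taken modulo the maximum number of generations selects its lrugen->folios[] list, is the lru_gen_from_seq() helper; essentially:

    /* Sketch: generation sequence number -> lrugen->folios[] index,
     * as described above (MAX_NR_GENS is 4). */
    static inline int gen_index(unsigned long seq)
    {
            return seq % MAX_NR_GENS;
    }
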
/linux/fs/bcachefs/
fs-io-buffered.c
42 folios folios; member
54 darray_push(&iter->folios, folio)) { in readpages_iter_init()
58 return iter->folios.nr ? 0 : -ENOMEM; in readpages_iter_init()
69 if (iter->idx >= iter->folios.nr) in readpage_iter_peek()
71 return iter->folios.data[iter->idx]; in readpage_iter_peek()
277 readpages_iter.folios.nr - in bch2_readahead()
299 darray_exit(&readpages_iter.folios); in bch2_readahead()
790 static noinline void folios_trunc(folios *fs, struct folio **fi) in folios_trunc()
807 folios fs; in __bch2_buffered_write()
fs-io-pagecache.h
7 typedef DARRAY(struct folio *) folios; typedef
10 u64, fgf_t, gfp_t, folios *);
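
Note: bcachefs keeps its folio vectors in a darray (fs/bcachefs/darray.h), a small growable-array helper, and the "folios" typedef above is just DARRAY(struct folio *). A hedged usage sketch of the push/exit pattern from readpages_iter_init() and bch2_readahead() (collect_folios() itself is illustrative, not in-tree code):

    /* Sketch: gather cached folios into a darray.  darray_push() grows
     * the array (returning -ENOMEM on failure); the caller walks
     * fs->data[0..fs->nr) and frees with darray_exit(fs). */
    static int collect_folios(struct address_space *mapping, pgoff_t index,
                              unsigned int nr, folios *fs)
    {
            unsigned int i;

            for (i = 0; i < nr; i++) {
                    struct folio *folio = filemap_get_folio(mapping, index + i);

                    if (IS_ERR(folio))
                            break;          /* nothing cached at this index */

                    if (darray_push(fs, folio)) {
                            folio_put(folio);
                            return -ENOMEM;
                    }
            }
            return 0;
    }
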
/linux/Documentation/filesystems/iomap/
porting.rst
31 2. Large folios are only supported via iomap; there are no plans to
39 allocating, instantiating, locking, and unlocking of folios.
108 interface with iomap and folios.
