| /linux/drivers/dma-buf/ |
| udmabuf.c |
    29   struct folio **folios;  [member]
    58   pfn = folio_pfn(ubuf->folios[pgoff]);  [in udmabuf_vm_fault()]
    76   pfn = folio_pfn(ubuf->folios[pgoff]);  [in udmabuf_vm_fault()]
    123  pages[pg] = folio_page(ubuf->folios[pg],  [in vmap_udmabuf()]
    162  sg_set_folio(sgl, ubuf->folios[i], PAGE_SIZE,  [in get_sg_table()]
    210  ubuf->folios = kvmalloc_array(pgcnt, sizeof(*ubuf->folios), GFP_KERNEL);  [in init_udmabuf()]
    211  if (!ubuf->folios)  [in init_udmabuf()]
    231  kvfree(ubuf->folios);  [in deinit_udmabuf()]
    326  loff_t start, loff_t size, struct folio **folios)  [in udmabuf_pin_folios(), argument]
    337  nr_folios = memfd_pin_folios(memfd, start, end, folios, pgcnt, &pgoff);  [in udmabuf_pin_folios()]
    [all …]
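The udmabuf hits above outline a complete lifecycle: allocate a folio-pointer array, pin the memfd-backed folios, use them, then unpin and free. A minimal sketch of that pattern, assuming the memfd_pin_folios()/unpin_folios() interfaces from mm/gup.c; pin_memfd_range() is a hypothetical helper, not the driver's actual code:

```c
/* Sketch only: pin_memfd_range() is hypothetical, not udmabuf code. */
static long pin_memfd_range(struct file *memfd, loff_t start, loff_t end,
			    unsigned int pgcnt, struct folio ***out)
{
	struct folio **folios;
	pgoff_t pgoff;
	long nr_folios;

	folios = kvmalloc_array(pgcnt, sizeof(*folios), GFP_KERNEL);
	if (!folios)
		return -ENOMEM;

	/* Pin the folios backing [start, end] of the memfd. */
	nr_folios = memfd_pin_folios(memfd, start, end, folios, pgcnt, &pgoff);
	if (nr_folios <= 0) {
		kvfree(folios);
		return nr_folios ? nr_folios : -EINVAL;
	}

	*out = folios;	/* caller later calls unpin_folios() and kvfree() */
	return nr_folios;
}
```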
|
| /linux/lib/ |
| test_kho.c |
    34   struct folio **folios;  [member]
    47   kho_unpreserve_folio(state->folios[i]);  [in kho_test_unpreserve_data()]
    71   struct folio *folio = state->folios[i];  [in kho_test_preserve_data()]
    186  state->folios[state->nr_folios++] = folio;  [in kho_test_generate_data()]
    198  folio_put(state->folios[i]);  [in kho_test_generate_data()]
    206  struct folio **folios;  [in kho_test_save(), local]
    213  folios = kvmalloc_array(max_nr, sizeof(*state->folios), GFP_KERNEL);  [in kho_test_save()]
    214  if (!folios)  [in kho_test_save()]
    216  state->folios = folios;  [in kho_test_save()]
    229  kvfree(folios);  [in kho_test_save()]
    [all …]
|
| /linux/mm/ |
| memfd_luo.c |
    92   struct folio **folios;  [in memfd_luo_preserve_folios(), local]
    114  folios = kvmalloc_array(max_folios, sizeof(*folios), GFP_KERNEL);  [in memfd_luo_preserve_folios()]
    115  if (!folios)  [in memfd_luo_preserve_folios()]
    130  nr_pinned = memfd_pin_folios(file, 0, size - 1, folios, max_folios,  [in memfd_luo_preserve_folios()]
    147  struct folio *folio = folios[i];  [in memfd_luo_preserve_folios()]
    168  kvfree(folios);  [in memfd_luo_preserve_folios()]
    181  kho_unpreserve_folio(folios[i]);  [in memfd_luo_preserve_folios()]
    184  unpin_folios(folios, nr_folios);  [in memfd_luo_preserve_folios()]
    186  kvfree(folios);  [in memfd_luo_preserve_folios()]
    239  err = memfd_luo_preserve_folios(args->file, &ser->folios,  [in memfd_luo_preserve()]
    [all …]
|
| swap.c |
    165   struct folio *folio = fbatch->folios[i];  [in folio_batch_move_lru()]
    376   struct folio *batch_folio = fbatch->folios[i];  [in __lru_cache_activate_folio()]
    951   void folios_put_refs(struct folio_batch *folios, unsigned int *refs)  [in folios_put_refs(), argument]
    957   for (i = 0, j = 0; i < folios->nr; i++) {  [in folios_put_refs()]
    958   struct folio *folio = folios->folios[i];  [in folios_put_refs()]
    990   folios->folios[j] = folio;  [in folios_put_refs()]
    996   folio_batch_reinit(folios);  [in folios_put_refs()]
    1000  folios->nr = j;  [in folios_put_refs()]
    1001  mem_cgroup_uncharge_folios(folios);  [in folios_put_refs()]
    1002  free_unref_folios(folios);  [in folios_put_refs()]
    [all …]
|
| swap_state.c |
    341  struct folio_batch folios;  [in free_pages_and_swap_cache(), local]
    344  folio_batch_init(&folios);  [in free_pages_and_swap_cache()]
    349  refs[folios.nr] = 1;  [in free_pages_and_swap_cache()]
    352  refs[folios.nr] = encoded_nr_pages(pages[++i]);  [in free_pages_and_swap_cache()]
    354  if (folio_batch_add(&folios, folio) == 0)  [in free_pages_and_swap_cache()]
    355  folios_put_refs(&folios, refs);  [in free_pages_and_swap_cache()]
    357  if (folios.nr)  [in free_pages_and_swap_cache()]
    358  folios_put_refs(&folios, refs);  [in free_pages_and_swap_cache()]
|
| truncate.c |
    73   if (xa_is_value(fbatch->folios[j]))  [in truncate_folio_batch_exceptionals()]
    81   if (xa_is_value(fbatch->folios[i])) {  [in truncate_folio_batch_exceptionals()]
    404  truncate_cleanup_folio(fbatch.folios[i]);  [in truncate_inode_pages_range()]
    407  folio_unlock(fbatch.folios[i]);  [in truncate_inode_pages_range()]
    451  struct folio *folio = fbatch.folios[i];  [in truncate_inode_pages_range()]
    550  struct folio *folio = fbatch.folios[i];  [in mapping_try_invalidate()]
    693  struct folio *folio = fbatch.folios[i];  [in invalidate_inode_pages2_range()]
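The truncate.c call sites above iterate a folio_batch filled from the page cache; the xa_is_value() checks exist because find_get_entries() can hand back shadow (value) entries alongside real folios. A hedged sketch of the underlying walk using the exported filemap_get_folios() helper (which, unlike find_get_entries(), returns only real folios); the per-folio work is elided:

```c
/*
 * Sketch of the batched page-cache walk shared by these call sites,
 * using the exported filemap_get_folios() helper.
 */
static void walk_mapping_sketch(struct address_space *mapping)
{
	struct folio_batch fbatch;
	pgoff_t index = 0;
	unsigned int i;

	folio_batch_init(&fbatch);
	while (filemap_get_folios(mapping, &index, (pgoff_t)-1, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			folio_lock(folio);
			/* ... per-folio work goes here ... */
			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);	/* drop refs, reset the batch */
		cond_resched();
	}
}
```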
|
| gup.c |
    449   void unpin_folios(struct folio **folios, unsigned long nfolios)  [in unpin_folios(), argument]
    464   if (folios[i] != folios[j])  [in unpin_folios()]
    467   if (folios[i])  [in unpin_folios()]
    468   gup_put_folio(folios[i], j - i, FOLL_PIN);  [in unpin_folios()]
    2210  struct folio **folios;  [member]
    2220  return pofs->folios[i];  [in pofs_get_folio()]
    2232  unpin_folios(pofs->folios, pofs->nr_entries);  [in pofs_unpin()]
    2421  struct folio **folios)  [in check_and_migrate_movable_folios(), argument]
    2424  .folios = folios,  [in check_and_migrate_movable_folios()]
    2455  struct folio **folios)  [in check_and_migrate_movable_folios(), argument]
    [all …]
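unpin_folios() above is the teardown half of FOLL_PIN. As a reminder of the full lifetime it pairs with, here is a hedged sketch built on pin_user_pages_fast()/unpin_user_pages(); do_dma() is a hypothetical stand-in for the real work:

```c
/* Sketch of a FOLL_PIN lifetime; pin_user_buffer_sketch() is hypothetical. */
static int pin_user_buffer_sketch(unsigned long uaddr, int nr_pages)
{
	struct page **pages;
	int pinned;

	pages = kvmalloc_array(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/* Take FOLL_PIN references suitable for long-term DMA. */
	pinned = pin_user_pages_fast(uaddr, nr_pages,
				     FOLL_WRITE | FOLL_LONGTERM, pages);
	if (pinned < 0) {
		kvfree(pages);
		return pinned;
	}

	/* do_dma(pages, pinned); */

	unpin_user_pages(pages, pinned);	/* drops the FOLL_PIN references */
	kvfree(pages);
	return 0;
}
```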
|
| migrate.c |
    2002  LIST_HEAD(folios);  [in migrate_pages_sync()]
    2008  reason, &folios, split_folios, &astats,  [in migrate_pages_sync()]
    2017  list_splice_tail(&folios, ret_folios);  [in migrate_pages_sync()]
    2031  list_splice_tail_init(&folios, from);  [in migrate_pages_sync()]
    2033  list_move(from->next, &folios);  [in migrate_pages_sync()]
    2034  rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,  [in migrate_pages_sync()]
    2037  list_splice_tail_init(&folios, ret_folios);  [in migrate_pages_sync()]
    2079  LIST_HEAD(folios);  [in migrate_pages()]
    2107  list_cut_before(&folios, from, &folio2->lru);  [in migrate_pages()]
    2109  list_splice_init(from, &folios);  [in migrate_pages()]
    [all …]
|
| /linux/fs/btrfs/ |
| accessors.c |
    59   char *kaddr = folio_address(eb->folios[idx]) + oif; \
    73   kaddr = folio_address(eb->folios[idx + 1]); \
    77   folio_address(eb->folios[idx + 1]), \
    89   char *kaddr = folio_address(eb->folios[idx]) + oif; \
    105  kaddr = folio_address(eb->folios[idx + 1]); \
    109  kaddr = folio_address(eb->folios[idx + 1]); \
|
| extent_io.c |
    282   struct folio *folio = fbatch.folios[i];  [in __process_folios_contig()]
    323   struct folio *folio = fbatch.folios[i];  [in lock_delalloc_folios()]
    706   eb->folios[i] = page_folio(page_array[i]);  [in alloc_eb_folio_array()]
    2230  struct folio *folio = eb->folios[i];  [in write_one_eb()]
    2496  struct folio *folio = fbatch.folios[i];  [in extent_write_cache_pages()]
    2964  struct folio *folio = eb->folios[i];  [in btrfs_release_extent_buffer_folios()]
    3015  ASSERT(eb->folios[i]);  [in cleanup_extent_buffer_folios()]
    3016  detach_extent_buffer_folio(eb, eb->folios[i]);  [in cleanup_extent_buffer_folios()]
    3017  folio_put(eb->folios[i]);  [in cleanup_extent_buffer_folios()]
    3018  eb->folios[i] = NULL;  [in cleanup_extent_buffer_folios()]
    [all …]
|
| zlib.c |
    148  u64 start, struct folio **folios, unsigned long *out_folios,  [in zlib_compress_folios(), argument]
    190  folios[0] = out_folio;  [in zlib_compress_folios()]
    274  folios[nr_folios] = out_folio;  [in zlib_compress_folios()]
    310  folios[nr_folios] = out_folio;  [in zlib_compress_folios()]
|
| lzo.c |
    218  u64 start, struct folio **folios, unsigned long *out_folios,  [in lzo_compress_folios(), argument]
    275  folios, max_nr_folio,  [in lzo_compress_folios()]
    299  sizes_ptr = kmap_local_folio(folios[0], 0);  [in lzo_compress_folios()]
|
| /linux/include/linux/ |
| pagevec.h |
    32  struct folio *folios[PAGEVEC_SIZE];  [member]
    77  fbatch->folios[fbatch->nr++] = folio;  [in folio_batch_add()]
    93  return fbatch->folios[fbatch->i++];  [in folio_batch_next()]
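pagevec.h defines struct folio_batch, the small fixed-size array (PAGEVEC_SIZE slots) that most of the call sites in this listing iterate. A sketch of the usual fill-then-flush idiom, assuming only the helpers shown here plus folio_batch_count()/folio_batch_reinit(); process_batch() is a hypothetical callback:

```c
/*
 * Sketch of the fill-then-flush idiom built on struct folio_batch.
 * folio_batch_add() returns the number of free slots remaining after the
 * add, so a return of 0 means the batch is now full and should be flushed.
 */
static void collect_folios_sketch(struct folio **src, unsigned long n,
				  void (*process_batch)(struct folio_batch *))
{
	struct folio_batch fbatch;
	unsigned long i;

	folio_batch_init(&fbatch);
	for (i = 0; i < n; i++) {
		if (folio_batch_add(&fbatch, src[i]) == 0) {
			process_batch(&fbatch);
			folio_batch_reinit(&fbatch);
		}
	}
	if (folio_batch_count(&fbatch))
		process_batch(&fbatch);	/* flush the final partial batch */
}
```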
|
| /linux/tools/mm/ |
| thpmaps |
    252  folios = indexes[index_next:index_end][heads[index_next:index_end]]
    256  nr = (int(folios[0]) if len(folios) else index_end) - index_next
    261  if len(folios):
    264  nr = index_end - int(folios[-1])
    265  folios = folios[:-1]
    270  if len(folios):
    271  folio_nrs = np.append(np.diff(folios), np.uint64(index_end - folios[-1]))
    273  for index, order in zip(folios, folio_orders):
|
| /linux/fs/fuse/ |
| ioctl.c |
    254  ap.folios = fuse_folios_alloc(fm->fc->max_pages, GFP_KERNEL, &ap.descs);  [in fuse_do_ioctl()]
    256  if (!ap.folios || !iov_page)  [in fuse_do_ioctl()]
    310  ap.folios[ap.num_folios] = folio_alloc(GFP_KERNEL | __GFP_HIGHMEM, 0);  [in fuse_do_ioctl()]
    311  if (!ap.folios[ap.num_folios])  [in fuse_do_ioctl()]
    330  c = copy_folio_from_iter(ap.folios[i], 0, PAGE_SIZE, &ii);  [in fuse_do_ioctl()]
    368  vaddr = kmap_local_folio(ap.folios[0], 0);  [in fuse_do_ioctl()]
    397  c = copy_folio_to_iter(ap.folios[i], 0, PAGE_SIZE, &ii);  [in fuse_do_ioctl()]
    405  folio_put(ap.folios[--ap.num_folios]);  [in fuse_do_ioctl()]
    406  kfree(ap.folios);  [in fuse_do_ioctl()]
|
| file.c |
    607   folio_mark_dirty_lock(ap->folios[i]);  [in fuse_release_user_pages()]
    609   unpin_folio(ap->folios[i]);  [in fuse_release_user_pages()]
    690   ia->ap.folios = fuse_folios_alloc(nfolios, GFP_KERNEL,  [in fuse_io_alloc()]
    692   if (!ia->ap.folios) {  [in fuse_io_alloc()]
    702   kfree(ia->ap.folios);  [in fuse_io_free()]
    805   loff_t pos = folio_pos(ap->folios[0]) + num_read;  [in fuse_short_read()]
    824   .ap.folios = &folio,  [in fuse_do_readfolio()]
    915   ap->folios[ap->num_folios] = folio;  [in fuse_handle_readahead()]
    1009  mapping = ap->folios[0]->mapping;  [in fuse_readpages_end()]
    1021  iomap_finish_folio_read(ap->folios[i], ap->descs[i].offset,  [in fuse_readpages_end()]
    [all …]
|
| fuse_i.h |
    357   struct folio **folios;  [member]
    1107  struct folio **folios;  [in fuse_folios_alloc(), local]
    1109  folios = kzalloc(nfolios * (sizeof(struct folio *) +  [in fuse_folios_alloc()]
    1111  *desc = (void *) (folios + nfolios);  [in fuse_folios_alloc()]
    1113  return folios;  [in fuse_folios_alloc()]
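The fuse_i.h fragments above outline fuse_folios_alloc(): a single kzalloc() carries both the folio-pointer array and the per-folio fuse_folio_desc array, with *desc pointed just past the folio pointers. A sketch reconstructed from those fragments; treat the exact signature and the added NULL check as approximations:

```c
/*
 * Sketch reconstructed from the fragments above: one allocation holds
 * nfolios folio pointers followed by nfolios descriptors.
 */
static struct folio **fuse_folios_alloc_sketch(unsigned int nfolios, gfp_t flags,
					       struct fuse_folio_desc **desc)
{
	struct folio **folios;

	folios = kzalloc(nfolios * (sizeof(struct folio *) +
				    sizeof(struct fuse_folio_desc)), flags);
	if (folios)
		*desc = (void *)(folios + nfolios);	/* descs follow the pointers */

	return folios;
}
```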
|
| /linux/fs/ramfs/ |
| file-nommu.c |
    235  ret = (unsigned long) folio_address(fbatch.folios[0]);  [in ramfs_nommu_get_unmapped_area()]
    236  pfn = folio_pfn(fbatch.folios[0]);  [in ramfs_nommu_get_unmapped_area()]
    240  if (pfn + nr_pages != folio_pfn(fbatch.folios[loop])) {  [in ramfs_nommu_get_unmapped_area()]
    244  nr_pages += folio_nr_pages(fbatch.folios[loop]);  [in ramfs_nommu_get_unmapped_area()]
|
| /linux/Documentation/ABI/testing/ |
| sysfs-fs-erofs |
    25  compressed folios:
    27  - 1 : invalidate cached compressed folios
    29  - 3 : drop in-memory pclusters and cached compressed folios
|
| /linux/fs/orangefs/ |
| inode.c |
    74   struct folio **folios;  [member]
    91   start = offset_in_folio(ow->folios[0], ow->off);  [in orangefs_writepages_work()]
    93   folio_start_writeback(ow->folios[i]);  [in orangefs_writepages_work()]
    94   bvec_set_folio(&ow->bv[i], ow->folios[i],  [in orangefs_writepages_work()]
    95   folio_size(ow->folios[i]) - start, start);  [in orangefs_writepages_work()]
    115  wrp = folio_detach_private(ow->folios[i]);  [in orangefs_writepages_work()]
    117  folio_end_writeback(ow->folios[i]);  [in orangefs_writepages_work()]
    118  folio_unlock(ow->folios[i]);  [in orangefs_writepages_work()]
    144  ow->folios[ow->nfolios++] = folio;  [in orangefs_writepages_callback()]
    156  ow->folios[ow->nfolios++] = folio;  [in orangefs_writepages_callback()]
    [all …]
|
| /linux/Documentation/mm/ |
| unevictable-lru.rst |
    13  folios.
    28  folios and to hide these folios from vmscan. This mechanism is based on a patch
    72  The Unevictable LRU infrastructure maintains unevictable folios as if they were
    75  (1) We get to "treat unevictable folios just like we treat other folios in the
    80  (2) We want to be able to migrate unevictable folios between nodes for memory
    82  can only migrate folios that it can successfully isolate from the LRU
    83  lists (or "Movable" folios: outside of consideration here). If we were to
    84  maintain folios elsewhere than on an LRU-like list, where they can be
    88  anonymous, swap-backed folios. This differentiation is only important
    89  while the folios are, in fact, evictable.
    [all …]
|
| multigen_lru.rst |
    92   truncated generation number is an index to ``lrugen->folios[]``. The
    96   ``lrugen->folios[]``; otherwise it stores zero.
    100  generations, tiers do not have dedicated ``lrugen->folios[]``. In
    131  increments ``min_seq`` when ``lrugen->folios[]`` indexed by
    226  since each node and memcg combination has an LRU of folios (see
    232  the active/inactive LRU (of folios):
    255  The multi-gen LRU (of folios) can be disassembled into the following
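The multi-gen LRU text above says a truncated generation number indexes ``lrugen->folios[]``. A small sketch of that truncation, modeled on lru_gen_from_seq() in include/linux/mm_inline.h; the helper name here is illustrative:

```c
/*
 * Sketch: the generation counter grows without bound, but only its value
 * modulo MAX_NR_GENS is used as an index into lrugen->folios[].
 */
static inline int gen_index_sketch(unsigned long seq)
{
	return seq % MAX_NR_GENS;
}
```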
|
| /linux/fs/nilfs2/ |
| page.c |
    258  struct folio *folio = fbatch.folios[i], *dfolio;  [in nilfs_copy_dirty_pages()]
    312  struct folio *folio = fbatch.folios[i], *dfolio;  [in nilfs_copy_back_pages()]
    372  struct folio *folio = fbatch.folios[i];  [in nilfs_clear_dirty_pages()]
    531  folio = fbatch.folios[i];  [in nilfs_find_uncommitted_extent()]
|
| /linux/include/linux/kho/abi/ |
| memfd.h |
    71  struct kho_vmalloc folios;  [member]
|
| /linux/Documentation/core-api/ |
| pin_user_pages.rst |
    58   For large folios, the GUP_PIN_COUNTING_BIAS scheme is not used. Instead,
    62   This approach for large folios avoids the counting upper limit problems
    68   This also means that huge pages and large folios do not suffer
    202  The whole point of marking folios as "DMA-pinned" or "gup-pinned" is to be able
    275  fields, and to better report on large folios in general. Specifically,
    276  for large folios, the exact pincount is reported.
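The pin_user_pages.rst excerpt contrasts the GUP_PIN_COUNTING_BIAS refcount heuristic used for order-0 folios with the exact pincount kept for large folios. A hedged sketch of how callers typically consume that state, assuming folio_maybe_dma_pinned() from include/linux/mm.h; the wrapper name is illustrative:

```c
/* Sketch only: folio_is_longterm_pinned_sketch() is a hypothetical wrapper. */
static bool folio_is_longterm_pinned_sketch(struct folio *folio)
{
	/*
	 * folio_maybe_dma_pinned() reads the dedicated pincount for large
	 * folios and falls back to the GUP_PIN_COUNTING_BIAS refcount
	 * heuristic for order-0 folios, so it can report false positives
	 * but never false negatives.
	 */
	return folio_maybe_dma_pinned(folio);
}
```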
|