/linux/mm/

H A D | shrinker.c |
    375  long freeable; in do_shrink_slab() local
    382  freeable = shrinker->count_objects(shrinker, shrinkctl); in do_shrink_slab()
    383  if (freeable == 0 || freeable == SHRINK_EMPTY) in do_shrink_slab()
    384  return freeable; in do_shrink_slab()
    394  delta = freeable >> priority; in do_shrink_slab()
    403  delta = freeable / 2; in do_shrink_slab()
    408  total_scan = min(total_scan, (2 * freeable)); in do_shrink_slab()
    411  freeable, delta, total_scan, priority); in do_shrink_slab()
    429  total_scan >= freeable) { in do_shrink_slab()
    454  next_deferred = min(next_deferred, (2 * freeable)); in do_shrink_slab()
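The do_shrink_slab() hits above show the core scan accounting: count_objects() reports how many objects a shrinker could free, the scan target is derived as roughly freeable >> priority (or freeable / 2 for shrinkers with seeks == 0), and both the scan and the deferred work are clamped to 2 * freeable. Below is a minimal sketch of the callback side of that contract, assuming the shrinker_alloc()/shrinker_register() interface of recent kernels; my_cache_nr_objects() and my_cache_evict() are hypothetical stand-ins for real cache code:

    #include <linux/shrinker.h>

    static unsigned long my_cache_count(struct shrinker *shrink,
                                        struct shrink_control *sc)
    {
            unsigned long freeable = my_cache_nr_objects();  /* hypothetical */

            /* do_shrink_slab() bails out early on 0 or SHRINK_EMPTY. */
            return freeable ? freeable : SHRINK_EMPTY;
    }

    static unsigned long my_cache_scan(struct shrinker *shrink,
                                       struct shrink_control *sc)
    {
            /* sc->nr_to_scan is derived from the count above, roughly
             * freeable >> priority, capped at 2 * freeable. */
            return my_cache_evict(sc->nr_to_scan);  /* hypothetical */
    }

    static int my_cache_shrinker_init(void)
    {
            struct shrinker *s = shrinker_alloc(0, "my-cache");

            if (!s)
                    return -ENOMEM;
            s->count_objects = my_cache_count;
            s->scan_objects = my_cache_scan;
            s->seeks = DEFAULT_SEEKS;
            shrinker_register(s);
            return 0;
    }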

H A D | zsmalloc.c |
    539  unsigned long obj_allocated, obj_used, pages_used, freeable; in zs_stats_size_show() local
    548  "pages_per_zspage", "freeable"); in zs_stats_size_show()
    567  freeable = zs_can_compact(class); in zs_stats_size_show()
    576  class->pages_per_zspage, freeable); in zs_stats_size_show()
    581  total_freeable += freeable; in zs_stats_size_show()
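In zs_stats_size_show(), "freeable" is zs_can_compact()'s per-class estimate of how many pages compaction could hand back. zs_can_compact() is internal to zsmalloc; a pool user such as zram only sees its effect through zs_compact(). A hedged sketch of that caller side, with my_pool_compact() as a hypothetical name:

    #include <linux/printk.h>
    #include <linux/zsmalloc.h>

    static void my_pool_compact(struct zs_pool *pool)
    {
            /* zs_compact() migrates objects out of sparsely used zspages
             * and returns the number of pages it freed. */
            unsigned long freed = zs_compact(pool);

            pr_debug("zsmalloc compaction freed %lu pages\n", freed);
    }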

/linux/Documentation/mm/

H A D | zsmalloc.rst |
     42  … 80% 90% 99% 100% obj_allocated obj_used pages_used pages_per_zspage freeable
     86  freeable
    109  class size 10% .... 100% obj_allocated obj_used pages_used pages_per_zspage freeable
    149  class size 10% .... 100% obj_allocated obj_used pages_used pages_per_zspage freeable
    167  class size 10% .... 100% obj_allocated obj_used pages_used pages_per_zspage freeable
    180  class size 10% .... 100% obj_allocated obj_used pages_used pages_per_zspage freeable
    232  … class size 10% .... 100% obj_allocated obj_used pages_used pages_per_zspage freeable
    246  … class size 10% .... 100% obj_allocated obj_used pages_used pages_per_zspage freeable

/linux/fs/bcachefs/

H A D | btree_cache.c |
    158  list_add(&b->list, &bc->freeable); in bch2_btree_node_hash_remove()
    399  list_for_each_entry_safe(b, t, &bc->freeable, list) { in bch2_btree_cache_count()
    401  * Leave a few nodes on the freeable list, so that a btree split in bch2_btree_cache_count()
    504  list_splice(&bc->freeable, &bc->live); in bch2_fs_btree_cache_init_early()
    553  list_splice_init(&bc->live, &bc->freeable); in bch2_btree_cache_cannibalize_lock()
    576  INIT_LIST_HEAD(&bc->freeable); in btree_node_cannibalize()
    699  list_for_each_entry(b2, &bc->freeable, list)
    839  list_add(&b->list, &bc->freeable); in btree_check_header()
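These hits all manipulate one structure: the btree cache keeps clean, unhashed nodes on a bc->freeable list so their memory can be reused or reclaimed without disturbing live nodes (the list_head member is declared in btree_types.h below). The idiom is plain <linux/list.h> usage; an illustrative sketch with hypothetical names:

    #include <linux/list.h>

    struct my_cache {
            struct list_head live;
            struct list_head freeable;
    };

    struct my_node {
            struct list_head list;
    };

    static void my_node_retire(struct my_cache *c, struct my_node *n)
    {
            /* Same idiom as __btree_node_free(): the node keeps its
             * memory but becomes first in line for reclaim. */
            list_move(&n->list, &c->freeable);
    }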

H A D | btree_types.h |
    159  struct list_head freeable; member

H A D | btree_update_interior.c |
     241  list_move(&b->list, &c->btree_cache.freeable); in __btree_node_free()
    2459  list_move(&new_hash->list, &c->btree_cache.freeable); in bch2_btree_node_update_key()

H A D | btree_gc.c |
    535  list_move(&b->list, &c->btree_cache.freeable); in bch2_check_topology()

H A D | btree_io.c |
    1775  list_move(&b->list, &c->btree_cache.freeable); in bch2_btree_complete_write()

/linux/fs/

H A D | inode.c |
    808  struct list_head *freeable = arg; in inode_lru_isolate() local
    865  list_lru_isolate_move(lru, &inode->i_lru, freeable); in inode_lru_isolate()
    880  LIST_HEAD(freeable); in prune_icache_sb()
    884  inode_lru_isolate, &freeable); in prune_icache_sb()
    885  dispose_list(&freeable); in prune_icache_sb()
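prune_icache_sb() is the canonical list_lru reclaim pattern: the walk callback isolates reclaimable items onto a private "freeable" list while the LRU lock is held, and the actual freeing happens afterwards, outside the lock; dcache.c below follows the same shape. A sketch with hypothetical names (note that the walk-callback signature has changed across kernel versions):

    #include <linux/list_lru.h>
    #include <linux/shrinker.h>

    static enum lru_status my_isolate(struct list_head *item,
                                      struct list_lru_one *lru,
                                      void *arg)
    {
            struct list_head *freeable = arg;

            /* Take the item off the LRU and park it on our list. */
            list_lru_isolate_move(lru, item, freeable);
            return LRU_REMOVED;
    }

    static long my_prune(struct list_lru *lru, struct shrink_control *sc)
    {
            LIST_HEAD(freeable);
            long freed;

            freed = list_lru_shrink_walk(lru, sc, my_isolate, &freeable);
            my_dispose_list(&freeable);  /* hypothetical: free isolated items */
            return freed;
    }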

H A D | dcache.c |
    1082  struct list_head *freeable = arg; in dentry_lru_isolate() local
    1131  d_lru_shrink_move(lru, dentry, freeable); in dentry_lru_isolate()
    1163  struct list_head *freeable = arg; in dentry_lru_isolate_shrink() local
    1174  d_lru_shrink_move(lru, dentry, freeable); in dentry_lru_isolate_shrink()

/linux/Documentation/filesystems/

H A D | ramfs-rootfs-initramfs.rst |
    20  around in case it's needed again, but marked as clean (freeable) in case the

H A D | vfs.rst |
    414  return the number of freeable cached objects it contains.
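That line documents the super_operations ->nr_cached_objects() hook, which the per-superblock shrinker adds to its dentry and inode counts. A minimal sketch, assuming a filesystem-private counter (my_fs_count_cached() is hypothetical):

    static long my_nr_cached_objects(struct super_block *sb,
                                     struct shrink_control *sc)
    {
            /* Report only objects that could be freed right now;
             * super_cache_count() adds this to the dentry/inode totals. */
            return my_fs_count_cached(sb);  /* hypothetical */
    }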

/linux/Documentation/admin-guide/sysctl/

H A D | vm.rst |
    982  performance impact. Reclaim code needs to take various locks to find freeable
    984  ten times more freeable objects than there are.
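This is the vfs_cache_pressure text: the knob scales the freeable count the dentry and inode shrinkers report, which is why a value of 1000 can claim up to ten times more freeable objects than actually exist. The scaling is a one-liner, modeled here on vfs_pressure_ratio() in fs/internal.h:

    /* Counts reported by the dcache/icache shrinkers are scaled by
     * sysctl_vfs_cache_pressure / 100, so 1000 inflates them tenfold. */
    static inline unsigned long vfs_pressure_ratio(unsigned long val)
    {
            return mult_frac(val, sysctl_vfs_cache_pressure, 100);
    }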

/linux/Documentation/filesystems/xfs/

H A D | xfs-online-fsck-design.rst |
    2871  are the blocks that might be freeable.