/linux/fs/ubifs/shrinker.c
    56  * clean znodes which are younger than @age. Returns number of freed znodes.
    83  int freed;                                      in shrink_tnc()
   116  freed = ubifs_destroy_tnc_subtree(c, znode);    in shrink_tnc()
   117  atomic_long_sub(freed, &ubifs_clean_zn_cnt);    in shrink_tnc()
   118  atomic_long_sub(freed, &c->clean_zn_cnt);       in shrink_tnc()
   119  total_freed += freed;                           in shrink_tnc()
   141  * znodes which are older than @age, until at least @nr znodes are freed.
   142  * Returns the number of freed znodes.
   149  int freed = 0;                                  in shrink_tnc_trees()
   187  freed += shrink_tnc(c, nr, age, contention);    in shrink_tnc_trees()
   [all …]
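The shrink_tnc() hits above show the bookkeeping that pairs with the destroy call: the number of znodes reported freed is subtracted from both a global and a per-filesystem atomic counter. A minimal sketch of that pattern, using only the standard atomic API (account_freed and the counter names are hypothetical, not ubifs symbols):

    #include <linux/atomic.h>

    static atomic_long_t global_clean_cnt = ATOMIC_LONG_INIT(0);

    /* Hypothetical helper mirroring shrink_tnc(): subtract the count of
     * objects a subtree destroy reported as freed from both counters, so
     * the shrinker's cheap "count" side stays accurate without locking. */
    static void account_freed(atomic_long_t *per_fs_cnt, long freed)
    {
            atomic_long_sub(freed, &global_clean_cnt);
            atomic_long_sub(freed, per_fs_cnt);
    }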
/linux/drivers/staging/octeon/ethernet-mem.c
    26  int freed = elements;                           in cvm_oct_fill_hw_skbuff()
    28  while (freed) {                                 in cvm_oct_fill_hw_skbuff()
    36  freed--;                                        in cvm_oct_fill_hw_skbuff()
    38  return elements - freed;                        in cvm_oct_fill_hw_skbuff()
    81  int freed = elements;                           in cvm_oct_fill_hw_memory()
    83  while (freed) {                                 in cvm_oct_fill_hw_memory()
   103  freed--;                                        in cvm_oct_fill_hw_memory()
   105  return elements - freed;                        in cvm_oct_fill_hw_memory()
   139  int freed;                                      in cvm_oct_mem_fill_fpa()
   142  freed = cvm_oct_fill_hw_skbuff(pool, size, elements);   in cvm_oct_mem_fill_fpa()
   [all …]
/linux/fs/f2fs/shrinker.c
    86  unsigned long freed = 0;                        in f2fs_shrink_scan()
   109  freed += f2fs_shrink_age_extent_tree(sbi, nr >> 2);    in f2fs_shrink_scan()
   112  freed += f2fs_shrink_read_extent_tree(sbi, nr >> 2);   in f2fs_shrink_scan()
   115  if (freed < nr)                                 in f2fs_shrink_scan()
   116  freed += f2fs_try_to_free_nats(sbi, nr - freed);   in f2fs_shrink_scan()
   119  if (freed < nr)                                 in f2fs_shrink_scan()
   120  freed += f2fs_try_to_free_nids(sbi, nr - freed);   in f2fs_shrink_scan()
   126  if (freed >= nr)                                in f2fs_shrink_scan()
   130  return freed;                                   in f2fs_shrink_scan()
/linux/drivers/gpu/drm/xe/xe_shrinker.c
    63  s64 freed = 0, lret;                            in __xe_shrinker_walk()
    85  freed += lret;                                  in __xe_shrinker_walk()
    93  return freed;                                   in __xe_shrinker_walk()
   109  s64 lret, freed;                                in xe_shrinker_walk()
   118  freed = lret;                                   in xe_shrinker_walk()
   123  freed += lret;                                  in xe_shrinker_walk()
   125  return freed;                                   in xe_shrinker_walk()
   132  freed += lret;                                  in xe_shrinker_walk()
   135  return freed;                                   in xe_shrinker_walk()
   208  unsigned long nr_to_scan, nr_scanned = 0, freed = 0;   in xe_shrinker_scan()
   [all …]
/linux/mm/shrinker.c
   374  unsigned long freed = 0;                        in do_shrink_slab()
   440  freed += ret;                                   in do_shrink_slab()
   464  trace_mm_shrink_slab_end(shrinker, shrinkctl->nid, freed, nr, new_nr, total_scan);   in do_shrink_slab()
   465  return freed;                                   in do_shrink_slab()
   473  unsigned long ret, freed = 0;                   in shrink_slab_memcg()
   482  * The shrinker_info may be freed asynchronously via RCU in the   in shrink_slab_memcg()
   486  * The shrinker_info_unit is never freed unless its corresponding memcg   in shrink_slab_memcg()
   489  * not be freed.                                 in shrink_slab_memcg()
   575  freed += ret;                                   in shrink_slab_memcg()
   584  return freed;                                   in shrink_slab_memcg()
   [all …]
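do_shrink_slab() above is the core loop that drives every registered shrinker; the f2fs, xe, nfsd, and erofs scan functions elsewhere in this listing all sit behind the same pair of callbacks. A minimal sketch of registering one, assuming the shrinker_alloc()/shrinker_register() API of recent kernels (demo_count, demo_scan, and the demo_cached counter are hypothetical):

    #include <linux/shrinker.h>
    #include <linux/atomic.h>

    static atomic_long_t demo_cached;       /* objects we could free */

    static unsigned long demo_count(struct shrinker *s,
                                    struct shrink_control *sc)
    {
            /* Cheap, lock-free estimate of reclaimable objects. */
            return atomic_long_read(&demo_cached);
    }

    static unsigned long demo_scan(struct shrinker *s,
                                   struct shrink_control *sc)
    {
            unsigned long freed = 0;

            /* Free up to sc->nr_to_scan objects, counting what we
             * release; a real implementation walks its cache here. */
            while (freed < sc->nr_to_scan &&
                   atomic_long_add_unless(&demo_cached, -1, 0))
                    freed++;

            return freed ? freed : SHRINK_STOP;
    }

    static int demo_register(void)      /* e.g. from a module_init() hook */
    {
            struct shrinker *s = shrinker_alloc(0, "demo");

            if (!s)
                    return -ENOMEM;
            s->count_objects = demo_count;
            s->scan_objects = demo_scan;
            shrinker_register(s);
            return 0;
    }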
/linux/mm/Kconfig.debug
    22  pages are being allocated and freed, as unexpected state changes
   152  reduce the risk of information leaks from freed data. This does
   159  If you are only interested in sanitization of freed pages without
   253  difference being that the orphan objects are not freed but
   273  freed before kmemleak is fully initialised, use a static pool
/linux/sound/soc/intel/atom/sst/sst_ipc.c
    92  int sst_free_block(struct intel_sst_drv *ctx, struct sst_block *freed)   in sst_free_block()
    99  if (block == freed) {                           in sst_free_block()
   100  pr_debug("pvt_id freed --> %d\n", freed->drv_id);   in sst_free_block()
   102  list_del(&freed->node);                         in sst_free_block()
   104  kfree(freed->data);                             in sst_free_block()
   105  freed->data = NULL;                             in sst_free_block()
   106  kfree(freed);                                   in sst_free_block()
   111  dev_err(ctx->dev, "block is already freed!!!\n");   in sst_free_block()
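sst_free_block() above walks a list, unlinks the matching node, and frees it. A minimal sketch of that unlink-and-free pattern with the standard list helpers (struct demo_node and demo_remove are hypothetical; the real driver also serializes list access with a lock):

    #include <linux/list.h>
    #include <linux/slab.h>

    struct demo_node {
            int id;
            struct list_head node;
    };

    /* Hypothetical helper: find the entry with a matching id, unlink it
     * from the list, and free it. The _safe iterator is used because we
     * delete the node we are standing on. */
    static bool demo_remove(struct list_head *head, int id)
    {
            struct demo_node *n, *tmp;

            list_for_each_entry_safe(n, tmp, head, node) {
                    if (n->id == id) {
                            list_del(&n->node);
                            kfree(n);
                            return true;
                    }
            }
            return false;   /* already freed or never added */
    }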
/linux/arch/alpha/kernel/vmlinux.lds.S
    39  /* Will be freed after init */
    45  needed for the THREAD_SIZE aligned init_task gets freed after init */
    48  /* Freed after init ends here */
/linux/fs/xfs/xfs_attr_item.h
    33  * attached to the xfs_attr_intent until they are committed. They are freed
    34  * when the xfs_attr_intent itself is freed when the work is done.
    45  * earlier mentioned in an attri item have been freed.
/linux/fs/xfs/xfs_extfree_item.h
    24  * The EFI is reference counted so that it is not freed prior to both the EFI
    46  * AIL, so at this point both the EFI and EFD are freed.
    66  * have been freed.
/linux/Documentation/trace/events-kmem.rst
    64  When a page is freed directly by the caller, the only mm_page_free event
    68  When pages are freed in batch, the mm_page_free_batched event is also triggered.
    70  freed in batch with a page list. Significant amounts of activity here could
    90  When the per-CPU list is too full, a number of pages are freed, each one
   101  can be allocated and freed on the same CPU through some algorithm change.
/linux/Documentation/core-api/memory-allocation.rst
   176  When the allocated memory is no longer needed it must be freed.
   178  Objects allocated by `kmalloc` can be freed by `kfree` or `kvfree`. Objects
   179  allocated by `kmem_cache_alloc` can be freed with `kmem_cache_free`, `kfree`
   185  Memory allocated by `vmalloc` can be freed with `vfree` or `kvfree`.
   186  Memory allocated by `kvmalloc` can be freed with `kvfree`.
   187  Caches created by `kmem_cache_create` should be freed with
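The pairing rules quoted above map directly onto code. A short sketch exercising each documented pair (the sizes and the "demo" cache name are arbitrary):

    #include <linux/slab.h>
    #include <linux/vmalloc.h>
    #include <linux/mm.h>

    static void demo_alloc_free_pairs(void)
    {
            void *a, *b, *c;
            struct kmem_cache *cache;

            a = kmalloc(128, GFP_KERNEL);    /* kmalloc -> kfree (or kvfree) */
            kfree(a);

            b = vmalloc(4 * PAGE_SIZE);      /* vmalloc -> vfree (or kvfree) */
            vfree(b);

            c = kvmalloc(4 * PAGE_SIZE, GFP_KERNEL);   /* kvmalloc -> kvfree */
            kvfree(c);

            cache = kmem_cache_create("demo", 64, 0, 0, NULL);
            if (cache) {
                    void *obj = kmem_cache_alloc(cache, GFP_KERNEL);

                    /* kmem_cache_alloc -> kmem_cache_free */
                    if (obj)
                            kmem_cache_free(cache, obj);
                    /* the cache itself is released with kmem_cache_destroy */
                    kmem_cache_destroy(cache);
            }
    }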
/linux/include/trace/events/jbd2.h
   330  unsigned long block_nr, unsigned long freed),
   332  TP_ARGS(journal, first_tid, block_nr, freed),
   339  __field(unsigned long, freed )
   347  __entry->freed = freed;
   350  TP_printk("dev %d,%d from %u to %u offset %lu freed %lu",
   353  __entry->block_nr, __entry->freed)
   487  TP_printk("dev %d,%d shrink transaction %u-%u(%u) freed %lu "
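The jbd2.h fragments above are pieces of a TRACE_EVENT() definition. For context, this is the overall shape such a definition takes; the event name and fields here are hypothetical, not the actual jbd2 tracepoint, and the surrounding trace-header boilerplate (TRACE_SYSTEM, CREATE_TRACE_POINTS in one .c file) is omitted:

    #include <linux/tracepoint.h>

    TRACE_EVENT(demo_freed,
            /* Prototype and arguments of the tracepoint call site. */
            TP_PROTO(unsigned long block_nr, unsigned long freed),
            TP_ARGS(block_nr, freed),

            /* Per-event record layout in the trace ring buffer. */
            TP_STRUCT__entry(
                    __field(unsigned long, block_nr)
                    __field(unsigned long, freed)
            ),

            /* Copy the arguments into the record at trace time. */
            TP_fast_assign(
                    __entry->block_nr = block_nr;
                    __entry->freed = freed;
            ),

            /* Human-readable rendering for the trace output. */
            TP_printk("block_nr %lu freed %lu",
                      __entry->block_nr, __entry->freed)
    );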
/linux/Documentation/mm/hugetlbfs_reserv.rst
    76  reservation must be restored when the huge page is freed. More
   225  * freed, the reservation will be restored. */
   237  for subpool accounting when the folio is freed.
   267  when a huge page that has been instantiated is freed no adjustment is made
   274  Huge pages are freed by free_huge_folio(). It is only passed a pointer
   276  is freed, reservation accounting may need to be performed. This would
   278  reserves, or the page is being freed on an error path where a global
   419  be released and the reservation map freed. Before freeing the reservation
   420  map, all the individual file_region structures must be freed. In this case
   423  after the new file size must be freed. In addition, any file_region entries
   [all …]
/linux/kernel/module/stats.c
    42  * from kernel_read_file_from_fd() is freed right away.
    48  * counter will be incremented with the summation of the allocated bytes freed
    50  * step b) a separate counter is used and incremented for the bytes freed and
    79  * All virtual memory allocated to these failed modules will be freed with
   120  * * invalid_kread_bytes: bytes allocated and then freed on failures which
   124  * * invalid_decompress_bytes: number of bytes allocated and freed due to
   128  * * invalid_becoming_bytes: total number of bytes allocated and freed used
   150  * freed bytes in kernel_read_file_from_fd() calls for these types of
   164  * freed due to failures after we did all the sanity checks of the module
   171  * also freed and not used, and so we increment this counter with twice
/linux/drivers/iommu/iommu-pages.c
    38  * Returns the virtual address of the allocated page. The page must be freed
   105  * @virt: virtual address of the page to be freed.
   119  * @list: The list of pages to be freed
   236  * @virt: virtual address of the page to be freed.
   239  * If the page is incoherent, it is made coherent again and then freed.
/linux/tools/testing/selftests/bpf/progs/bpf_mod_race.c
    63  * Now, if we inject an error in the blocked program, our module will be freed
    65  * Later, when the bpf program is freed, it will try to module_put the already freed
/linux/kernel/trace/rethook.c
    56  * @rh: the struct rethook to be freed.
    60  * calling this function.) This function will set the @rh to be freed
    61  * after all rethook_node are freed (not soon). And the caller must
   101  * Note that @handler == NULL means this rethook is going to be freed.
   140  * marked as freed, this will free the @node.
   165  /* Check whether @rh is going to be freed. */   in rethook_try_get()
/linux/fs/nfsd/nfscache.c
   124  unsigned long freed = 0;                        in nfsd_cacherep_dispose()
   130  freed++;                                        in nfsd_cacherep_dispose()
   132  return freed;                                   in nfsd_cacherep_dispose()
   265  unsigned int freed = 0;                         in nfsd_prune_bucket_locked()
   278  if (max && ++freed > max)                       in nfsd_prune_bucket_locked()
   307  * nr_to_scan freed objects. Nothing will be released if the cache
   316  unsigned long freed = 0;                        in nfsd_reply_cache_scan()
   330  freed += nfsd_cacherep_dispose(&dispose);       in nfsd_reply_cache_scan()
   331  if (freed > sc->nr_to_scan)                     in nfsd_reply_cache_scan()
   334  return freed;                                   in nfsd_reply_cache_scan()
/linux/drivers/comedi/comedi_buf.c
   402  * space freed is limited to the amount that was reserved. The freed space is
   405  * If the samples in the freed space need to be "munged", do so here. The
   406  * freed space becomes available for allocation by the reader.
   408  * Return: The amount of space freed in bytes.
   448  * which has been freed by the writer and "munged" to the sample data format
   499  * reserved before it can be freed.
   550  * amount of space freed is limited to the amount that was reserved.
   552  * The freed space becomes available for allocation by the writer.
   554  * Return: The amount of space freed in bytes.
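The comedi_buf.c kerneldoc above describes a reserve-then-free discipline: space must be reserved before it can be freed, and a free can never exceed the outstanding reservation. A minimal sketch of that clamping rule as generic counters (struct demo_buf and its fields are hypothetical, not comedi's internal state):

    struct demo_buf {
            unsigned int reserved;  /* bytes reserved but not yet freed */
            unsigned int count;     /* bytes consumed so far */
    };

    /* Hypothetical free path: the amount actually freed is clamped to
     * the outstanding reservation, and the clamped value is returned so
     * the caller knows how much really went through. */
    static unsigned int demo_buf_free(struct demo_buf *b, unsigned int nbytes)
    {
            if (nbytes > b->reserved)
                    nbytes = b->reserved;
            b->reserved -= nbytes;
            b->count += nbytes;
            return nbytes;
    }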
/linux/fs/erofs/zutil.c
   257  unsigned long freed = 0;                        in erofs_shrink_scan()
   283  freed += z_erofs_shrink_scan(sbi, nr - freed);  in erofs_shrink_scan()
   295  if (freed >= nr)                                in erofs_shrink_scan()
   299  return freed;                                   in erofs_shrink_scan()
/linux/Documentation/ABI/testing/sysfs-kernel-slab
   169  has been deactivated and contained free objects that were freed
   202  slabs (not objects) are freed by rcu.
   211  been freed in a full slab so that it had to be added to its node's
   231  The free_fastpath file shows how many objects have been freed
   242  The free_frozen file shows how many objects have been freed to
   254  been freed to a now-empty slab so that it had to be removed from
   266  freed back to the page allocator. It can be written to clear
   276  The free_slowpath file shows how many objects have been freed
   438  are freed and the partial list is sorted so the slabs
/linux/arch/mips/include/asm/dsemul.h
    67  * Return: True if a frame was freed, else false.
   103  * for delay slot 'emulation' book-keeping is freed. This is to be called
   104  * before @mm is freed in order to avoid memory leaks.
/linux/arch/powerpc/platforms/powernv/memtrace.c
   229  /* We have freed this chunk previously */       in memtrace_free_regions()
   247  * Memory was freed successfully so clean up references to it   in memtrace_free_regions()
   248  * so on reentry we can tell that this chunk was freed.         in memtrace_free_regions()
   251  pr_info("Freed trace memory back on node %d\n", ent->nid);     in memtrace_free_regions()
   257  /* If all chunks of memory were freed successfully, reset globals */   in memtrace_free_regions()
/linux/include/linux/kfence.h
    89  * objects "zombie allocations". Objects may then still be used or freed (which
   162  * @addr: object to be freed
   166  * Release a KFENCE object and mark it as freed.
   172  * @addr: object to be freed
   178  * Release a KFENCE object and mark it as freed. May be called on any object,