/linux/fs/ubifs/shrinker.c
    56:  * clean znodes which are younger than @age. Returns number of freed znodes.
    83:  int freed;                                           [shrink_tnc, local]
   116:  freed = ubifs_destroy_tnc_subtree(c, znode);         [shrink_tnc]
   117:  atomic_long_sub(freed, &ubifs_clean_zn_cnt);         [shrink_tnc]
   118:  atomic_long_sub(freed, &c->clean_zn_cnt);            [shrink_tnc]
   119:  total_freed += freed;                                [shrink_tnc]
   141:  * znodes which are older than @age, until at least @nr znodes are freed.
   142:  * Returns the number of freed znodes.
   149:  int freed = 0;                                       [shrink_tnc_trees, local]
   187:  freed += shrink_tnc(c, nr, age, contention);         [shrink_tnc_trees]
   [all …]
/linux/drivers/staging/octeon/ethernet-mem.c
    26:  int freed = elements;                                [cvm_oct_fill_hw_skbuff, local]
    28:  while (freed) {                                      [cvm_oct_fill_hw_skbuff]
    36:  freed--;                                             [cvm_oct_fill_hw_skbuff]
    38:  return elements - freed;                             [cvm_oct_fill_hw_skbuff]
    81:  int freed = elements;                                [cvm_oct_fill_hw_memory, local]
    83:  while (freed) {                                      [cvm_oct_fill_hw_memory]
   103:  freed--;                                             [cvm_oct_fill_hw_memory]
   105:  return elements - freed;                             [cvm_oct_fill_hw_memory]
   139:  int freed;                                           [cvm_oct_mem_fill_fpa, local]
   142:  freed = cvm_oct_fill_hw_skbuff(pool, size, elements);  [cvm_oct_mem_fill_fpa]
   [all …]
/linux/fs/f2fs/shrinker.c
    86:  unsigned long freed = 0;                             [f2fs_shrink_scan, local]
   109:  freed += f2fs_shrink_age_extent_tree(sbi, nr >> 2);  [f2fs_shrink_scan]
   112:  freed += f2fs_shrink_read_extent_tree(sbi, nr >> 2); [f2fs_shrink_scan]
   115:  if (freed < nr)                                      [f2fs_shrink_scan]
   116:  freed += f2fs_try_to_free_nats(sbi, nr - freed);     [f2fs_shrink_scan]
   119:  if (freed < nr)                                      [f2fs_shrink_scan]
   120:  freed += f2fs_try_to_free_nids(sbi, nr - freed);     [f2fs_shrink_scan]
   126:  if (freed >= nr)                                     [f2fs_shrink_scan]
   130:  return freed;                                        [f2fs_shrink_scan]
/linux/drivers/gpu/drm/msm/msm_gem_shrinker.c
   107:  unsigned long freed;                                 [msm_gem_shrinker_scan, member]
   117:  unsigned long freed = 0;                             [msm_gem_shrinker_scan, local]
   123:  stages[i].freed =                                    [msm_gem_shrinker_scan]
   127:  nr -= stages[i].freed;                               [msm_gem_shrinker_scan]
   128:  freed += stages[i].freed;                            [msm_gem_shrinker_scan]
   132:  if (freed) {                                         [msm_gem_shrinker_scan]
   133:  trace_msm_gem_shrink(sc->nr_to_scan, stages[0].freed,  [msm_gem_shrinker_scan]
   134:          stages[1].freed, stages[2].freed,
   135:          stages[3].freed);
   138:  return (freed > 0 && remaining > 0) ? freed : SHRINK_STOP;  [msm_gem_shrinker_scan]
/linux/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
    69:  unsigned long freed = 0;                             [panfrost_gem_shrinker_scan, local]
    75:  if (freed >= sc->nr_to_scan)                         [panfrost_gem_shrinker_scan]
    79:  freed += shmem->base.size >> PAGE_SHIFT;             [panfrost_gem_shrinker_scan]
    86:  if (freed > 0)                                       [panfrost_gem_shrinker_scan]
    87:  pr_info_ratelimited("Purging %lu bytes\n", freed << PAGE_SHIFT);  [panfrost_gem_shrinker_scan]
    89:  return freed;                                        [panfrost_gem_shrinker_scan]
/linux/mm/shrinker.c
   374:  unsigned long freed = 0;                             [do_shrink_slab, local]
   440:  freed += ret;                                        [do_shrink_slab]
   464:  trace_mm_shrink_slab_end(shrinker, shrinkctl->nid, freed, nr, new_nr, total_scan);
   465:  return freed;                                        [do_shrink_slab]
   473:  unsigned long ret, freed = 0;                        [shrink_slab_memcg, local]
   482:  * The shrinker_info may be freed asynchronously via RCU in the
   486:  * The shrinker_info_unit is never freed unless its corresponding memcg
   489:  * not be freed.
   575:  freed += ret;                                        [shrink_slab_memcg]
   584:  return freed;                                        [shrink_slab_memcg]
   [all …]
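Every scan callback in this listing implements the same contract from mm/shrinker.c: report how many objects could be freed, free up to sc->nr_to_scan of them when asked, and return the number actually freed, or SHRINK_STOP when reclaim cannot make progress. A minimal sketch of that contract against the shrinker_alloc()/shrinker_register() API (kernel 6.7 and later); the demo_* names and the cache they manage are hypothetical:

    #include <linux/shrinker.h>
    #include <linux/atomic.h>
    #include <linux/minmax.h>

    static atomic_long_t demo_nr_cached;    /* hypothetical freeable-object count */

    /* Hypothetical reclaim: pretend we can free whatever is cached. */
    static unsigned long demo_reclaim(unsigned long nr)
    {
            unsigned long avail = atomic_long_read(&demo_nr_cached);

            return min(nr, avail);
    }

    /* Report how many objects could be freed right now. */
    static unsigned long demo_count(struct shrinker *s, struct shrink_control *sc)
    {
            return atomic_long_read(&demo_nr_cached);
    }

    /* Free up to sc->nr_to_scan objects; return how many were freed. */
    static unsigned long demo_scan(struct shrinker *s, struct shrink_control *sc)
    {
            unsigned long freed = demo_reclaim(sc->nr_to_scan);

            atomic_long_sub(freed, &demo_nr_cached);
            return freed ? freed : SHRINK_STOP;
    }

    static int demo_shrinker_init(void)
    {
            struct shrinker *s = shrinker_alloc(0, "demo");

            if (!s)
                    return -ENOMEM;
            s->count_objects = demo_count;
            s->scan_objects = demo_scan;
            shrinker_register(s);
            return 0;
    }

Several entries in this listing (f2fs, erofs, nfsd) show the common refinement: keep freeing from successive sources while freed < sc->nr_to_scan, then return the accumulated total.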
/linux/fs/erofs/zutil.c
   337:  unsigned int freed = 0;                              [erofs_shrink_workstation, local]
   347:  ++freed;                                             [erofs_shrink_workstation]
   349:  return freed;                                        [erofs_shrink_workstation]
   353:  return freed;                                        [erofs_shrink_workstation]
   395:  unsigned long freed = 0;                             [erofs_shrink_scan, local]
   422:  freed += erofs_shrink_workstation(sbi, nr - freed);  [erofs_shrink_scan]
   435:  if (freed >= nr)                                     [erofs_shrink_scan]
   439:  return freed;                                        [erofs_shrink_scan]
/linux/sound/soc/intel/atom/sst/sst_ipc.c
    92:  int sst_free_block(struct intel_sst_drv *ctx, struct sst_block *freed)  [sst_free_block, argument]
    99:  if (block == freed) {                                [sst_free_block]
   100:  pr_debug("pvt_id freed --> %d\n", freed->drv_id);    [sst_free_block]
   102:  list_del(&freed->node);                              [sst_free_block]
   104:  kfree(freed->data);                                  [sst_free_block]
   105:  freed->data = NULL;                                  [sst_free_block]
   106:  kfree(freed);                                        [sst_free_block]
   111:  dev_err(ctx->dev, "block is already freed!!!\n");    [sst_free_block]
/linux/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
   167:  * and result in the object being freed from under us. This is  [i915_gem_shrink]
   190:  * yet freed (due to RCU then a workqueue) we still want        [i915_gem_shrink]
   192:  * the unbound/bound list until actually freed.                 [i915_gem_shrink]
   277:  unsigned long freed = 0;                             [i915_gem_shrink_all, local]
   280:  freed = i915_gem_shrink(NULL, i915, -1UL, NULL,      [i915_gem_shrink_all]
   285:  return freed;                                        [i915_gem_shrink_all]
   320:  unsigned long freed;                                 [i915_gem_shrinker_scan, local]
   324:  freed = i915_gem_shrink(NULL, i915,                  [i915_gem_shrinker_scan]
   333:  freed += i915_gem_shrink(NULL, i915,                 [i915_gem_shrinker_scan]
   343:  return sc->nr_scanned ? freed : SHRINK_STOP;         [i915_gem_shrinker_scan]
   [all …]
/linux/Documentation/mm/hugetlbfs_reserv.rst
    76:  reservation must be restored when the huge page is freed. More
   225:  * freed, the reservation will be restored. */
   237:  for subpool accounting when the folio is freed.
   267:  when a huge page that has been instantiated is freed no adjustment is made
   274:  Huge pages are freed by free_huge_folio(). It is only passed a pointer
   276:  is freed, reservation accounting may need to be performed. This would
   278:  reserves, or the page is being freed on an error path where a global
   419:  be released and the reservation map freed. Before freeing the reservation
   420:  map, all the individual file_region structures must be freed. In this case
   423:  after the new file size must be freed. In addition, any file_region entries
   [all …]
/linux/Documentation/trace/events-kmem.rst
    64:  When a page is freed directly by the caller, the only mm_page_free event
    68:  When pages are freed in batch, the mm_page_free_batched event is also triggered.
    70:  freed in batch with a page list. Significant amounts of activity here could
    90:  When the per-CPU list is too full, a number of pages are freed, each one
   101:  can be allocated and freed on the same CPU through some algorithm change.
/linux/include/trace/events/jbd2.h
   330:  unsigned long block_nr, unsigned long freed),
   332:  TP_ARGS(journal, first_tid, block_nr, freed),
   339:  __field(unsigned long, freed)
   347:  __entry->freed = freed;
   350:  TP_printk("dev %d,%d from %u to %u offset %lu freed %lu",
   353:  __entry->block_nr, __entry->freed)
   487:  TP_printk("dev %d,%d shrink transaction %u-%u(%u) freed %lu "
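The jbd2 fragments above are pieces of a TRACE_EVENT definition that records a freed count. For orientation, a complete minimal definition of the same shape; the event name and fields here are hypothetical, not part of jbd2:

    /* Body of a trace header; the surrounding TRACE_SYSTEM and
     * <trace/define_trace.h> boilerplate is omitted for brevity. */
    #include <linux/tracepoint.h>

    TRACE_EVENT(demo_shrink,    /* hypothetical event */

            TP_PROTO(unsigned long nr_to_scan, unsigned long freed),

            TP_ARGS(nr_to_scan, freed),

            TP_STRUCT__entry(
                    __field(unsigned long, nr_to_scan)
                    __field(unsigned long, freed)
            ),

            TP_fast_assign(
                    __entry->nr_to_scan = nr_to_scan;
                    __entry->freed = freed;
            ),

            TP_printk("nr_to_scan %lu freed %lu",
                      __entry->nr_to_scan, __entry->freed)
    );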
/linux/Documentation/core-api/memory-allocation.rst
   176:  When the allocated memory is no longer needed it must be freed.
   178:  Objects allocated by `kmalloc` can be freed by `kfree` or `kvfree`. Objects
   179:  allocated by `kmem_cache_alloc` can be freed with `kmem_cache_free`, `kfree`
   185:  Memory allocated by `vmalloc` can be freed with `vfree` or `kvfree`.
   186:  Memory allocated by `kvmalloc` can be freed with `kvfree`.
   187:  Caches created by `kmem_cache_create` should be freed with
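The pairing rules quoted above are easy to get wrong in review; a short sketch of the legal pairings (error handling elided, the "demo" cache name is made up):

    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    static void pairing_demo(void)
    {
            void *a = kmalloc(64, GFP_KERNEL);
            void *b = kvmalloc(1 << 20, GFP_KERNEL);
            void *c = vmalloc(1 << 20);
            struct kmem_cache *cache = kmem_cache_create("demo", 128, 0, 0, NULL);
            void *obj = kmem_cache_alloc(cache, GFP_KERNEL);

            kfree(a);                     /* kmalloc pairs with kfree (kvfree also legal) */
            kvfree(b);                    /* kvmalloc must be freed with kvfree */
            vfree(c);                     /* vmalloc pairs with vfree (kvfree also legal) */
            kmem_cache_free(cache, obj);  /* cache objects go back to their cache */
            kmem_cache_destroy(cache);    /* caches are destroyed, not kfree'd */
    }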
/linux/kernel/module/stats.c
    42:  * from kernel_read_file_from_fd() is freed right away.
    48:  * counter will be incremented with the summation of the allocated bytes freed
    50:  * step b) a separate counter is used and incremented for the bytes freed and
    79:  * All virtual memory allocated to these failed modules will be freed with
   120:  * * invalid_kread_bytes: bytes allocated and then freed on failures which
   124:  * * invalid_decompress_bytes: number of bytes allocated and freed due to
   128:  * * invalid_becoming_bytes: total number of bytes allocated and freed used
   150:  * freed bytes in kernel_read_file_from_fd() calls for these types of
   164:  * freed due to failures after we did all the sanity checks of the module
   171:  * also freed and not used, and so we increment this counter with twice
   [all …]
/linux/arch/alpha/kernel/vmlinux.lds.S
    39:  /* Will be freed after init */
    45:  needed for the THREAD_SIZE aligned init_task gets freed after init */
    48:  /* Freed after init ends here */
/linux/include/drm/drm_managed.h
    61:  * automatically freed on the final drm_dev_put(). Memory can also be freed
    77:  * memory is automatically freed on the final drm_dev_put() and works exactly
    99:  * automatically freed on the final drm_dev_put() and works exactly like a
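The drm_managed.h comments above describe drm-managed ("drmm") allocations whose lifetime is tied to the drm_device. A minimal sketch of the pattern; demo_state and demo_init are hypothetical:

    #include <drm/drm_device.h>
    #include <drm/drm_managed.h>

    struct demo_state { int foo; };    /* hypothetical driver state */

    static int demo_init(struct drm_device *drm)
    {
            struct demo_state *st = drmm_kzalloc(drm, sizeof(*st), GFP_KERNEL);

            if (!st)
                    return -ENOMEM;
            /* No kfree() on any path: the allocation is freed
             * automatically on the final drm_dev_put(). */
            return 0;
    }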
/linux/drivers/comedi/comedi_buf.c
   421:  * space freed is limited to the amount that was reserved. The freed space is
   424:  * If the samples in the freed space need to be "munged", do so here. The
   425:  * freed space becomes available for allocation by the reader.
   427:  * Return: The amount of space freed in bytes.
   454:  * which has been freed by the writer and "munged" to the sample data format
   490:  * reserved before it can be freed.
   528:  * amount of space freed is limited to the amount that was reserved.
   530:  * The freed space becomes available for allocation by the writer.
   532:  * Return: The amount of space freed in bytes.
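The comedi_buf.c kernel-doc above describes a reserve-then-free discipline on both sides of the acquisition buffer. A sketch of the writer side under that discipline, assuming the comedi_buf_write_alloc()/comedi_buf_write_free() pair these comments document, with s being the comedi_subdevice that owns the buffer:

    /* Reserve buffer space, fill it, then "free" it to the reader.
     * As documented above, the amount freed is limited to the
     * amount that was reserved. */
    unsigned int n = comedi_buf_write_alloc(s, nbytes);

    /* ... copy up to n bytes of new sample data into the buffer ... */

    comedi_buf_write_free(s, n);    /* now available for the reader */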
/linux/fs/nfsd/nfscache.c
   124:  unsigned long freed = 0;                             [nfsd_cacherep_dispose, local]
   130:  freed++;                                             [nfsd_cacherep_dispose]
   132:  return freed;                                        [nfsd_cacherep_dispose]
   269:  unsigned int freed = 0;                              [nfsd_prune_bucket_locked, local]
   289:  if (max && ++freed > max)                            [nfsd_prune_bucket_locked]
   318:  * nr_to_scan freed objects. Nothing will be released if the cache
   327:  unsigned long freed = 0;                             [nfsd_reply_cache_scan, local]
   341:  freed += nfsd_cacherep_dispose(&dispose);            [nfsd_reply_cache_scan]
   342:  if (freed > sc->nr_to_scan)                          [nfsd_reply_cache_scan]
   345:  return freed;                                        [nfsd_reply_cache_scan]
/linux/drivers/iommu/iommu-pages.h
    37:  * __iommu_free_account - account a page that is about to be freed.
   146:  * @virt: virtual address of the page to be freed.
   159:  * @virt: virtual address of the page to be freed.
   168:  * @page: the head of the lru list to be freed.
/linux/arch/s390/include/asm/tlb.h
    44:  * has already been freed, so just do free_page_and_swap_cache.
    96:  * If the mm uses a two level page table the single pmd is freed
   116:  * If the mm uses a four level page table the single p4d is freed
   134:  * If the mm uses a three level page table the single pud is freed
/linux/kernel/trace/rethook.c
    56:  * @rh: the struct rethook to be freed.
    60:  * calling this function.) This function will set the @rh to be freed
    61:  * after all rethook_node are freed (not soon). And the caller must
   101:  * Note that @handler == NULL means this rethook is going to be freed.
   140:  * marked as freed, this will free the @node.
   165:  /* Check whether @rh is going to be freed. */        [rethook_try_get]
/linux/Documentation/ABI/testing/sysfs-kernel-slab
   168:  has been deactivated and contained free objects that were freed
   201:  slabs (not objects) are freed by rcu.
   210:  been freed in a full slab so that it had to be added to its node's
   230:  The free_fastpath file shows how many objects have been freed
   241:  The free_frozen file shows how many objects have been freed to
   253:  been freed to a now-empty slab so that it had to be removed from
   265:  freed back to the page allocator. It can be written to clear
   275:  The free_slowpath file shows how many objects have been freed
   437:  are freed and the partial list is sorted so the slabs
   [all …]
/linux/tools/testing/selftests/bpf/progs/bpf_mod_race.c
    63:  * Now, if we inject an error in the blocked program, our module will be freed
    65:  * Later, when the bpf program is freed, it will try to module_put the already freed
/linux/fs/xfs/xfs_extfree_item.h
    24:  * The EFI is reference counted so that it is not freed prior to both the EFI
    46:  * AIL, so at this point both the EFI and EFD are freed.
    66:  * have been freed.
/linux/drivers/regulator/devres.c
   223:  * code will ensure that the resource is freed.
   283:  * automatically be freed when the device is unbound. If any of the
   285:  * allocated will be freed before returning to the caller.
   306:  * will automatically be freed when the device is unbound. If any of
   308:  * allocated will be freed before returning to the caller.
   366:  * code will ensure that the resource is freed.
   400:  * automatically be freed when the device is unbound. If any of the
   402:  * allocated will be freed before returning to the caller.
   683:  * management code will ensure that the resource is freed.
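The devres.c comments above all make the same promise: resources obtained through the devm_ variants are freed by the device-management code when the device is unbound. A probe-time sketch of the pattern; demo_probe and the "vdd" supply name are hypothetical:

    #include <linux/err.h>
    #include <linux/platform_device.h>
    #include <linux/regulator/consumer.h>

    static int demo_probe(struct platform_device *pdev)
    {
            struct regulator *vdd;

            /* Managed get: no regulator_put() needed on any path;
             * the reference is freed automatically on unbind. */
            vdd = devm_regulator_get(&pdev->dev, "vdd");
            if (IS_ERR(vdd))
                    return PTR_ERR(vdd);

            return 0;
    }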