
Searched refs:slab (Results 1 – 25 of 74) sorted by relevance

/linux/drivers/md/dm-vdo/
slab-depot.c
54 static bool is_slab_open(struct vdo_slab *slab) in is_slab_open() argument
56 return (!vdo_is_state_quiescing(&slab->state) && in is_slab_open()
57 !vdo_is_state_quiescent(&slab->state)); in is_slab_open()
68 return ((journal->slab->status != VDO_SLAB_REBUILDING) && in must_make_entries_to_flush()
136 static bool is_slab_journal_blank(const struct vdo_slab *slab) in is_slab_journal_blank() argument
138 return ((slab->journal.tail == 1) && in is_slab_journal_blank()
139 (slab->journal.tail_header.entry_count == 0)); in is_slab_journal_blank()
151 struct list_head *dirty_list = &journal->slab->allocator->dirty_slab_journals; in mark_slab_journal_dirty()
170 static void check_if_slab_drained(struct vdo_slab *slab) in check_if_slab_drained() argument
173 struct slab_journal *journal = &slab->journal; in check_if_slab_drained()
[all …]
slab-depot.h
84 struct vdo_slab *slab; member
167 struct vdo_slab *slab; member
300 struct vdo_slab *slab; member
517 bool __must_check vdo_attempt_replay_into_slab(struct vdo_slab *slab,
534 int __must_check vdo_acquire_provisional_reference(struct vdo_slab *slab,
/linux/mm/
slub.c
3 * SLUB: A slab allocator that limits cache line use instead of queuing
6 * The allocator synchronizes using per slab locks or atomic operations
20 #include <linux/slab.h>
21 #include "slab.h"
59 * 4. slab_lock(slab) (Only on some arches)
65 * and to synchronize major metadata changes to slab cache structures.
76 * A. slab->freelist -> List of free objects in a slab
77 * B. slab->inuse -> Number of objects in use
78 * C. slab
429 struct slab *slab; /* The slab from which we are allocating */ global() member
734 slab_test_pfmemalloc(const struct slab * slab) slab_test_pfmemalloc() argument
739 slab_set_pfmemalloc(struct slab * slab) slab_set_pfmemalloc() argument
744 __slab_clear_pfmemalloc(struct slab * slab) __slab_clear_pfmemalloc() argument
752 slab_lock(struct slab * slab) slab_lock() argument
757 slab_unlock(struct slab * slab) slab_unlock() argument
763 __update_freelist_fast(struct slab * slab,struct freelist_counters * old,struct freelist_counters * new) __update_freelist_fast() argument
776 __update_freelist_slow(struct slab * slab,struct freelist_counters * old,struct freelist_counters * new) __update_freelist_slow() argument
800 __slab_update_freelist(struct kmem_cache * s,struct slab * slab,struct freelist_counters * old,struct freelist_counters * new,const char * n) __slab_update_freelist() argument
826 slab_update_freelist(struct kmem_cache * s,struct slab * slab,struct freelist_counters * old,struct freelist_counters * new,const char * n) slab_update_freelist() argument
895 validate_slab_ptr(struct slab * slab) validate_slab_ptr() argument
904 __fill_map(unsigned long * obj_map,struct kmem_cache * s,struct slab * slab) __fill_map() argument
1002 check_valid_pointer(struct kmem_cache * s,struct slab * slab,void * object) check_valid_pointer() argument
1120 print_slab_info(const struct slab * slab) print_slab_info() argument
1171 print_trailer(struct kmem_cache * s,struct slab * slab,u8 * p) print_trailer() argument
1211 object_err(struct kmem_cache * s,struct slab * slab,u8 * object,const char * reason) object_err() argument
1229 freelist_corrupted(struct kmem_cache * s,struct slab * slab,void ** freelist,void * nextfree) freelist_corrupted() argument
1243 __slab_err(struct slab * slab) __slab_err() argument
1254 slab_err(struct kmem_cache * s,struct slab * slab,const char * fmt,...) slab_err() argument
1317 check_bytes_and_report(struct kmem_cache * s,struct slab * slab,u8 * object,const char * what,u8 * start,unsigned int value,unsigned int bytes,bool slab_obj_print) check_bytes_and_report() argument
1388 check_pad_bytes(struct kmem_cache * s,struct slab * slab,u8 * p) check_pad_bytes() argument
1411 slab_pad_check(struct kmem_cache * s,struct slab * slab) slab_pad_check() argument
1447 check_object(struct kmem_cache * s,struct slab * slab,void * object,u8 val) check_object() argument
1532 check_slab(struct kmem_cache * s,struct slab * slab) check_slab() argument
1561 on_freelist(struct kmem_cache * s,struct slab * slab,void * search) on_freelist() argument
1618 trace(struct kmem_cache * s,struct slab * slab,void * object,int alloc) trace() argument
1640 add_full(struct kmem_cache * s,struct kmem_cache_node * n,struct slab * slab) add_full() argument
1649 remove_full(struct kmem_cache * s,struct kmem_cache_node * n,struct slab * slab) remove_full() argument
1689 setup_slab_debug(struct kmem_cache * s,struct slab * slab,void * addr) setup_slab_debug() argument
1700 alloc_consistency_checks(struct kmem_cache * s,struct slab * slab,void * object) alloc_consistency_checks() argument
1717 alloc_debug_processing(struct kmem_cache * s,struct slab * slab,void * object,int orig_size) alloc_debug_processing() argument
1744 free_consistency_checks(struct kmem_cache * s,struct slab * slab,void * object,unsigned long addr) free_consistency_checks() argument
1991 setup_slab_debug(struct kmem_cache * s,struct slab * slab,void * addr) setup_slab_debug() argument
1994 alloc_debug_processing(struct kmem_cache * s,struct slab * slab,void * object,int orig_size) alloc_debug_processing() argument
1997 free_debug_processing(struct kmem_cache * s,struct slab * slab,void * head,void * tail,int * bulk_cnt,unsigned long addr,depot_stack_handle_t handle) free_debug_processing() argument
2000 slab_pad_check(struct kmem_cache * s,struct slab * slab) slab_pad_check() argument
2001 check_object(struct kmem_cache * s,struct slab * slab,void * object,u8 val) check_object() argument
2007 add_full(struct kmem_cache * s,struct kmem_cache_node * n,struct slab * slab) add_full() argument
2009 remove_full(struct kmem_cache * s,struct kmem_cache_node * n,struct slab * slab) remove_full() argument
2024 freelist_corrupted(struct kmem_cache * s,struct slab * slab,void ** freelist,void * nextfree) freelist_corrupted() argument
2063 mark_failed_objexts_alloc(struct slab * slab) mark_failed_objexts_alloc() argument
2087 mark_failed_objexts_alloc(struct slab * slab) mark_failed_objexts_alloc() argument
2093 init_slab_obj_exts(struct slab * slab) init_slab_obj_exts() argument
2098 alloc_slab_obj_exts(struct slab * slab,struct kmem_cache * s,gfp_t gfp,bool new_slab) alloc_slab_obj_exts() argument
2177 free_slab_obj_exts(struct slab * slab) free_slab_obj_exts() argument
2209 init_slab_obj_exts(struct slab * slab) init_slab_obj_exts() argument
2213 alloc_slab_obj_exts(struct slab * slab,struct kmem_cache * s,gfp_t gfp,bool new_slab) alloc_slab_obj_exts() argument
2219 free_slab_obj_exts(struct slab * slab) free_slab_obj_exts() argument
2230 struct slab *slab; prepare_slab_obj_exts_hook() local
2279 __alloc_tagging_slab_free_hook(struct kmem_cache * s,struct slab * slab,void ** p,int objects) __alloc_tagging_slab_free_hook() argument
2301 alloc_tagging_slab_free_hook(struct kmem_cache * s,struct slab * slab,void ** p,int objects) alloc_tagging_slab_free_hook() argument
2316 alloc_tagging_slab_free_hook(struct kmem_cache * s,struct slab * slab,void ** p,int objects) alloc_tagging_slab_free_hook() argument
2352 memcg_slab_free_hook(struct kmem_cache * s,struct slab * slab,void ** p,int objects) memcg_slab_free_hook() argument
2373 struct slab *slab; memcg_slab_post_charge() local
2430 memcg_slab_free_hook(struct kmem_cache * s,struct slab * slab,void ** p,int objects) memcg_slab_free_hook() argument
2758 struct slab *slab = virt_to_slab(p[i]); __rcu_free_sheaf_prepare() local
3068 struct slab *slab; alloc_slab_page() local
3154 shuffle_freelist(struct kmem_cache * s,struct slab * slab) shuffle_freelist() argument
3192 shuffle_freelist(struct kmem_cache * s,struct slab * slab) shuffle_freelist() argument
3198 account_slab(struct slab * slab,int order,struct kmem_cache * s,gfp_t gfp) account_slab() argument
3208 unaccount_slab(struct slab * slab,int order,struct kmem_cache * s) unaccount_slab() argument
3225 struct slab *slab; allocate_slab() local
3306 __free_slab(struct kmem_cache * s,struct slab * slab) __free_slab() argument
3322 struct slab *slab = container_of(h, struct slab, rcu_head); rcu_free_slab() local
3327 free_slab(struct kmem_cache * s,struct slab * slab) free_slab() argument
3343 discard_slab(struct kmem_cache * s,struct slab * slab) discard_slab() argument
3349 slab_test_node_partial(const struct slab * slab) slab_test_node_partial() argument
3354 slab_set_node_partial(struct slab * slab) slab_set_node_partial() argument
3359 slab_clear_node_partial(struct slab * slab) slab_clear_node_partial() argument
3368 __add_partial(struct kmem_cache_node * n,struct slab * slab,int tail) __add_partial() argument
3379 add_partial(struct kmem_cache_node * n,struct slab * slab,int tail) add_partial() argument
3386 remove_partial(struct kmem_cache_node * n,struct slab * slab) remove_partial() argument
3401 alloc_single_from_partial(struct kmem_cache * s,struct kmem_cache_node * n,struct slab * slab,int orig_size) alloc_single_from_partial() argument
3440 alloc_single_from_new_slab(struct kmem_cache * s,struct slab * slab,int orig_size,gfp_t gfpflags) alloc_single_from_new_slab() argument
3488 put_cpu_partial(struct kmem_cache * s,struct slab * slab,int drain) put_cpu_partial() argument
3500 struct slab *slab, *slab2, *partial = NULL; get_partial_node() local
3565 struct slab *slab; get_any_partial() local
3624 struct slab *slab; get_partial() local
3732 deactivate_slab(struct kmem_cache * s,struct slab * slab,void * freelist) deactivate_slab() argument
3853 struct slab *slab, *slab_to_discard = NULL; __put_partials() local
3926 put_cpu_partial(struct kmem_cache * s,struct slab * slab,int drain) put_cpu_partial() argument
3977 struct slab *slab; flush_slab() local
4001 struct slab *slab = c->slab; __flush_cpu_slab() local
4196 node_match(struct slab * slab,int node) node_match() argument
4206 count_free(struct slab * slab) count_free() argument
4218 free_debug_processing(struct kmem_cache * s,struct slab * slab,void * head,void * tail,int * bulk_cnt,unsigned long addr,depot_stack_handle_t handle) free_debug_processing() argument
4277 count_partial(struct kmem_cache_node * n,int (* get_count)(struct slab *)) count_partial() argument
4281 struct slab *slab; count_partial() local
4298 struct slab *slab; count_partial_free_approx() local
4369 pfmemalloc_match(struct slab * slab,gfp_t gfpflags) pfmemalloc_match() argument
4397 get_freelist(struct kmem_cache * s,struct slab * slab) get_freelist() argument
4422 freeze_slab(struct kmem_cache * s,struct slab * slab) freeze_slab() argument
4466 struct slab *slab; ___slab_alloc() local
4791 struct slab *slab; __slab_alloc_node() local
5794 free_to_partial_list(struct kmem_cache * s,struct slab * slab,void * head,void * tail,int bulk_cnt,unsigned long addr) free_to_partial_list() argument
5866 __slab_free(struct kmem_cache * s,struct slab * slab,void * head,void * tail,int cnt,unsigned long addr) __slab_free() argument
6370 struct slab *slab = virt_to_slab(p[i]); free_to_pcs_bulk() local
6506 struct slab *slab; free_deferred_objects() local
6527 struct slab *slab = container_of(pos, struct slab, llnode); free_deferred_objects() local
6547 defer_deactivate_slab(struct slab * slab,void * flush_freelist) defer_deactivate_slab() argument
6584 do_slab_free(struct kmem_cache * s,struct slab * slab,void * head,void * tail,int cnt,unsigned long addr) do_slab_free() argument
6662 slab_free(struct kmem_cache * s,struct slab * slab,void * object,unsigned long addr) slab_free() argument
6692 slab_free_bulk(struct kmem_cache * s,struct slab * slab,void * head,void * tail,void ** p,int cnt,unsigned long addr) slab_free_bulk() argument
6711 struct slab *slab = virt_to_slab(object); slab_free_after_rcu_debug() local
6741 struct slab *slab; virt_to_cache() local
6813 struct slab *slab; kvfree_rcu_cb() local
6859 struct slab *slab; kfree() local
6891 struct slab *slab; kfree_nolock() local
6968 struct slab *slab = page_slab(page); __do_krealloc() local
7262 struct slab *slab; global() member
7288 struct slab *slab; build_detached_freelist() local
7730 struct slab *slab; early_kmem_cache_node_alloc() local
8003 list_slab_objects(struct kmem_cache * s,struct slab * slab) list_slab_objects() argument
8038 struct slab *slab, *h; free_partial() local
8093 __kmem_obj_info(struct kmem_obj_info * kpp,void * object,struct slab * slab) __kmem_obj_info() argument
8231 __check_heap_object(const void * ptr,unsigned long n,const struct slab * slab,bool to_user) __check_heap_object() argument
8287 struct slab *slab; __kmem_cache_do_shrink() local
8679 count_inuse(struct slab * slab) count_inuse() argument
8684 count_total(struct slab * slab) count_total() argument
8691 validate_slab(struct kmem_cache * s,struct slab * slab,unsigned long * obj_map) validate_slab() argument
8720 struct slab *slab; validate_slab_node() local
8923 process_slab(struct loc_track * t,struct kmem_cache * s,struct slab * slab,enum track_item alloc,unsigned long * obj_map) process_slab() argument
8976 struct slab *slab; show_slab_objects() local
9208 struct slab *slab; slabs_cpu_partial_show() local
9223 struct slab *slab; slabs_cpu_partial_show() local
10008 struct slab *slab; slab_debug_trace_open() local
[all...]
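
The slub.c header comment matched above describes the core per-slab bookkeeping: slab->freelist is the chain of free objects (A.) and slab->inuse counts allocated ones (B.). A minimal userspace sketch of that scheme, using hypothetical toy types rather than the kernel's, might look like:

/* Toy stand-in: the free list is threaded through the free objects
 * themselves, so an idle object's first word is the "next" pointer. */
struct toy_slab {
        void *freelist;         /* A. head of the free-object chain */
        unsigned int inuse;     /* B. number of objects in use */
};

static void *toy_alloc(struct toy_slab *s)
{
        void *obj = s->freelist;

        if (obj) {
                s->freelist = *(void **)obj;    /* pop the head */
                s->inuse++;
        }
        return obj;
}

static void toy_free(struct toy_slab *s, void *obj)
{
        *(void **)obj = s->freelist;            /* push back on the chain */
        s->freelist = obj;
        s->inuse--;
}

The real allocator layers per-CPU caching, cmpxchg-based freelist updates, and the debugging checks matched above on top of this basic scheme.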
slab.h
68 struct slab { struct
82 struct slab *next; argument
101 static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl)) argument
111 static_assert(sizeof(struct slab) <= sizeof(struct page));
113 static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(struct freelist_counters)));
128 const struct slab *: (const struct folio *)s, \
129 struct slab *: (struct folio *)s))
138 static inline struct slab *page_slab(const struct page *page) in page_slab()
148 return (struct slab *)page; in page_slab()
161 static inline void *slab_address(const struct slab *slab) in slab_address() argument
[all …]
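
The static_asserts matched above exist because struct slab reuses the memory of struct page: page_slab() is essentially a checked cast, and the asserts guarantee the two layouts stay compatible. A reduced sketch of that overlay pattern (toy types, not the kernel's):

#include <assert.h>
#include <stddef.h>

struct toy_page { unsigned long flags; void *p0; void *p1; };
struct toy_slab { unsigned long flags; void *freelist; void *cache; };

/* The cast below is only legitimate because these hold. */
static_assert(sizeof(struct toy_slab) <= sizeof(struct toy_page),
              "slab view must fit in a page struct");
static_assert(offsetof(struct toy_slab, flags) == offsetof(struct toy_page, flags),
              "shared fields must line up");

static struct toy_slab *toy_page_slab(struct toy_page *page)
{
        return (struct toy_slab *)page;
}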
usercopy.c
168 struct slab *slab; in check_heap_object() local
194 slab = page_slab(page); in check_heap_object()
195 if (slab) { in check_heap_object()
197 __check_heap_object(ptr, n, slab, to_user); in check_heap_object()
slab_common.c
576 static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab) in kmem_obj_info() argument
578 if (__kfence_obj_info(kpp, object, slab)) in kmem_obj_info()
580 __kmem_obj_info(kpp, object, slab); in kmem_obj_info()
602 struct slab *slab; in kmem_dump_obj() local
609 slab = virt_to_slab(object); in kmem_dump_obj()
610 if (!slab) in kmem_dump_obj()
613 kmem_obj_info(&kp, object, slab); in kmem_dump_obj()
1001 const struct slab *slab; in __ksize() local
1011 slab = page_slab(page); in __ksize()
1013 if (WARN_ON(!slab)) in __ksize()
[all …]
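
Taken together, the matches above show the lookup chain kmem_dump_obj() uses: resolve the pointer to a slab (NULL means it is not slab memory at all), then let KFENCE claim the object before falling back to the slab allocator. A condensed, kernel-context sketch of that flow using the functions matched above (not standalone-compilable):

static void dump_obj_info(void *object)
{
        struct kmem_obj_info kp = { };
        struct slab *slab = virt_to_slab(object); /* NULL: not a slab object */

        if (!slab)
                return;
        if (!__kfence_obj_info(&kp, object, slab))  /* KFENCE fills kp if it owns it */
                __kmem_obj_info(&kp, object, slab); /* otherwise ask the allocator */
        /* ... report kp ... */
}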
/linux/Documentation/ABI/testing/
sysfs-kernel-slab
1 What: /sys/kernel/slab
7 The /sys/kernel/slab directory contains a snapshot of the
13 What: /sys/kernel/slab/<cache>/aliases
22 What: /sys/kernel/slab/<cache>/align
31 What: /sys/kernel/slab/<cache>/alloc_calls
41 Documentation/admin-guide/mm/slab.rst).
43 What: /sys/kernel/slab/<cache>/alloc_fastpath
54 What: /sys/kernel/slab/<cache>/alloc_from_partial
60 The alloc_from_partial file shows how many times a cpu slab has
61 been full and it has been refilled by using a slab from the list
[all …]
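
Every entry under /sys/kernel/slab/<cache>/ is a plain-text attribute, so a snapshot can be read with ordinary file I/O. A small userspace example (the "dentry" cache name is just an illustration; any cache directory works the same way):

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/kernel/slab/dentry/align", "r");
        char buf[64];

        if (!f)
                return 1;
        if (fgets(buf, sizeof(buf), f))
                printf("dentry alignment: %s", buf);
        fclose(f);
        return 0;
}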
/linux/tools/mm/
slabinfo.c
54 struct slabinfo *slab; member
360 if (a->slab == find && in find_one_alias()
1146 a->slab = s; in link_slabs()
1165 if (!show_single_ref && a->slab->refs == 1) in alias()
1170 if (strcmp(a->slab->name, active) == 0) { in alias()
1175 printf("\n%-12s <- %s", a->slab->name, a->name); in alias()
1176 active = a->slab->name; in alias()
1179 printf("%-15s -> %s\n", a->name, a->slab->name); in alias()
1209 static int slab_mismatch(char *slab) in slab_mismatch() argument
1211 return regexec(&pattern, slab, 0, NULL, 0); in slab_mismatch()
[all …]
/linux/mm/kasan/
common.c
45 struct slab *kasan_addr_to_slab(const void *addr) in kasan_addr_to_slab()
155 void __kasan_poison_slab(struct slab *slab) in __kasan_poison_slab() argument
157 struct page *page = slab_page(slab); in __kasan_poison_slab()
461 struct slab *slab; in __kasan_krealloc() local
479 slab = virt_to_slab(object); in __kasan_krealloc()
482 if (unlikely(!slab)) in __kasan_krealloc()
485 poison_kmalloc_redzone(slab->slab_cache, object, size, flags); in __kasan_krealloc()
522 struct slab *slab; in __kasan_mempool_poison_object() local
534 slab = page_slab(page); in __kasan_mempool_poison_object()
536 if (check_slab_allocation(slab->slab_cache, ptr, ip)) in __kasan_mempool_poison_object()
[all …]
report.c
492 struct slab *slab; in complete_report_info() local
500 slab = kasan_addr_to_slab(addr); in complete_report_info()
501 if (slab) { in complete_report_info()
502 info->cache = slab->slab_cache; in complete_report_info()
503 info->object = nearest_obj(info->cache, slab, addr); in complete_report_info()
generic.c
541 struct slab *slab = kasan_addr_to_slab(addr); in kasan_record_aux_stack() local
546 if (is_kfence_address(addr) || !slab) in kasan_record_aux_stack()
549 cache = slab->slab_cache; in kasan_record_aux_stack()
550 object = nearest_obj(cache, slab, addr); in kasan_record_aux_stack()
/linux/scripts/gdb/linux/
slab.py
38 def slab_folio(slab): argument
39 return slab.cast(gdb.lookup_type("struct folio").pointer())
41 def slab_address(slab): argument
43 folio = slab_folio(slab)
155 def __fill_map(obj_map, cache, slab): argument
156 p = slab['freelist']
157 addr = slab_address(slab)
165 for slab in lists.list_for_each_entry(slab_list, slab_ptr_type, "slab_list"):
167 __fill_map(obj_map, cache, slab)
168 addr = slab_address(slab)
[all …]
/linux/tools/cgroup/
memcg_slabinfo.py
73 for slab in list_for_each_entry('struct slab', n.partial.address_of_(),
75 nr_objs += fn(slab)
79 def count_free(slab): argument
80 return slab.objects - slab.inuse
194 for slab in for_each_slab(prog):
195 objcg_vec_raw = slab.memcg_data.value_()
198 cache = slab.slab_cache
/linux/Documentation/admin-guide/mm/
slab.rst
2 Short users guide for the slab allocator
5 The slab allocator includes full debugging support (when built with
37 slab_debug=<Debug-Options>,<slab name1>,<slab name2>,...
44 of the first "select slabs" blocks that matches the slab's name are applied.
56 caused higher minimum slab orders
69 end of the slab name, in order to cover all slabs with the same prefix. For
75 Red zoning and tracking may realign the slab. We can just apply sanity checks
80 Debugging options may require the minimum possible slab order to increase as
82 sizes). This has a higher likelihood of resulting in slab allocation errors
88 You can apply different options to different list of slab names, using blocks
[all …]
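
Pulling the matched fragments together: the boot parameter takes a block of single-letter debug options followed by a comma-separated list of cache names, with a trailing '*' acting as a prefix wildcard. Two illustrative command lines (unverified against any particular kernel version):

slab_debug=FZ,dentry
slab_debug=U,kmalloc-*

Assuming the option letters from the full document (F for sanity checks, Z for red zoning, U for user tracking), the first applies sanity checks and red zoning to the dentry cache only, and the second enables user tracking for every cache whose name starts with "kmalloc-".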
/linux/lib/
sg_pool.c
13 struct kmem_cache *slab; member
150 sgp->slab = kmem_cache_create(sgp->name, size, 0, in sg_pool_init()
152 if (!sgp->slab) { in sg_pool_init()
159 sgp->slab); in sg_pool_init()
174 kmem_cache_destroy(sgp->slab); in sg_pool_init()
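
sg_pool.c above shows the standard kmem_cache lifecycle: create the cache once at init, allocate and free objects from it during operation, and destroy it on teardown. A minimal kernel-style sketch of the same pattern (the object type and names are illustrative, not from sg_pool.c):

#include <linux/slab.h>

struct my_obj { int a, b; };

static struct kmem_cache *my_cache;

static int my_init(void)
{
        my_cache = kmem_cache_create("my_obj", sizeof(struct my_obj),
                                     0, SLAB_HWCACHE_ALIGN, NULL);
        return my_cache ? 0 : -ENOMEM;
}

static void my_use(void)
{
        struct my_obj *obj = kmem_cache_alloc(my_cache, GFP_KERNEL);

        if (obj)
                kmem_cache_free(my_cache, obj);
}

static void my_exit(void)
{
        kmem_cache_destroy(my_cache);
}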
/linux/include/linux/
kfence.h
221 bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
245 static inline bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab) in __kfence_obj_info() argument
kasan.h
14 struct slab;
130 void __kasan_poison_slab(struct slab *slab);
131 static __always_inline void kasan_poison_slab(struct slab *slab) in kasan_poison_slab() argument
134 __kasan_poison_slab(slab); in kasan_poison_slab()
416 static inline void kasan_poison_slab(struct slab *slab) {} in kasan_poison_slab() argument
/linux/net/dccp/
ccid.c
/linux/tools/perf/Documentation/
perf-kmem.txt
47 Sort the output (default: 'frag,hit,bytes' for slab and 'bytes,hit'
49 pingpong, frag' for slab and 'page, callsite, bytes, hit, order,
51 mode selection options - i.e. --slab, --page, --alloc and/or --caller.
60 --slab::
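
The options matched above combine in the obvious way: record a workload's kmem events, then print per-slab or per-page statistics sorted by the chosen keys. A typical pair of invocations (hedged; consult the full perf-kmem.txt for exact semantics):

perf kmem record -- <workload>
perf kmem stat --slab --caller

Here <workload> is a placeholder for the command being profiled; --slab selects slab-allocator analysis and --caller groups the statistics by allocation call site, per the option list above.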
/linux/mm/kfence/
core.c
421 struct slab *slab; in kfence_guarded_alloc() local
489 slab = virt_to_slab((void *)meta->addr); in kfence_guarded_alloc()
490 slab->slab_cache = cache; in kfence_guarded_alloc()
491 slab->objects = 1; in kfence_guarded_alloc()
624 struct slab *slab = page_slab(page); in kfence_init_pool() local
625 slab->obj_exts = (unsigned long)&kfence_metadata_init[i / 2 - 1].obj_exts | in kfence_init_pool()
677 struct slab *slab = page_slab(page); in kfence_init_pool() local
678 slab->obj_exts = 0; in kfence_init_pool()
/linux/tools/testing/scatterlist/
Makefile
17 … $(OFILES) scatterlist.c linux/scatterlist.h linux/highmem.h linux/kmemleak.h linux/slab.h asm/io.h
31 @touch linux/slab.h
/linux/Documentation/translations/zh_CN/mm/
split_page_table_lock.rst
62 Make sure the architecture does not use the slab allocator to allocate page tables: slab uses page->slab_cache for its pages
/linux/Documentation/translations/zh_CN/core-api/
memory-allocation.rst
131 If you need to allocate many identical objects you can use the slab cache allocator. Before using the cache, it should be
137 and memory allocated with `kvmalloc`. Slab caches should be freed with kmem_cache_free(). Don't forget to use
mm-api.rst
49 include/linux/slab.h
51 mm/slab.c
/linux/tools/testing/memblock/
Makefile
11 DEP_OFILES = memblock.o lib/slab.o mmzone.o slab.o cmdline.o
