/linux/drivers/md/dm-vdo/

slab-depot.c
     54  static bool is_slab_open(struct vdo_slab *slab)
     56          return (!vdo_is_state_quiescing(&slab->state) &&
     57                  !vdo_is_state_quiescent(&slab->state));
     68          return ((journal->slab->status != VDO_SLAB_REBUILDING) &&   [in must_make_entries_to_flush()]
    136  static bool is_slab_journal_blank(const struct vdo_slab *slab)
    138          return ((slab->journal.tail == 1) &&
    139                  (slab->journal.tail_header.entry_count == 0));
    151          struct list_head *dirty_list = &journal->slab->allocator->dirty_slab_journals;   [in mark_slab_journal_dirty()]
    170  static void check_if_slab_drained(struct vdo_slab *slab)
    173          struct slab_journal *journal = &slab->journal;
    [all …]

slab-depot.h
     84          struct vdo_slab *slab;   [member]
    167          struct vdo_slab *slab;   [member]
    300          struct vdo_slab *slab;   [member]
    517  bool __must_check vdo_attempt_replay_into_slab(struct vdo_slab *slab,
    534  int __must_check vdo_acquire_provisional_reference(struct vdo_slab *slab,
/linux/mm/

slub.c
    604  static inline bool slab_test_pfmemalloc(const struct slab *slab)
    606          return test_bit(SL_pfmemalloc, &slab->flags.f);
    609  static inline void slab_set_pfmemalloc(struct slab *slab)
    611          set_bit(SL_pfmemalloc, &slab->flags.f);
    614  static inline void __slab_clear_pfmemalloc(struct slab *slab)
    616          __clear_bit(SL_pfmemalloc, &slab->flags.f);
    622  static __always_inline void slab_lock(struct slab *slab)
    624          bit_spin_lock(SL_locked, &slab->flags.f);
    627  static __always_inline void slab_unlock(struct slab *slab)
    629          bit_spin_unlock(SL_locked, &slab->flags.f);
    [all …]
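The matches above are SLUB's accessor layer for per-slab state: each property is one bit in slab->flags, read and written with atomic bitops, and one bit (SL_locked) doubles as a spinlock via bit_spin_lock(). A minimal userspace sketch of the same pattern, using C11 atomics in place of the kernel's bitops (struct my_slab and the MYF_* bit names are invented for illustration, not kernel API):

    #include <stdatomic.h>
    #include <stdbool.h>

    enum { MYF_PFMEMALLOC = 0, MYF_LOCKED = 1 };   /* bit positions, like SL_* */

    struct my_slab {
            atomic_ulong flags;                    /* stands in for slab->flags.f */
    };

    static inline bool my_test_pfmemalloc(struct my_slab *s)
    {
            return atomic_load(&s->flags) & (1UL << MYF_PFMEMALLOC);
    }

    static inline void my_set_pfmemalloc(struct my_slab *s)
    {
            atomic_fetch_or(&s->flags, 1UL << MYF_PFMEMALLOC);
    }

    /* A bit used as a lock: spin until we are the one who set it. */
    static inline void my_slab_lock(struct my_slab *s)
    {
            while (atomic_fetch_or(&s->flags, 1UL << MYF_LOCKED) & (1UL << MYF_LOCKED))
                    ;                              /* busy-wait, as bit_spin_lock() does */
    }

    static inline void my_slab_unlock(struct my_slab *s)
    {
            atomic_fetch_and(&s->flags, ~(1UL << MYF_LOCKED));
    }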
slab.h
     16   * Internal slab definitions
     51   * If slab debugging is enabled then the
     53   * that the slab was corrupted
     74  struct slab {
     95  static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
    105  static_assert(sizeof(struct slab) <= sizeof(struct page));
    107  static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(struct freelist_counters)));
    111   * slab_folio - The folio allocated for a slab
    112   * @s: The slab.
    116   * now accessed by struct slab
    Definitions:
     68  struct slab
     71  slab_cache   [member of struct slab]
     82  (anonymous union)   [member of struct slab]
    101  obj_exts   [member of struct slab]
    161  slab_address(const struct slab *slab)
    166  slab_nid(const struct slab *slab)
    171  slab_pgdat(const struct slab *slab)
    181  slab_order(const struct slab *slab)
    186  slab_size(const struct slab *slab)
    292  nearest_obj(struct kmem_cache *cache, const struct slab *slab, void *x)
    312  obj_to_index(const struct kmem_cache *cache, const struct slab *slab, void *obj)
    320  objs_per_slab(const struct kmem_cache *cache, const struct slab *slab)
    515  slab_obj_exts(struct slab *slab)
    536  slab_obj_exts(struct slab *slab)
    [all …]
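The static_assert lines matched above exist because struct slab is a typed view laid over struct page: the two must agree on size and on the offsets of shared fields, and the asserts turn any drift into a compile error. A freestanding sketch of that idiom (struct base_page and struct typed_view are invented stand-ins, not the kernel's types):

    #include <assert.h>    /* static_assert (C11) */
    #include <stddef.h>    /* offsetof */

    struct base_page {                /* stand-in for struct page */
            unsigned long flags;
            void *payload;
            unsigned long counters;
    };

    struct typed_view {               /* stand-in for struct slab */
            unsigned long flags;      /* must alias base_page.flags */
            void *freelist;           /* must alias base_page.payload */
            unsigned long counters;
    };

    /* Compile-time guards in the spirit of mm/slab.h. */
    static_assert(offsetof(struct typed_view, flags) ==
                  offsetof(struct base_page, flags), "flags must overlay");
    static_assert(offsetof(struct typed_view, freelist) ==
                  offsetof(struct base_page, payload), "freelist must overlay");
    static_assert(sizeof(struct typed_view) <= sizeof(struct base_page),
                  "view must not outgrow the base struct");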
slab_common.c
      7  #include <linux/slab.h>
     35  #include "slab.h"
     46   * Set of flags that will prevent slab merging.
     48   * since slab merging can update s->inuse that affects the metadata layout.
     58   * Merge control. If this is set then no merging of slab caches will occur.
     81   * Determine the size of a slab object   [in kmem_cache_size()]
    153   * Find a mergeable slab cache   [in slab_unmergeable()]
    169   * We may have set a slab to be unmergeable during bootstrap.   [in slab_unmergeable()]
    391          panic("%s: Failed to create slab '%s'. Error %d\n",   [in kmem_buckets_create()]
    541   * SLAB_TYPESAFE_BY_RCU slab ar…   [in kmem_cache_destroy()]
    Definitions:
    576  kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
    602  struct slab *slab;   [local in kmem_dump_obj()]
   1001  const struct slab *slab;   [local in __ksize()]
   1269  struct slab *slab;   [local in bpf_get_kmem_cache()]
   1618  struct slab *slab;   [local in kfree_rcu_sheaf()]
    [all …]
usercopy.c
    168          struct slab *slab;   [local in check_heap_object()]
    194          slab = page_slab(page);
    195          if (slab) {
    197                  __check_heap_object(ptr, n, slab, to_user);
/linux/Documentation/ABI/testing/

sysfs-kernel-slab
      1  What:           /sys/kernel/slab
      7                  The /sys/kernel/slab directory contains a snapshot of the
     13  What:           /sys/kernel/slab/<cache>/aliases
     22  What:           /sys/kernel/slab/<cache>/align
     31  What:           /sys/kernel/slab/<cache>/alloc_calls
     41                  Documentation/admin-guide/mm/slab.rst).
     43  What:           /sys/kernel/slab/<cache>/alloc_fastpath
     54  What:           /sys/kernel/slab/<cache>/alloc_from_partial
     60                  The alloc_from_partial file shows how many times a cpu slab has
     61                  been full and it has been refilled by using a slab from the list
    [all …]
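Everything under /sys/kernel/slab is an ordinary text attribute, so a stat tool only needs plain file reads. A small sketch, assuming a SLUB kernel on which the kmalloc-64 cache exists (any other <cache> directory works the same way):

    #include <stdio.h>

    int main(void)
    {
            char buf[64];
            /* "aliases" is one of the attributes documented above. */
            FILE *f = fopen("/sys/kernel/slab/kmalloc-64/aliases", "r");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            if (fgets(buf, sizeof(buf), f))
                    printf("kmalloc-64 aliases: %s", buf);
            fclose(f);
            return 0;
    }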
/linux/tools/mm/

slabinfo.c
     54          struct slabinfo *slab;   [member]
    114          "slabinfo [-aABDefhilLnoPrsStTUvXz1] [N=K] [-dafzput] [slab-regexp]\n"   [in usage()]
    247          snprintf(x, 128, "/sys/kernel/debug/slab/%s/%s", s->name, name);   [in read_debug_slab_obj()]
    352   * Find the shortest alias of a slab
    360          if (a->slab == find &&   [in find_one_alias()]
    798          fprintf(stderr, "%s can only enable trace for one slab at a time\n", s->name);   [in slab_debug()]
    819          /* Number of slabs in a slab cache */   [in totals()]
    823          /* Size of the whole slab */   [in totals()]
    827          /* Bytes used for object storage in a slab */   [in totals()]
    834          /* Number of objects in a slab */   [in totals()]
    Definitions:
   1209  slab_mismatch(char *slab)
   1218  struct slabinfo *slab = slabinfo;   [local in read_slab_dir()]
   1326  struct slabinfo *slab;   [local in output_slabs()]
    [all …]
/linux/mm/kasan/

common.c
     45  struct slab *kasan_addr_to_slab(const void *addr)
    155  void __kasan_poison_slab(struct slab *slab)
    157          struct page *page = slab_page(slab);
    461          struct slab *slab;   [local in __kasan_krealloc()]
    479          slab = virt_to_slab(object);
    482          if (unlikely(!slab))
    485          poison_kmalloc_redzone(slab->slab_cache, object, size, flags);
    522          struct slab *slab;   [local in __kasan_mempool_poison_object()]
    534          slab = page_slab(page);
    536          if (check_slab_allocation(slab->slab_cache, ptr, ip))
    [all …]
report.c
     22  #include <linux/slab.h>
     37  #include "../slab.h"
     84   * 1. False-positive reports when accessing slab metadata,
    323          if (strcmp(info->bug_type, "slab-out-of-bounds") == 0)   [in describe_object_addr()]
    325          else if (strcmp(info->bug_type, "slab-use-after-free") == 0)   [in describe_object_addr()]
    492          struct slab *slab;   [local in complete_report_info()]
    500          slab = kasan_addr_to_slab(addr);
    501          if (slab) {
    502                  info->cache = slab…   [in complete_report_info()]
    [all …]
generic.c
    541          struct slab *slab = kasan_addr_to_slab(addr);   [local in kasan_record_aux_stack()]
    546          if (is_kfence_address(addr) || !slab)
    549          cache = slab->slab_cache;
    550          object = nearest_obj(cache, slab, addr);
/linux/scripts/gdb/linux/

slab.py
     38  def slab_folio(slab):
     39      return slab.cast(gdb.lookup_type("struct folio").pointer())
     41  def slab_address(slab):
     43      folio = slab_folio(slab)
    155  def __fill_map(obj_map, cache, slab):
    156      p = slab['freelist']
    157      addr = slab_address(slab)
    165      for slab in lists.list_for_each_entry(slab_list, slab_ptr_type, "slab_list"):
    167          __fill_map(obj_map, cache, slab)
    168          addr = slab_address(slab)
    [all …]
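This script is loaded with the rest of the scripts/gdb helpers via vmlinux-gdb.py and backs gdb slab-inspection commands; the session below is illustrative only (command names and flags vary by kernel version, so confirm with `apropos lx` inside gdb):

    (gdb) source vmlinux-gdb.py
    (gdb) lx-slabinfo
    (gdb) lx-slabtrace --cache_name kmalloc-64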
/linux/tools/cgroup/

memcg_slabinfo.py
     73      for slab in list_for_each_entry('struct slab', n.partial.address_of_(),
     75          nr_objs += fn(slab)
     79  def count_free(slab):
     80      return slab.objects - slab.inuse
    194      for slab in for_each_slab(prog):
    195          objcg_vec_raw = slab.memcg_data.value_()
    198          cache = slab.slab_cache
/linux/Documentation/admin-guide/mm/

slab.rst
      2  Short users guide for the slab allocator
      5  The slab allocator includes full debugging support (when built with
     37  slab_debug=<Debug-Options>,<slab name1>,<slab name2>,...
     44  of the first "select slabs" blocks that matches the slab's name are applied.
     56  caused higher minimum slab orders
     69  end of the slab name, in order to cover all slabs with the same prefix. For
     75  Red zoning and tracking may realign the slab. We can just apply sanity checks
     80  Debugging options may require the minimum possible slab order to increase as
     82  sizes). This has a higher likelihood of resulting in slab allocation errors
     88  You can apply different options to different list of slab names, using blocks
    [all …]
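Following the slab_debug=<Debug-Options>,<slab name1>,... syntax matched at line 37, two illustrative boot parameters (option letters per this guide: F sanity checks, Z red zoning, P poisoning; the cache names are only examples, and the trailing * is the prefix wildcard the guide describes at line 69):

    slab_debug=FZ,dentry        sanity checks plus red zoning for the dentry cache
    slab_debug=P,kmalloc-*      poisoning for every cache whose name starts with "kmalloc-"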
/linux/lib/

sg_pool.c
     13          struct kmem_cache *slab;   [member]
    150                  sgp->slab = kmem_cache_create(sgp->name, size, 0,   [in sg_pool_init()]
    152                  if (!sgp->slab) {
    159                                  sgp->slab);
    174                  kmem_cache_destroy(sgp->slab);   [in sg_pool_init()]
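sg_pool.c is a straightforward consumer of the kmem_cache API: create a named cache at init, allocate and free objects from it, and destroy it on teardown or error. A minimal kernel-side sketch of that lifecycle (struct my_obj, my_cache, and the function names are invented; error handling is trimmed to the essentials):

    #include <linux/errno.h>
    #include <linux/slab.h>

    struct my_obj {
            int id;
            char payload[56];
    };

    static struct kmem_cache *my_cache;

    static int my_setup(void)
    {
            my_cache = kmem_cache_create("my_obj", sizeof(struct my_obj), 0,
                                         SLAB_HWCACHE_ALIGN, NULL);
            if (!my_cache)
                    return -ENOMEM;
            return 0;
    }

    static void my_use(void)
    {
            struct my_obj *o = kmem_cache_alloc(my_cache, GFP_KERNEL);

            if (o) {
                    o->id = 1;
                    kmem_cache_free(my_cache, o);
            }
    }

    static void my_teardown(void)
    {
            kmem_cache_destroy(my_cache);   /* NULL-safe, like sg_pool's cleanup path */
    }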
/linux/include/linux/

kfence.h
    222  bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
    246  static inline bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
kasan.h
     14  struct slab;
    130  void __kasan_poison_slab(struct slab *slab);
    131  static __always_inline void kasan_poison_slab(struct slab *slab)
    134          __kasan_poison_slab(slab);
    416  static inline void kasan_poison_slab(struct slab *slab) {}
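The matches show a common kernel header pattern: a forward declaration keeps the header light, an __always_inline wrapper dispatches to the out-of-line __-prefixed implementation when the feature is compiled in (the real kasan.h wrapper also checks kasan_enabled() first), and an empty static inline stub replaces it otherwise, so callers never need #ifdefs. A sketch of the pattern (CONFIG_MY_FEATURE, my_hook(), and __my_hook() are invented names):

    struct slab;                              /* forward declaration only */

    #ifdef CONFIG_MY_FEATURE

    void __my_hook(struct slab *slab);        /* real work, defined in a .c file */

    static __always_inline void my_hook(struct slab *slab)
    {
            __my_hook(slab);                  /* kasan.h additionally gates this at runtime */
    }

    #else /* !CONFIG_MY_FEATURE */

    static inline void my_hook(struct slab *slab) {}

    #endif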
/linux/net/dccp/

ccid.c
    [no preview]
/linux/tools/perf/Documentation/

perf-kmem.txt
     47          Sort the output (default: 'frag,hit,bytes' for slab and 'bytes,hit'
     49          pingpong, frag' for slab and 'page, callsite, bytes, hit, order,
     51          mode selection options - i.e. --slab, --page, --alloc and/or --caller.
     60  --slab::
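Using only the options the snippet itself names, an illustrative slab-profiling session (record a workload first, then sort the stats with the default slab keys):

    perf kmem record -- sleep 10
    perf kmem stat --slab --caller -s frag,hit,bytes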
/linux/mm/kfence/

core.c
    424          struct slab *slab;   [local in kfence_guarded_alloc()]
    492          slab = virt_to_slab(addr);
    493          slab->slab_cache = cache;
    494          slab->objects = 1;
    630          struct slab *slab = page_slab(page);   [local in kfence_init_pool()]
    631          slab->obj_exts = (unsigned long)&kfence_metadata_init[i / 2 - 1].obj_exts |
    698          struct slab *slab = page_slab(page);   [local in kfence_init_pool()]
    699          slab->obj_exts = 0;
/linux/tools/testing/scatterlist/

Makefile
     17  … $(OFILES) scatterlist.c linux/scatterlist.h linux/highmem.h linux/kmemleak.h linux/slab.h asm/io.h
     31          @touch linux/slab.h
/linux/Documentation/translations/zh_CN/mm/

split_page_table_lock.rst
     62  Make sure the architecture doesn't use the slab allocator to allocate page tables: slab uses page->slab_cache for the pages it allocates
/linux/Documentation/translations/zh_CN/core-api/

memory-allocation.rst
    131  If you need to allocate many identical objects, you can use the slab cache allocator. Before using the cache, it should be set up with
    137  and memory allocated with `kvmalloc`. Slab caches should be freed with kmem_cache_free(). Don't forget to
mm-api.rst
     49  include/linux/slab.h
     51  mm/slab.c
/linux/tools/testing/memblock/

Makefile
     11  DEP_OFILES = memblock.o lib/slab.o mmzone.o slab.o cmdline.o