/linux/mm/ |
H A D | slub.c |
    390  struct slab *slab;  /* The slab from which we are allocating */  member
    392  struct slab *partial;  /* Partially allocated slabs */
    639  static __always_inline void slab_lock(struct slab *slab)  in slab_lock() argument
    641  bit_spin_lock(PG_locked, &slab->__page_flags);  in slab_lock()
    644  static __always_inline void slab_unlock(struct slab *slab)  in slab_unlock() argument
    646  bit_spin_unlock(PG_locked, &slab->__page_flags);  in slab_unlock()
    650  __update_freelist_fast(struct slab *slab,  in __update_freelist_fast() argument
    658  return try_cmpxchg_freelist(&slab->freelist_counter.full, &old.full, new.full);  in __update_freelist_fast()
    665  __update_freelist_slow(struct slab *slab,  in __update_freelist_slow() argument
    671  slab_lock(slab);  in __update_freelist_slow()
    [all …]
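The slub.c hits above show SLUB's two freelist-update paths: a lockless fast path that does a
double-word compare-and-swap on the slab's packed freelist/counter pair (try_cmpxchg_freelist()),
and a slow path that falls back to the per-slab bit spinlock taken on PG_locked in
slab->__page_flags. Below is a minimal userspace sketch of that fast-path/slow-path pattern,
assuming C11 atomics and a pthread mutex as stand-ins for try_cmpxchg_freelist() and slab_lock();
all names here (fake_slab, update_freelist_fast, ...) are illustrative, not kernel code. Build
with something like: gcc -std=c11 demo.c -pthread -latomic

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <pthread.h>
    #include <stdio.h>

    /* Illustrative stand-in for the slab's freelist + packed counters double word. */
    struct freelist_counter {
            void          *freelist;   /* first free object */
            unsigned long  counter;    /* stands in for the packed inuse/objects/frozen word */
    };

    struct fake_slab {
            _Atomic(struct freelist_counter) fc;   /* fast path: double-word CAS */
            pthread_mutex_t                  lock; /* slow path: stands in for slab_lock() */
    };

    /* Fast path: succeeds only if nobody raced with us. */
    static bool update_freelist_fast(struct fake_slab *s,
                                     struct freelist_counter old,
                                     struct freelist_counter new)
    {
            return atomic_compare_exchange_strong(&s->fc, &old, new);
    }

    /* Slow path: take the lock, re-check the old value, then update. */
    static bool update_freelist_slow(struct fake_slab *s,
                                     struct freelist_counter old,
                                     struct freelist_counter new)
    {
            bool ok = false;

            pthread_mutex_lock(&s->lock);
            struct freelist_counter cur = atomic_load(&s->fc);
            if (cur.freelist == old.freelist && cur.counter == old.counter) {
                    atomic_store(&s->fc, new);
                    ok = true;
            }
            pthread_mutex_unlock(&s->lock);
            return ok;
    }

    int main(void)
    {
            struct fake_slab s = { .lock = PTHREAD_MUTEX_INITIALIZER };
            struct freelist_counter old, new = { .freelist = (void *)0x1000, .counter = 1 };

            atomic_init(&s.fc, (struct freelist_counter){ 0 });
            old = atomic_load(&s.fc);

            if (!update_freelist_fast(&s, old, new))
                    update_freelist_slow(&s, old, new);

            printf("freelist=%p counter=%lu\n",
                   atomic_load(&s.fc).freelist, atomic_load(&s.fc).counter);
            return 0;
    }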
|
H A D | slab_common.c |
    577  static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)  in kmem_obj_info() argument
    579  if (__kfence_obj_info(kpp, object, slab))  in kmem_obj_info()
    581  __kmem_obj_info(kpp, object, slab);  in kmem_obj_info()
    603  struct slab *slab;  in kmem_dump_obj() local
    610  slab = virt_to_slab(object);  in kmem_dump_obj()
    611  if (!slab)  in kmem_dump_obj()
    614  kmem_obj_info(&kp, object, slab);  in kmem_dump_obj()
    1269  struct slab *slab;  in bpf_get_kmem_cache() local
    1274  slab = virt_to_slab((void *)(long)addr);  in bpf_get_kmem_cache()
    1275  return slab ? slab->slab_cache : NULL;  in bpf_get_kmem_cache()
|
/linux/Documentation/ABI/testing/ |
H A D | sysfs-kernel-slab |
    1  What: /sys/kernel/slab
    7  The /sys/kernel/slab directory contains a snapshot of the
    13  What: /sys/kernel/slab/<cache>/aliases
    22  What: /sys/kernel/slab/<cache>/align
    31  What: /sys/kernel/slab/<cache>/alloc_calls
    42  What: /sys/kernel/slab/<cache>/alloc_fastpath
    53  What: /sys/kernel/slab/<cache>/alloc_from_partial
    59  The alloc_from_partial file shows how many times a cpu slab has
    60  been full and it has been refilled by using a slab from the list
    65  What: /sys/kernel/slab/<cache>/alloc_refill
    [all …]
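Each ABI entry above documents one attribute file under /sys/kernel/slab/<cache>/. A small hedged
userspace sketch that prints a few of those attributes for one cache follows; "kmalloc-64" is only
an example cache name, and some files (alloc_calls, alloc_fastpath, ...) only exist when SLUB
debugging or statistics are configured in, so the sketch sticks to attributes that are generally
present.

    #include <stdio.h>

    int main(int argc, char **argv)
    {
            const char *cache = argc > 1 ? argv[1] : "kmalloc-64";
            const char *attrs[] = { "aliases", "align", "object_size", "objs_per_slab" };
            char path[256], buf[256];

            for (size_t i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++) {
                    FILE *f;

                    snprintf(path, sizeof(path), "/sys/kernel/slab/%s/%s", cache, attrs[i]);
                    f = fopen(path, "r");
                    if (!f) {
                            perror(path);
                            continue;
                    }
                    if (fgets(buf, sizeof(buf), f))
                            printf("%-14s %s", attrs[i], buf);   /* sysfs value includes '\n' */
                    fclose(f);
            }
            return 0;
    }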
|
/linux/mm/kasan/ |
H A D | common.c |
    35  struct slab *kasan_addr_to_slab(const void *addr)  in kasan_addr_to_slab()
    145  void __kasan_poison_slab(struct slab *slab)  in __kasan_poison_slab() argument
    147  struct page *page = slab_page(slab);  in __kasan_poison_slab()
    440  struct slab *slab;  in __kasan_krealloc() local
    458  slab = virt_to_slab(object);  in __kasan_krealloc()
    461  if (unlikely(!slab))  in __kasan_krealloc()
    464  poison_kmalloc_redzone(slab->slab_cache, object, size, flags);  in __kasan_krealloc()
    501  struct slab *slab;  in __kasan_mempool_poison_object() local
    517  slab = folio_slab(folio);  in __kasan_mempool_poison_object()
    519  if (check_slab_allocation(slab->slab_cache, ptr, ip))  in __kasan_mempool_poison_object()
    [all …]
|
H A D | report.c |
    531  struct slab *slab;  in complete_report_info() local
    539  slab = kasan_addr_to_slab(addr);  in complete_report_info()
    540  if (slab) {  in complete_report_info()
    541  info->cache = slab->slab_cache;  in complete_report_info()
    542  info->object = nearest_obj(info->cache, slab, addr);  in complete_report_info()
|
/linux/scripts/gdb/linux/ |
H A D | slab.py |
    38  def slab_folio(slab):  argument
    39  return slab.cast(gdb.lookup_type("struct folio").pointer())
    41  def slab_address(slab):  argument
    43  folio = slab_folio(slab)
    155  def __fill_map(obj_map, cache, slab):  argument
    156  p = slab['freelist']
    157  addr = slab_address(slab)
    165  for slab in lists.list_for_each_entry(slab_list, slab_ptr_type, "slab_list"):
    167  __fill_map(obj_map, cache, slab)
    168  addr = slab_address(slab)
    [all …]
|
H A D | constants.py.in |
    23  #include <linux/slab.h>
    108  /* linux/slab.h */
|
/linux/tools/cgroup/ |
H A D | memcg_slabinfo.py |
    18  This is a drgn script to provide slab statistics for memory cgroups.
    73  for slab in list_for_each_entry('struct slab', n.partial.address_of_(),
    75  nr_objs += fn(slab)
    79  def count_free(slab):  argument
    80  return slab.objects - slab.inuse
    136  err('Can\'t determine the slab allocator')
    154  yield cast('struct slab *', page)
    192  # look over all slab folio
    [all...] |
/linux/Documentation/mm/ |
H A D | slub.rst |
    7  slab caches. SLUB always includes full debugging but it is off by default.
    38  slab_debug=<Debug-Options>,<slab name1>,<slab name2>,...
    45  of the first "select slabs" blocks that matches the slab's name are applied.
    57  caused higher minimum slab orders
    70  end of the slab name, in order to cover all slabs with the same prefix. For
    76  Red zoning and tracking may realign the slab. We can just apply sanity checks
    81  Debugging options may require the minimum possible slab order to increase as
    83  sizes). This has a higher likelihood of resulting in slab allocation errors
    89  You can apply different options to different list of slab names, using blocks
    97  debugged by specifying global debug options followed by a list of slab names
    [all …]
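The slab_debug= line quoted above is the whole interface: a block of single-letter debug options,
followed by a comma-separated list of cache names the block applies to, and '*' may end a name to
cover every slab sharing that prefix. A hedged example following that syntax (F and Z are the
option letters slub.rst documents for sanity checks and red zoning):

    slab_debug=FZ,dentry,kmalloc-*

This enables sanity checking and red zoning for the dentry cache and all kmalloc-* caches while
leaving debugging off for every other slab.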
|
/linux/lib/ |
H A D | sg_pool.c |
    13  struct kmem_cache *slab;  member
    150  sgp->slab = kmem_cache_create(sgp->name, size, 0,  in sg_pool_init()
    152  if (!sgp->slab) {  in sg_pool_init()
    159  sgp->slab);  in sg_pool_init()
    174  kmem_cache_destroy(sgp->slab);  in sg_pool_init()
|
/linux/include/linux/ |
H A D | kfence.h |
    221  bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
    245  static inline bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)  in __kfence_obj_info() argument
|
/linux/net/dccp/ |
H A D | ccid.c |
    81  struct kmem_cache *slab;  in ccid_kmem_cache_create() local
    88  slab = kmem_cache_create(slab_name_fmt, sizeof(struct ccid) + obj_size, 0,  in ccid_kmem_cache_create()
    90  return slab;  in ccid_kmem_cache_create()
    93  static void ccid_kmem_cache_destroy(struct kmem_cache *slab)  in ccid_kmem_cache_destroy() argument
    95  kmem_cache_destroy(slab);  in ccid_kmem_cache_destroy()
|
/linux/tools/perf/Documentation/ |
H A D | perf-kmem.txt |
    47  Sort the output (default: 'frag,hit,bytes' for slab and 'bytes,hit'
    49  pingpong, frag' for slab and 'page, callsite, bytes, hit, order,
    51  mode selection options - i.e. --slab, --page, --alloc and/or --caller.
    60  --slab::
|
/linux/drivers/md/dm-vdo/ |
H A D | slab-depot.h |
    77  struct vdo_slab *slab;  member
    160  struct vdo_slab *slab;  member
    293  struct vdo_slab *slab;  member
    506  bool __must_check vdo_attempt_replay_into_slab(struct vdo_slab *slab,
    523  int __must_check vdo_acquire_provisional_reference(struct vdo_slab *slab,
|
/linux/tools/testing/scatterlist/ |
H A D | Makefile |
    17  … $(OFILES) scatterlist.c linux/scatterlist.h linux/highmem.h linux/kmemleak.h linux/slab.h asm/io.h
    31  @touch linux/slab.h
|
/linux/Documentation/translations/zh_CN/mm/ |
H A D | split_page_table_lock.rst | 62 Make sure the architecture does not use the slab allocator to allocate page tables: slab uses page->slab_cache to allocate its pages
|
/linux/drivers/net/ethernet/chelsio/inline_crypto/chtls/ |
H A D | chtls_cm.h |
    129  chtls_tcp_ops->slab = tcp_prot->rsk_prot->slab;  in chtls_init_rsk_ops()
    137  kmem_cache_free(req->rsk_ops->slab, req);  in chtls_reqsk_free()
|
/linux/Documentation/admin-guide/device-mapper/ |
H A D | vdo-design.rst |
    229  Most of the vdo volume belongs to the slab depot. The depot contains a
    231  three sections. Most of a slab consists of a linear sequence of 4K blocks.
    233  block map (see below). In addition to the data blocks, each slab has a set
    234  of reference counters, using 1 byte for each data block. Finally each slab
    237  Reference updates are written to the slab journal. Slab journal blocks are
    240  to free up space. The slab journal is used both to ensure that the main
    244  when there is a need to reclaim slab journal space. The write operations
    248  Each slab is independent of every other. They are assigned to "physical
    249  zones" in round-robin fashion. If there are P physical zones, then slab n
    252  The slab depot maintains an additional small data structure, the "slab
    [all …]
|
/linux/Documentation/translations/zh_CN/core-api/ |
H A D | memory-allocation.rst |
    131  If you need to allocate many identical objects, you can use the slab cache allocator. Before using the cache, you should use
    137  and memory allocated with `kvmalloc`. Memory from a slab cache should be freed with kmem_cache_free(). Don't forget to use
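The (translated) lines above walk the whole kmem_cache lifecycle: set the cache up before use,
allocate and free objects from it, and tear it down when done. A minimal hedged kernel-module-style
sketch of that lifecycle follows; struct foo and the "foo_cache" name are made up for illustration.

    #include <linux/module.h>
    #include <linux/slab.h>

    /* Hypothetical fixed-size object we allocate many copies of. */
    struct foo {
            int  id;
            char name[32];
    };

    static struct kmem_cache *foo_cache;

    static int __init foo_init(void)
    {
            struct foo *f;

            /* The cache must be set up before it can be used. */
            foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
                                          SLAB_HWCACHE_ALIGN, NULL);
            if (!foo_cache)
                    return -ENOMEM;

            f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
            if (!f) {
                    kmem_cache_destroy(foo_cache);
                    return -ENOMEM;
            }
            f->id = 1;

            /* Objects from a slab cache are freed with kmem_cache_free()... */
            kmem_cache_free(foo_cache, f);
            return 0;
    }

    static void __exit foo_exit(void)
    {
            /* ...and the cache itself is destroyed with kmem_cache_destroy(). */
            kmem_cache_destroy(foo_cache);
    }

    module_init(foo_init);
    module_exit(foo_exit);
    MODULE_LICENSE("GPL");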
|
H A D | mm-api.rst |
    49  include/linux/slab.h
    51  mm/slab.c
|
/linux/tools/testing/memblock/ |
H A D | Makefile | 11 DEP_OFILES = memblock.o lib/slab.o mmzone.o slab.o cmdline.o
|
/linux/block/ |
H A D | bio.c |
    40  struct kmem_cache *slab;  member
    77  struct kmem_cache *slab;  member
    93  bslab->slab = kmem_cache_create(bslab->name, size,  in create_bio_slab()
    96  if (!bslab->slab)  in create_bio_slab()
    105  kmem_cache_destroy(bslab->slab);  in create_bio_slab()
    131  return bslab->slab;  in bio_find_or_create_slab()
    146  WARN_ON_ONCE(bslab->slab != bs->bio_slab);  in bio_put_slab()
    155  kmem_cache_destroy(bslab->slab);  in bio_put_slab()
    169  kmem_cache_free(biovec_slab(nr_vecs)->slab, bv);  in bvec_free()
    204  bvl = kmem_cache_alloc(bvs->slab, bvec_alloc_gfp(gfp_mask));  in bvec_alloc()
    [all …]
|
/linux/mm/kfence/ |
H A D | report.c |
    91  * into the slab allocators. Includes the *_bulk() variants by  in get_stack_skipnr()
    299  bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)  in __kfence_obj_info()
    319  kpp->kp_slab = slab;  in __kfence_obj_info()
    298  __kfence_obj_info(struct kmem_obj_info * kpp,void * object,struct slab * slab)  __kfence_obj_info() argument
|
/linux/include/net/ |
H A D | request_sock.h |
    31  struct kmem_cache *slab;  member
    137  kmem_cache_free(req->rsk_ops->slab, req);  in __reqsk_free()
|
/linux/net/core/ |
H A D | sock.c |
    2171  struct kmem_cache *slab;  in sk_prot_alloc() local
    2173  slab = prot->slab;  in sk_prot_alloc()
    2174  if (slab != NULL) {  in sk_prot_alloc()
    2175  sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);  in sk_prot_alloc()
    2196  if (slab != NULL)  in sk_prot_alloc()
    2197  kmem_cache_free(slab, sk);  in sk_prot_alloc()
    2205  struct kmem_cache *slab;  in sk_prot_free() local
    2209  slab = prot->slab;  in sk_prot_free()
    2214  if (slab != NULL)  in sk_prot_free()
    2215  kmem_cache_free(slab, sk);  in sk_prot_free()
    [all …]
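sk_prot_alloc()/sk_prot_free() above follow a common pattern: allocate from the protocol's
dedicated kmem_cache when one is registered, otherwise fall back to kmalloc(), and free through
the matching path. A hedged sketch of that pattern with made-up names (my_proto, my_obj_alloc,
my_obj_free); it mirrors the structure of the quoted lines rather than reproducing sock.c.

    #include <linux/slab.h>

    /* Illustrative stand-in for "struct proto": an optional dedicated cache. */
    struct my_proto {
            struct kmem_cache *slab;     /* NULL means "no dedicated cache" */
            unsigned int       obj_size;
    };

    static void *my_obj_alloc(struct my_proto *prot, gfp_t priority)
    {
            struct kmem_cache *slab = prot->slab;

            if (slab != NULL)
                    return kmem_cache_alloc(slab, priority);
            return kmalloc(prot->obj_size, priority);
    }

    static void my_obj_free(struct my_proto *prot, void *obj)
    {
            struct kmem_cache *slab = prot->slab;

            if (slab != NULL)
                    kmem_cache_free(slab, obj);
            else
                    kfree(obj);
    }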
|