--- slab_common.c (0ea680eda6c9f59a9512f8b0dd4abf229bb9f6cf)
+++ slab_common.c (2947a4567f3a79127d2d540384e7f042106c1a24)
 // SPDX-License-Identifier: GPL-2.0
 /*
  * Slab allocator functions that are independent of the allocator strategy
  *
  * (C) 2012 Christoph Lameter <cl@linux.com>
  */
 #include <linux/slab.h>
 

--- 36 unchanged lines hidden ---

 static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
 		    slab_caches_to_rcu_destroy_workfn);
 
 /*
  * Set of flags that will prevent slab merging
  */
 #define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
 		SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
-		SLAB_FAILSLAB | SLAB_NO_MERGE)
+		SLAB_FAILSLAB | SLAB_NO_MERGE | kasan_never_merge())
 
 #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
 		SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
 
 /*
  * Merge control. If this is set then no merging of slab caches will occur.
  */
 static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);

--- 105 unchanged lines hidden ---

 		return NULL;
 
 	if (ctor)
 		return NULL;
 
 	size = ALIGN(size, sizeof(void *));
 	align = calculate_alignment(flags, align, size);
 	size = ALIGN(size, align);
-	flags = kmem_cache_flags(flags, name);
+	flags = kmem_cache_flags(size, flags, name);
 
 	if (flags & SLAB_NEVER_MERGE)
 		return NULL;
 
 	list_for_each_entry_reverse(s, &slab_caches, list) {
 		if (slab_unmergeable(s))
 			continue;
 
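Aside: the hunk above is the tail of the merge-eligibility test. A cache with a constructor, with any flag in SLAB_NEVER_MERGE, or created while slab_nomerge is set cannot be aliased to an existing cache, and the SLAB_MERGE_SAME flags of both caches have to agree. Below is a minimal standalone model of that decision, with invented flag values; it is not the kernel's find_mergeable(), which also checks object size and alignment in the lines hidden above.

#include <stdbool.h>
#include <stdio.h>

/* Invented flag values, for illustration only. */
#define EX_RED_ZONE	0x1u
#define EX_POISON	0x2u
#define EX_ACCOUNT	0x4u
#define EX_NO_MERGE	0x8u

#define EX_NEVER_MERGE	(EX_RED_ZONE | EX_POISON | EX_NO_MERGE)
#define EX_MERGE_SAME	(EX_ACCOUNT)

struct ex_cache {
	unsigned int flags;
	bool has_ctor;
};

static bool ex_nomerge;		/* models the slab_nomerge switch */

/* Could a new cache with (flags, has_ctor) share an existing one? */
static bool ex_mergeable(const struct ex_cache *existing,
			 unsigned int flags, bool has_ctor)
{
	if (ex_nomerge || has_ctor || existing->has_ctor)
		return false;
	if ((flags | existing->flags) & EX_NEVER_MERGE)
		return false;
	/* flags that change layout or accounting must match exactly */
	return (flags & EX_MERGE_SAME) == (existing->flags & EX_MERGE_SAME);
}

int main(void)
{
	struct ex_cache plain = { .flags = 0, .has_ctor = false };

	printf("%d\n", ex_mergeable(&plain, 0, false));			/* 1 */
	printf("%d\n", ex_mergeable(&plain, EX_ACCOUNT, false));	/* 0 */
	printf("%d\n", ex_mergeable(&plain, EX_POISON, false));		/* 0 */
	return 0;
}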

--- 93 unchanged lines hidden ---

 			  void (*ctor)(void *))
 {
 	struct kmem_cache *s = NULL;
 	const char *cache_name;
 	int err;
 
 #ifdef CONFIG_SLUB_DEBUG
 	/*
-	 * If no slab_debug was enabled globally, the static key is not yet
+	 * If no slub_debug was enabled globally, the static key is not yet
 	 * enabled by setup_slub_debug(). Enable it if the cache is being
 	 * created with any of the debugging flags passed explicitly.
 	 * It's also possible that this is the first cache created with
 	 * SLAB_STORE_USER and we should init stack_depot for it.
 	 */
 	if (flags & SLAB_DEBUG_FLAGS)
 		static_branch_enable(&slub_debug_enabled);
 	if (flags & SLAB_STORE_USER)
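Aside: static_branch_enable(&slub_debug_enabled) in the hunk above flips a static key (jump label), so the debug-only paths cost essentially nothing until debugging is switched on. A kernel-style sketch of that pattern with made-up names, not code from this file:

#include <linux/jump_label.h>

/* Illustrative only: a key that starts disabled. */
DEFINE_STATIC_KEY_FALSE(example_debug_enabled);

static inline void example_fast_path(void)
{
	/* Patched to a no-op branch until the key is enabled. */
	if (static_branch_unlikely(&example_debug_enabled)) {
		/* expensive debug-only bookkeeping would go here */
	}
}

static void example_cache_created(unsigned int debug_flags)
{
	/* Mirrors the hunk above: enable the key once any debug flag is seen. */
	if (debug_flags)
		static_branch_enable(&example_debug_enabled);
}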

--- 105 unchanged lines hidden ---

  * and release of the kobject does not need slab_mutex or cpu_hotplug_lock
  * protection. So they are now done without holding those locks.
  *
  * Note that there will be a slight delay in the deletion of sysfs files
  * if kmem_cache_release() is called indirectly from a work function.
  */
 static void kmem_cache_release(struct kmem_cache *s)
 {
-	if (slab_state >= FULL) {
-		sysfs_slab_unlink(s);
-		sysfs_slab_release(s);
-	} else {
-		slab_kmem_cache_release(s);
-	}
+	sysfs_slab_unlink(s);
+	sysfs_slab_release(s);
 }
 #else
 static void kmem_cache_release(struct kmem_cache *s)
 {
 	slab_kmem_cache_release(s);
 }
 #endif
 
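Aside: the comment above notes that sysfs file deletion may lag when kmem_cache_release() runs indirectly from a work function; the file queues such deferred teardown on slab_caches_to_rcu_destroy_work, declared near the top of this diff. A generic sketch of that defer-to-workqueue pattern, with invented names and no attempt to reproduce the hidden work function:

#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/spinlock.h>

static LIST_HEAD(example_to_destroy);
static DEFINE_SPINLOCK(example_lock);

static void example_destroy_workfn(struct work_struct *work)
{
	LIST_HEAD(todo);

	/* Grab the whole pending list, then tear it down outside the lock. */
	spin_lock_irq(&example_lock);
	list_splice_init(&example_to_destroy, &todo);
	spin_unlock_irq(&example_lock);

	/* actual teardown (sysfs release, freeing) would walk the todo list here */
}
static DECLARE_WORK(example_destroy_work, example_destroy_workfn);

static void example_queue_destroy(struct list_head *entry)
{
	spin_lock_irq(&example_lock);
	list_add_tail(entry, &example_to_destroy);
	spin_unlock_irq(&example_lock);

	schedule_work(&example_destroy_work);
}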

--- 229 unchanged lines hidden ---

 	create_boot_cache(s, name, size, flags | SLAB_KMALLOC, 0, size);
 	list_add(&s->list, &slab_caches);
 	s->refcount = 1;
 	return s;
 }
 
 struct kmem_cache *
 kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1] __ro_after_init =
-{ /* initialization for https://bugs.llvm.org/show_bug.cgi?id=42570 */ };
+{ /* initialization for https://llvm.org/pr42570 */ };
 EXPORT_SYMBOL(kmalloc_caches);
 
 #ifdef CONFIG_RANDOM_KMALLOC_CACHES
 unsigned long random_kmalloc_seed __ro_after_init;
 EXPORT_SYMBOL(random_kmalloc_seed);
 #endif
 
 /*
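Aside: with CONFIG_RANDOM_KMALLOC_CACHES each kmalloc size class is backed by several identical caches, and the boot-time random_kmalloc_seed above is combined with the allocation call site to pick one of them, making heap layouts harder to groom. A standalone toy model of that bucket selection; the mixing function here is made up, not the kernel's:

#include <stdint.h>
#include <stdio.h>

#define EX_NR_BUCKETS	16	/* stand-in for the number of cache copies */

static uint64_t ex_seed;	/* stand-in for random_kmalloc_seed */

/* Toy mixer: any decent 64-bit hash of (caller ^ seed) would do here. */
static unsigned int ex_pick_bucket(uint64_t caller_addr)
{
	uint64_t x = caller_addr ^ ex_seed;

	x ^= x >> 33;
	x *= 0xff51afd7ed558ccdULL;
	x ^= x >> 33;
	return (unsigned int)(x % EX_NR_BUCKETS);
}

int main(void)
{
	ex_seed = 0x1234abcd5678ef90ULL;	/* fixed here; random at boot */

	/* Different call sites usually land in different buckets, while a
	 * given call site always maps to the same bucket for this boot. */
	printf("%u %u\n", ex_pick_bucket(0xffffffff81234560ULL),
	       ex_pick_bucket(0xffffffff8200aa10ULL));
	return 0;
}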

--- 98 unchanged lines hidden ---

 	KMALLOC_RCL_NAME(__short_size) \
 	KMALLOC_CGROUP_NAME(__short_size) \
 	KMALLOC_DMA_NAME(__short_size) \
 	KMALLOC_RANDOM_NAME(RANDOM_KMALLOC_CACHES_NR, __short_size) \
 	.size = __size, \
 }
 
 /*
- * kmalloc_info[] is to make slab_debug=,kmalloc-xx option work at boot time.
+ * kmalloc_info[] is to make slub_debug=,kmalloc-xx option work at boot time.
  * kmalloc_index() supports up to 2^21=2MB, so the final entry of the table is
  * kmalloc-2M.
  */
 const struct kmalloc_info_struct kmalloc_info[] __initconst = {
 	INIT_KMALLOC_INFO(0, 0),
 	INIT_KMALLOC_INFO(96, 96),
 	INIT_KMALLOC_INFO(192, 192),
 	INIT_KMALLOC_INFO(8, 8),
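Aside: entries 1 and 2 of kmalloc_info[] above are the odd-sized kmalloc-96 and kmalloc-192 caches that fill the gaps between the power-of-two sizes; the power-of-two entries start at index 3 (kmalloc-8). A standalone, simplified model of the size-to-index mapping (not the kernel's kmalloc_index(); sizes above 256 bytes are omitted):

#include <stdio.h>

static int ex_kmalloc_index(unsigned int size)
{
	if (size <= 8)		return 3;	/* kmalloc-8 */
	if (size <= 16)		return 4;
	if (size <= 32)		return 5;
	if (size <= 64)		return 6;
	if (size <= 96)		return 1;	/* kmalloc-96, first odd size */
	if (size <= 128)	return 7;
	if (size <= 192)	return 2;	/* kmalloc-192, second odd size */
	if (size <= 256)	return 8;
	return -1;				/* larger sizes elided in this sketch */
}

int main(void)
{
	/* prints "1 2 8": 80 -> kmalloc-96, 150 -> kmalloc-192, 200 -> kmalloc-256 */
	printf("%d %d %d\n", ex_kmalloc_index(80), ex_kmalloc_index(150),
	       ex_kmalloc_index(200));
	return 0;
}

This is also why create_kmalloc_caches() further down creates index 1 right after index 6 (64 bytes) and index 2 right after index 7 (128 bytes).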

--- 70 unchanged lines hidden ---

 
 	if (IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) &&
 	    is_swiotlb_allocated())
 		minalign = ARCH_KMALLOC_MINALIGN;
 
 	return max(minalign, arch_slab_minalign());
 }
 
-static void __init
-new_kmalloc_cache(int idx, enum kmalloc_cache_type type)
+void __init
+new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags)
 {
-	slab_flags_t flags = 0;
 	unsigned int minalign = __kmalloc_minalign();
 	unsigned int aligned_size = kmalloc_info[idx].size;
 	int aligned_idx = idx;
 
 	if ((KMALLOC_RECLAIM != KMALLOC_NORMAL) && (type == KMALLOC_RECLAIM)) {
 		flags |= SLAB_RECLAIM_ACCOUNT;
 	} else if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_CGROUP)) {
 		if (mem_cgroup_kmem_disabled()) {

--- 30 unchanged lines hidden ---

 	kmalloc_caches[type][idx] = kmalloc_caches[type][aligned_idx];
 }
 
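Aside: aligned_size and aligned_idx in new_kmalloc_cache() above exist because the minimum kmalloc alignment returned by __kmalloc_minalign() can exceed a cache's nominal object size; the hidden middle of the function rounds the size up in that case, and the kmalloc_caches[type][idx] = kmalloc_caches[type][aligned_idx] assignment then makes the small slot reuse the larger cache. A standalone model of that rounding, assuming a 64-byte minimum alignment and power-of-two alignment values:

#include <stdio.h>

/* Round size up to a power-of-two alignment. */
static unsigned int ex_align_up(unsigned int size, unsigned int align)
{
	return (size + align - 1) & ~(align - 1);
}

int main(void)
{
	/* With a 64-byte minimum alignment (e.g. for non-coherent DMA),
	 * kmalloc-8 .. kmalloc-64 all collapse onto a 64-byte cache and
	 * kmalloc-96 rounds up to a 128-byte one. */
	unsigned int minalign = 64;
	unsigned int sizes[] = { 8, 16, 32, 64, 96, 128, 192 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("kmalloc-%u -> backed by a %u-byte cache\n",
		       sizes[i], ex_align_up(sizes[i], minalign));
	return 0;
}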
 /*
  * Create the kmalloc array. Some of the regular kmalloc arrays
  * may already have been created because they were needed to
  * enable allocations for slab creation.
  */
-void __init create_kmalloc_caches(void)
+void __init create_kmalloc_caches(slab_flags_t flags)
 {
 	int i;
 	enum kmalloc_cache_type type;
 
 	/*
 	 * Including KMALLOC_CGROUP if CONFIG_MEMCG_KMEM defined
 	 */
 	for (type = KMALLOC_NORMAL; type < NR_KMALLOC_TYPES; type++) {
 		for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
 			if (!kmalloc_caches[type][i])
-				new_kmalloc_cache(i, type);
+				new_kmalloc_cache(i, type, flags);
 
 			/*
 			 * Caches that are not of the two-to-the-power-of size.
 			 * These have to be created immediately after the
 			 * earlier power of two caches
 			 */
 			if (KMALLOC_MIN_SIZE <= 32 && i == 6 &&
 					!kmalloc_caches[type][1])
-				new_kmalloc_cache(1, type);
+				new_kmalloc_cache(1, type, flags);
 			if (KMALLOC_MIN_SIZE <= 64 && i == 7 &&
 					!kmalloc_caches[type][2])
-				new_kmalloc_cache(2, type);
+				new_kmalloc_cache(2, type, flags);
 		}
 	}
 #ifdef CONFIG_RANDOM_KMALLOC_CACHES
 	random_kmalloc_seed = get_random_u64();
 #endif
 
 	/* Kmalloc array is now usable */
 	slab_state = UP;

--- 346 unchanged lines hidden ---