slab.h (72e0fe2241ce113cbba339ca8c2450b167774530) → slab.h (67f2df3b82d091ed095d0e47e1f3a9d3e18e4e41)
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
4 *
5 * (C) SGI 2006, Christoph Lameter
6 * Cleaned up and restructured to ease the addition of alternative
7 * implementations of SLAB allocators.
8 * (C) Linux Foundation 2008-2013

--- 557 unchanged lines hidden ---

566 kmem_cache_free_bulk(NULL, size, p);
567}
568
569void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t flags,
570 int node) __assume_slab_alignment __malloc;
571#define kmem_cache_alloc_node(...) alloc_hooks(kmem_cache_alloc_node_noprof(__VA_ARGS__))
572
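For context, kmem_cache_alloc_node() allocates one object from an existing cache on a specific NUMA node. A minimal caller might look like the sketch below; the foo_ctx structure and foo_cachep cache are hypothetical names used only for illustration and are not part of this header.

struct foo_ctx {
        int id;
        struct list_head list;
};

/* Hypothetical cache, assumed to be created elsewhere with kmem_cache_create(). */
static struct kmem_cache *foo_cachep;

static struct foo_ctx *foo_alloc_on_node(int node)
{
        /* Passing NUMA_NO_NODE instead lets the allocator choose the node. */
        return kmem_cache_alloc_node(foo_cachep, GFP_KERNEL, node);
}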
573/*
574 * These macros allow declaring a kmem_buckets * parameter alongside size, which
575 * can be compiled out with CONFIG_SLAB_BUCKETS=n so that a large number of call
576 * sites don't have to pass NULL.
577 */
578#ifdef CONFIG_SLAB_BUCKETS
579#define DECL_BUCKET_PARAMS(_size, _b) size_t (_size), kmem_buckets *(_b)
580#define PASS_BUCKET_PARAMS(_size, _b) (_size), (_b)
581#define PASS_BUCKET_PARAM(_b) (_b)
582#else
583#define DECL_BUCKET_PARAMS(_size, _b) size_t (_size)
584#define PASS_BUCKET_PARAMS(_size, _b) (_size)
585#define PASS_BUCKET_PARAM(_b) NULL
586#endif
587
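The intended pattern, sketched below with hypothetical helper names that are not part of this header: an internal function declares the optional bucket pointer with DECL_BUCKET_PARAMS(), and its callers forward it with PASS_BUCKET_PARAMS(), so the extra argument disappears entirely when CONFIG_SLAB_BUCKETS=n.

/* Hypothetical internal helper, shown only to illustrate the macro pattern. */
static void *__example_do_kmalloc_node(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
{
        /*
         * With CONFIG_SLAB_BUCKETS=y the parameter list expands to
         * "size_t size, kmem_buckets *b"; with CONFIG_SLAB_BUCKETS=n only
         * "size_t size" remains and "b" is never referenced.
         */
        return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, b), flags, node);
}

/* Callers that have no bucket set pass NULL, which is likewise compiled out. */
static inline void *example_kmalloc_node(size_t size, gfp_t flags, int node)
{
        return __example_do_kmalloc_node(PASS_BUCKET_PARAMS(size, NULL), flags, node);
}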
588/*
589 * The following functions are not to be used directly and are intended only
590 * for internal use from kmalloc() and kmalloc_node()
591 * with the exception of kunit tests
592 */
593
594void *__kmalloc_noprof(size_t size, gfp_t flags)
595 __assume_kmalloc_alignment __alloc_size(1);
596
582void *__kmalloc_node_noprof(size_t size, gfp_t flags, int node)
597void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
598 __assume_kmalloc_alignment __alloc_size(1);
599
600void *__kmalloc_cache_noprof(struct kmem_cache *s, gfp_t flags, size_t size)
601 __assume_kmalloc_alignment __alloc_size(3);
602
603void *__kmalloc_cache_node_noprof(struct kmem_cache *s, gfp_t gfpflags,
604 int node, size_t size)
605 __assume_kmalloc_alignment __alloc_size(4);

--- 84 unchanged lines hidden ---

690 if (size > KMALLOC_MAX_CACHE_SIZE)
691 return __kmalloc_large_node_noprof(size, flags, node);
692
693 index = kmalloc_index(size);
694 return __kmalloc_cache_node_noprof(
695 kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
696 flags, node, size);
697 }
683 return __kmalloc_node_noprof(size, flags, node);
698 return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node);
699}
700#define kmalloc_node(...) alloc_hooks(kmalloc_node_noprof(__VA_ARGS__))
701
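As a usage sketch (hypothetical function name, not part of this header), a node-aware caller simply passes the target NUMA node id:

static void *example_node_local_alloc(size_t size, int nid)
{
        /* NUMA_NO_NODE would let the allocator pick any node. */
        return kmalloc_node(size, GFP_KERNEL, nid);
}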
702/**
703 * kmalloc_array - allocate memory for an array.
704 * @n: number of elements.
705 * @size: element size.
706 * @flags: the type of memory to allocate (see kmalloc).

--- 34 unchanged lines hidden ---

741/**
742 * kcalloc - allocate memory for an array. The memory is set to zero.
743 * @n: number of elements.
744 * @size: element size.
745 * @flags: the type of memory to allocate (see kmalloc).
746 */
747#define kcalloc(n, size, flags) kmalloc_array(n, size, (flags) | __GFP_ZERO)
748
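Both helpers refuse multiplications that overflow size_t and return NULL rather than allocating a truncated buffer; kcalloc() additionally zeroes the memory. A brief sketch with hypothetical names:

static u32 *example_alloc_zeroed_table(size_t nr_entries)
{
        return kcalloc(nr_entries, sizeof(u32), GFP_KERNEL);
}

static u32 *example_alloc_table(size_t nr_entries)
{
        /* Same overflow checking, but the contents are left uninitialized. */
        return kmalloc_array(nr_entries, sizeof(u32), GFP_KERNEL);
}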
734void *kmalloc_node_track_caller_noprof(size_t size, gfp_t flags, int node,
735 unsigned long caller) __alloc_size(1);
749void *__kmalloc_node_track_caller_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node,
750 unsigned long caller) __alloc_size(1);
751#define kmalloc_node_track_caller_noprof(size, flags, node, caller) \
752 __kmalloc_node_track_caller_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node, caller)
753#define kmalloc_node_track_caller(...) \
754 alloc_hooks(kmalloc_node_track_caller_noprof(__VA_ARGS__, _RET_IP_))
755
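For illustration (hypothetical wrapper, not part of this header): a generic wrapper uses kmalloc_node_track_caller() so that, as the comment below describes for kmalloc_track_caller(), the allocation is attributed to the wrapper's caller rather than to the wrapper itself.

static void *example_wrapper_alloc(size_t size, gfp_t flags, int node)
{
        /* The macro appends _RET_IP_, recording this function's caller. */
        return kmalloc_node_track_caller(size, flags, node);
}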
756/*
757 * kmalloc_track_caller is a special version of kmalloc that records the
758 * calling function of the routine calling it for slab leak tracking instead
759 * of just the calling function (confusing, eh?).
760 * It's useful when the call to kmalloc comes from a widely-used standard

--- 9 unchanged lines hidden ---

770 int node)
771{
772 size_t bytes;
773
774 if (unlikely(check_mul_overflow(n, size, &bytes)))
775 return NULL;
776 if (__builtin_constant_p(n) && __builtin_constant_p(size))
777 return kmalloc_node_noprof(bytes, flags, node);
761 return __kmalloc_node_noprof(bytes, flags, node);
778 return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(bytes, NULL), flags, node);
779}
780#define kmalloc_array_node(...) alloc_hooks(kmalloc_array_node_noprof(__VA_ARGS__))
781
782#define kcalloc_node(_n, _size, _flags, _node) \
783 kmalloc_array_node(_n, _size, (_flags) | __GFP_ZERO, _node)
784
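For illustration, a zero-initialized per-node array with the same multiplication overflow checking; struct example_stat is a hypothetical type, not part of this header:

struct example_stat {
        u64 hits;
        u64 misses;
};

static struct example_stat *example_alloc_stats(unsigned int nr, int node)
{
        return kcalloc_node(nr, sizeof(struct example_stat), GFP_KERNEL, node);
}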
785/*
786 * Shortcuts

--- 73 unchanged lines hidden ---