xref: /freebsd/sys/sys/malloc.h (revision 78f3e0f6b3ad70d9574730fc3338474376ef8ebd)
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1987, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2005, 2009 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _SYS_MALLOC_H_
#define	_SYS_MALLOC_H_

#ifndef _STANDALONE
#include <sys/param.h>
#ifdef _KERNEL
#include <sys/systm.h>
#endif
#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>
#include <machine/_limits.h>

#define	MINALLOCSIZE	UMA_SMALLEST_UNIT

/*
 * Flags to memory allocation functions.
 */
#define	M_NOWAIT	0x0001		/* do not block */
#define	M_WAITOK	0x0002		/* ok to block */
#define	M_NORECLAIM	0x0080		/* do not reclaim after failure */
#define	M_ZERO		0x0100		/* bzero the allocation */
#define	M_NOVM		0x0200		/* don't ask VM for pages */
#define	M_USE_RESERVE	0x0400		/* can alloc out of reserve memory */
#define	M_NODUMP	0x0800		/* don't dump pages in this allocation */
#define	M_FIRSTFIT	0x1000		/* only for vmem, fast fit */
#define	M_BESTFIT	0x2000		/* only for vmem, low fragmentation */
#define	M_EXEC		0x4000		/* allocate executable space */
#define	M_NEXTFIT	0x8000		/* only for vmem, follow cursor */
#define	M_NEVERFREED	0x10000		/* chunk will never get freed */

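/*
 * The M_WAITOK and M_NOWAIT flags select the failure behaviour of the
 * allocation functions declared later in this header: M_WAITOK requests may
 * sleep and do not return NULL, while M_NOWAIT requests can fail and must be
 * checked.  A brief sketch (M_TEMP is one of the generic malloc types
 * declared below; most code defines its own type with MALLOC_DEFINE()):
 *
 *	p = malloc(sizeof(*p), M_TEMP, M_WAITOK | M_ZERO);
 *
 *	p = malloc(sizeof(*p), M_TEMP, M_NOWAIT);
 *	if (p == NULL)
 *		return (ENOMEM);
 */
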
#define	M_VERSION	2024073001

/*
 * Two malloc type structures are present: malloc_type, which is used by a
 * type owner to declare the type, and malloc_type_internal, which holds
 * malloc-owned statistics and other ABI-sensitive fields, such as the set of
 * malloc statistics indexed by the compile-time MAXCPU constant.
 * Applications should avoid introducing dependence on the allocator private
 * data layout and size.
 *
 * The malloc_type ks_next field is protected by malloc_mtx.  Other fields in
 * malloc_type are static after initialization, so they are accessed without
 * synchronization.
 *
 * Statistics in malloc_type_stats are written only while holding a critical
 * section and running on the CPU associated with the index into the stat
 * array, but are read lock-free, resulting in possible (minor) races, which
 * the monitoring app should take into account.
 */
struct malloc_type_stats {
	uint64_t	mts_memalloced;	/* Bytes allocated on CPU. */
	uint64_t	mts_memfreed;	/* Bytes freed on CPU. */
	uint64_t	mts_numallocs;	/* Number of allocations on CPU. */
	uint64_t	mts_numfrees;	/* Number of frees on CPU. */
	uint64_t	mts_size;	/* Bitmask of sizes allocated on CPU. */
	uint64_t	_mts_reserved1;	/* Reserved field. */
	uint64_t	_mts_reserved2;	/* Reserved field. */
	uint64_t	_mts_reserved3;	/* Reserved field. */
};

_Static_assert(sizeof(struct malloc_type_stats) == 64,
    "allocations come from pcpu_zone_64");

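/*
 * Consumers of these records (for example a monitoring tool reading the
 * kern.malloc sysctl stream described below) must aggregate the per-CPU
 * slots themselves.  A minimal sketch, assuming "stats" points at an array
 * with one slot per CPU and "ncpus" is the CPU count reported with the data:
 *
 *	uint64_t inuse = 0, live = 0;
 *
 *	for (u_int cpu = 0; cpu < ncpus; cpu++) {
 *		inuse += stats[cpu].mts_memalloced - stats[cpu].mts_memfreed;
 *		live += stats[cpu].mts_numallocs - stats[cpu].mts_numfrees;
 *	}
 *
 * Since the reads are lock-free, individual counters may be slightly stale
 * relative to one another, as noted above.
 */
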
/*
 * Index definitions for the mti_probes[] array.
 */
#define DTMALLOC_PROBE_MALLOC		0
#define DTMALLOC_PROBE_FREE		1
#define DTMALLOC_PROBE_MAX		2

struct malloc_type_internal {
	uint32_t	mti_probes[DTMALLOC_PROBE_MAX];
					/* DTrace probe ID array. */
	u_char		mti_zone;
	struct malloc_type_stats	*mti_stats;
	u_long		mti_spare[8];
};

/*
 * Public data structure describing a malloc type.
 */
struct malloc_type {
	struct malloc_type *ks_next;	/* Next in global chain. */
	u_long		 ks_version;	/* Detect programmer error. */
	const char	*ks_shortdesc;	/* Printable type name. */
	struct malloc_type_internal ks_mti;
};

/*
 * Statistics structure headers for user space.  The kern.malloc sysctl
 * exposes a structure stream consisting of a stream header, then a series of
 * malloc type headers and statistics structures (quantity maxcpus).  For
 * convenience, the kernel will provide the current value of maxcpus at the
 * head of the stream.
 */
#define	MALLOC_TYPE_STREAM_VERSION	0x00000001
struct malloc_type_stream_header {
	uint32_t	mtsh_version;	/* Stream format version. */
	uint32_t	mtsh_maxcpus;	/* Value of MAXCPU for stream. */
	uint32_t	mtsh_count;	/* Number of records. */
	uint32_t	_mtsh_pad;	/* Pad/reserved field. */
};

#define	MALLOC_MAX_NAME	32
struct malloc_type_header {
	char				mth_name[MALLOC_MAX_NAME];
};

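/*
 * A userspace consumer might walk the kern.malloc stream along these lines
 * (a simplified sketch based on the layout described above, with bounds and
 * error checking omitted; the libmemstat(3) library provides a supported
 * interface for this):
 *
 *	struct malloc_type_stream_header *shdr = (void *)buf;
 *	char *p = (char *)(shdr + 1);
 *
 *	for (uint32_t i = 0; i < shdr->mtsh_count; i++) {
 *		struct malloc_type_header *mth = (void *)p;
 *		struct malloc_type_stats *mts = (void *)(mth + 1);
 *
 *		p = (char *)(mts + shdr->mtsh_maxcpus);
 *	}
 */
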
#ifdef _KERNEL
#define	MALLOC_DEFINE(type, shortdesc, longdesc)			\
	struct malloc_type type[1] = {					\
		{							\
			.ks_next = NULL,				\
			.ks_version = M_VERSION,			\
			.ks_shortdesc = shortdesc,			\
		}							\
	};								\
	SYSINIT(type##_init, SI_SUB_KMEM, SI_ORDER_THIRD, malloc_init,	\
	    type);							\
	SYSUNINIT(type##_uninit, SI_SUB_KMEM, SI_ORDER_ANY,		\
	    malloc_uninit, type)

#define	MALLOC_DECLARE(type) \
	extern struct malloc_type type[1]

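/*
 * Typical use of the two macros above (a sketch; M_FOODEV is a made-up
 * type): MALLOC_DEFINE() goes in exactly one .c file, and MALLOC_DECLARE()
 * goes in a header (or at the top of any other file) that allocates or
 * frees with that type.
 *
 *	MALLOC_DEFINE(M_FOODEV, "foodev", "foo driver allocations");
 *
 *	struct foo_softc *sc;
 *
 *	sc = malloc(sizeof(*sc), M_FOODEV, M_WAITOK | M_ZERO);
 *	...
 *	free(sc, M_FOODEV);
 */
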
MALLOC_DECLARE(M_CACHE);
MALLOC_DECLARE(M_DEVBUF);
MALLOC_DECLARE(M_PARGS);
MALLOC_DECLARE(M_SESSION);
MALLOC_DECLARE(M_SUBPROC);
MALLOC_DECLARE(M_TEMP);

/*
 * XXX this should be declared in <sys/uio.h>, but that tends to fail
 * because <sys/uio.h> is included in a header before the source file
 * has a chance to include <sys/malloc.h> to get MALLOC_DECLARE() defined.
 */
MALLOC_DECLARE(M_IOV);

struct domainset;
extern struct mtx malloc_mtx;

/*
 * Function type used when iterating over the list of malloc types.
 */
typedef void malloc_type_list_func_t(struct malloc_type *, void *);

/* contigfree(9) is deprecated. */
void	contigfree(void *addr, unsigned long, struct malloc_type *type);
void	*contigmalloc(unsigned long size, struct malloc_type *type, int flags,
	    vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
	    vm_paddr_t boundary) __malloc_like __result_use_check
	    __alloc_size(1) __alloc_align(6);
void	*contigmalloc_domainset(unsigned long size, struct malloc_type *type,
	    struct domainset *ds, int flags, vm_paddr_t low, vm_paddr_t high,
	    unsigned long alignment, vm_paddr_t boundary)
	    __malloc_like __result_use_check __alloc_size(1) __alloc_align(7);
void	free(void *addr, struct malloc_type *type);
void	zfree(void *addr, struct malloc_type *type);
void	*malloc(size_t size, struct malloc_type *type, int flags) __malloc_like
	    __result_use_check __alloc_size(1);
/*
 * Try to optimize malloc(..., ..., M_ZERO) allocations by doing zeroing in
 * place if the size is known at compilation time.
 *
 * Passing the flag down requires malloc to blindly zero the entire object.
 * In practice a lot of the zeroing can be avoided if most of the object
 * gets explicitly initialized after the allocation. Letting the compiler
 * zero in place gives it the opportunity to take advantage of this state.
 *
 * Note that the operation is only applicable if both flags and size are
 * known at compilation time. If M_ZERO is passed but M_WAITOK is not, the
 * allocation can fail and a NULL check is needed. However, if M_WAITOK is
 * passed we know the allocation must succeed and the check can be elided.
 *
 *	_malloc_item = malloc(_size, type, (flags) &~ M_ZERO);
 *	if (((flags) & M_WAITOK) != 0 || _malloc_item != NULL)
 *		bzero(_malloc_item, _size);
 *
 * If the flag is set, the compiler knows the left side is always true,
 * therefore the entire statement is true and the callsite is:
 *
 *	_malloc_item = malloc(_size, type, (flags) &~ M_ZERO);
 *	bzero(_malloc_item, _size);
 *
 * If the flag is not set, the compiler knows the left side is always false
 * and the NULL check is needed, therefore the callsite is:
 *
 *	_malloc_item = malloc(_size, type, (flags) &~ M_ZERO);
 *	if (_malloc_item != NULL)
 *		bzero(_malloc_item, _size);
 *
 * The implementation is a macro because of what appears to be a clang 6 bug:
 * an inline function variant ended up being compiled to a mere malloc call
 * regardless of argument. gcc generates expected code (like the above).
 */
#define	malloc(size, type, flags) ({					\
	void *_malloc_item;						\
	size_t _size = (size);						\
	if (__builtin_constant_p(size) && __builtin_constant_p(flags) &&\
	    ((flags) & M_ZERO) != 0) {					\
		_malloc_item = malloc(_size, type, (flags) &~ M_ZERO);	\
		if (((flags) & M_WAITOK) != 0 ||			\
		    __predict_true(_malloc_item != NULL))		\
			memset(_malloc_item, 0, _size);			\
	} else {							\
		_malloc_item = malloc(_size, type, flags);		\
	}								\
	_malloc_item;							\
})

void	*malloc_domainset(size_t size, struct malloc_type *type,
	    struct domainset *ds, int flags) __malloc_like __result_use_check
	    __alloc_size(1);
void	*mallocarray(size_t nmemb, size_t size, struct malloc_type *type,
	    int flags) __malloc_like __result_use_check
	    __alloc_size2(1, 2);
void	*mallocarray_domainset(size_t nmemb, size_t size, struct malloc_type *type,
	    struct domainset *ds, int flags) __malloc_like __result_use_check
	    __alloc_size2(1, 2);
void	*malloc_exec(size_t size, struct malloc_type *type, int flags) __malloc_like
	    __result_use_check __alloc_size(1);
void	*malloc_domainset_exec(size_t size, struct malloc_type *type,
	    struct domainset *ds, int flags) __malloc_like __result_use_check
	    __alloc_size(1);
void	malloc_init(void *);
void	malloc_type_allocated(struct malloc_type *type, unsigned long size);
void	malloc_type_freed(struct malloc_type *type, unsigned long size);
void	malloc_type_list(malloc_type_list_func_t *, void *);
void	malloc_uninit(void *);
size_t	malloc_size(size_t);
size_t	malloc_usable_size(const void *);
void	*realloc(void *addr, size_t size, struct malloc_type *type, int flags)
	    __result_use_check __alloc_size(2);
void	*reallocf(void *addr, size_t size, struct malloc_type *type, int flags)
	    __result_use_check __alloc_size(2);
void	*malloc_aligned(size_t size, size_t align, struct malloc_type *type,
	    int flags) __malloc_like __result_use_check __alloc_size(1);
void	*malloc_domainset_aligned(size_t size, size_t align,
	    struct malloc_type *mtp, struct domainset *ds, int flags)
	    __malloc_like __result_use_check __alloc_size(1);

struct malloc_type *malloc_desc2type(const char *desc);

/*
 * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX
 * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW
 */
#define MUL_NO_OVERFLOW		(1UL << (sizeof(size_t) * 8 / 2))
static inline bool
WOULD_OVERFLOW(size_t nmemb, size_t size)
{

	return ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    nmemb > 0 && __SIZE_T_MAX / nmemb < size);
}
#undef MUL_NO_OVERFLOW
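/*
 * WOULD_OVERFLOW() reports whether nmemb * size can wrap around SIZE_MAX,
 * which is the same kind of check the *array allocators above rely on.  A
 * hand-rolled equivalent of mallocarray() might look like this (a sketch;
 * prefer mallocarray() itself):
 *
 *	if (WOULD_OVERFLOW(nmemb, size))
 *		return (NULL);
 *	p = malloc(nmemb * size, M_TEMP, M_NOWAIT);
 *
 * On LP64, for example, MUL_NO_OVERFLOW is 2^32, so the division-based test
 * above only runs when at least one operand is at least 2^32.
 */
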
#endif /* _KERNEL */

#else
/*
 * The native standalone malloc / free interface we're mapping to.
 */
extern void Free(void *p, const char *file, int line);
extern void *Malloc(size_t bytes, const char *file, int line);

/*
 * Minimal standalone malloc implementation / environment.  None of the
 * flags mean anything and there's no need to declare malloc types.
 * Define the simple alloc / free routines in terms of Malloc and
 * Free.  None of the kernel features that this stuff disables are needed.
 */
#define M_WAITOK 1
#define M_ZERO 0
#define M_NOWAIT 2
#define MALLOC_DECLARE(x)

#define kmem_zalloc(size, flags) ({					\
	void *p = Malloc((size), __FILE__, __LINE__);			\
	if (p == NULL && ((flags) & M_WAITOK) != 0)			\
		panic("Could not malloc %zu bytes with M_WAITOK from %s line %d", \
		    (size_t)(size), __FILE__, __LINE__);		\
	p;								\
})

#define kmem_free(p, size) Free(p, __FILE__, __LINE__)

/*
 * ZFS mem.h define that's the OpenZFS porting layer way of saying
 * M_WAITOK.  Given the above, it will also be a nop.
 */
#define KM_SLEEP M_WAITOK
#define KM_NOSLEEP M_NOWAIT
#endif /* _STANDALONE */
#endif /* !_SYS_MALLOC_H_ */