xref: /freebsd/sys/compat/linuxkpi/common/include/linux/slab.h (revision bb75b0d581f74e22a68d7868ad1f5da1146a8de0)
1 /*-
2  * Copyright (c) 2010 Isilon Systems, Inc.
3  * Copyright (c) 2010 iX Systems, Inc.
4  * Copyright (c) 2010 Panasas, Inc.
5  * Copyright (c) 2013-2021 Mellanox Technologies, Ltd.
6  * All rights reserved.
7  * Copyright (c) 2024-2026 The FreeBSD Foundation
8  *
9  * Portions of this software were developed by Björn Zeeb
10  * under sponsorship from the FreeBSD Foundation.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice unmodified, this list of conditions, and the following
17  *    disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 #ifndef	_LINUXKPI_LINUX_SLAB_H_
34 #define	_LINUXKPI_LINUX_SLAB_H_
35 
36 #include <sys/types.h>
37 #include <sys/malloc.h>
38 #include <sys/limits.h>
39 
40 #include <linux/compat.h>
41 #include <linux/types.h>
42 #include <linux/gfp.h>
43 #include <linux/err.h>
44 #include <linux/llist.h>
45 #include <linux/overflow.h>
46 #include <linux/cleanup.h>
47 
/* malloc(9) type under which all LinuxKPI kmalloc()-style memory is tracked. */
MALLOC_DECLARE(M_KMALLOC);

/* Zeroing variants: forward to the base allocator with __GFP_ZERO added. */
#define	kvzalloc(size, flags)		kvmalloc(size, (flags) | __GFP_ZERO)
#define	kvcalloc(n, size, flags)	kvmalloc_array(n, size, (flags) | __GFP_ZERO)
#define	kzalloc(size, flags)		kmalloc(size, (flags) | __GFP_ZERO)
#define	kzalloc_node(size, flags, node)	kmalloc_node(size, (flags) | __GFP_ZERO, node)
/* Zero-allocate one object sized for '_p' (optional trailing gfp flags). */
#define	kzalloc_obj(_p, ...)						\
    kzalloc(sizeof(typeof(_p)), default_gfp(__VA_ARGS__))
/* Zero-allocate '_n' objects; size_mul() overflow-checks the multiply. */
#define	kzalloc_objs(_p, _n, ...)					\
    kzalloc(size_mul((_n), sizeof(typeof(_p))), default_gfp(__VA_ARGS__))
/*
 * Zero-allocate a structure ending in flexible array member '_field'
 * with room for '_n' elements; on success record the element count via
 * __set_flex_counter().  Evaluates to the new pointer (NULL on failure).
 */
#define	kzalloc_flex(_p, _field, _n, ...)				\
({									\
	const size_t __n = (_n);					\
	const size_t __psize = struct_size_t(typeof(_p), _field, __n);	\
	typeof(_p) *__p_obj;						\
									\
	__p_obj = kzalloc(__psize, default_gfp(__VA_ARGS__));		\
	if (__p_obj != NULL)						\
		__set_flex_counter(__p_obj->_field, __n);		\
									\
	__p_obj;							\
})
/* All free() flavors funnel into kfree(); no const/async distinction here. */
#define	kfree_const(ptr)		kfree(ptr)
#define kfree_async(ptr)		kfree(ptr)		/* drm-kmod 5.4 compat */
#define	vzalloc(size)			__vmalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 0)
#define	vfree(arg)			kfree(arg)
#define	kvfree(arg)			kfree(arg)
#define	vmalloc_node(size, node)	__vmalloc_node(size, GFP_KERNEL, node)
#define	vmalloc_user(size)		__vmalloc(size, GFP_KERNEL | __GFP_ZERO, 0)
#define	vmalloc(size)			__vmalloc(size, GFP_KERNEL, 0)
78 
79 /*
80  * Prefix some functions with linux_ to avoid namespace conflict
81  * with the OpenSolaris code in the kernel.
82  */
#define	kmem_cache		linux_kmem_cache
#define	kmem_cache_create(...)	linux_kmem_cache_create(__VA_ARGS__)
#define	kmem_cache_alloc(...)	lkpi_kmem_cache_alloc(__VA_ARGS__)
#define	kmem_cache_zalloc(...)	lkpi_kmem_cache_zalloc(__VA_ARGS__)
#define	kmem_cache_free(...)	lkpi_kmem_cache_free(__VA_ARGS__)
#define	kmem_cache_destroy(...) linux_kmem_cache_destroy(__VA_ARGS__)
/* Shrinking is not implemented; always reports zero objects reclaimed. */
#define	kmem_cache_shrink(x)	(0)

/* Create a cache whose objects are sized and aligned for 'struct __struct'. */
#define	KMEM_CACHE(__struct, flags)					\
	linux_kmem_cache_create(#__struct, sizeof(struct __struct),	\
	__alignof(struct __struct), (flags), NULL)
94 
/* Constructor callback type passed to linux_kmem_cache_create(). */
typedef void linux_kmem_ctor_t (void *);

struct linux_kmem_cache;

/* Cache creation flags accepted by linux_kmem_cache_create(). */
#define	SLAB_HWCACHE_ALIGN	(1 << 0)
#define	SLAB_TYPESAFE_BY_RCU	(1 << 1)
#define	SLAB_RECLAIM_ACCOUNT	(1 << 2)

/* Older Linux spelling of SLAB_TYPESAFE_BY_RCU. */
#define	SLAB_DESTROY_BY_RCU \
	SLAB_TYPESAFE_BY_RCU

/* Minimum alignment guaranteed for kmalloc() results. */
#define	ARCH_KMALLOC_MINALIGN \
	__alignof(unsigned long long)

/* Distinguished non-NULL pointer representing a zero-length allocation. */
#define	ZERO_SIZE_PTR		((void *)16)
#define ZERO_OR_NULL_PTR(x)	((x) == NULL || (x) == ZERO_SIZE_PTR)
111 
/* kmem_cache primitives; implementations live outside this header. */
struct linux_kmem_cache *linux_kmem_cache_create(const char *name,
    size_t size, size_t align, unsigned flags, linux_kmem_ctor_t *ctor);
void *lkpi_kmem_cache_alloc(struct linux_kmem_cache *, gfp_t);
void *lkpi_kmem_cache_zalloc(struct linux_kmem_cache *, gfp_t);
void lkpi_kmem_cache_free(struct linux_kmem_cache *, void *);
void linux_kmem_cache_destroy(struct linux_kmem_cache *);

/* Backing implementations for the kmalloc()/kfree() inline wrappers below. */
void *lkpi_kmalloc(size_t, gfp_t);
void *lkpi_kvmalloc(size_t, gfp_t);
void *lkpi___kmalloc(size_t, gfp_t);
void *lkpi___kmalloc_node(size_t, gfp_t, int);
void *lkpi_krealloc(const void *, size_t, gfp_t);
void lkpi_kfree(const void *);
125 
126 static inline gfp_t
127 linux_check_m_flags(gfp_t flags)
128 {
129 	const gfp_t m = M_NOWAIT | M_WAITOK;
130 
131 	/* make sure either M_NOWAIT or M_WAITOK is set */
132 	if ((flags & m) == 0)
133 		flags |= M_NOWAIT;
134 	else if ((flags & m) == m)
135 		flags &= ~M_WAITOK;
136 
137 	/* mask away LinuxKPI specific flags */
138 	return (flags & GFP_NATIVE_MASK);
139 }
140 
141 /*
142  * Base functions with a native implementation.
143  */
144 
/* Usable size of an allocation, which may exceed the requested size. */
static inline size_t
ksize(const void *ptr)
{
	const size_t usable = malloc_usable_size(ptr);

	return (usable);
}
150 
151 static inline void *
152 kmalloc(size_t size, gfp_t flags)
153 {
154 	return (lkpi_kmalloc(size, flags));
155 }
156 
157 static inline void *
158 __kmalloc(size_t size, gfp_t flags)
159 {
160 	return (lkpi___kmalloc(size, flags));
161 }
162 
163 static inline void *
164 kmalloc_node(size_t size, gfp_t flags, int node)
165 {
166 	return (lkpi___kmalloc_node(size, flags, node));
167 }
168 
/* Allocate one object sized for '_p' (optional trailing gfp flags). */
#define	kmalloc_obj(_p, ...)						\
    kmalloc(sizeof(typeof(_p)), default_gfp(__VA_ARGS__))

/*
 * Allocate '_n' objects sized for '_p'.  Mirror kzalloc_objs() and use
 * size_mul() so the element-count multiplication is overflow-checked;
 * the previous "(_n) * sizeof(...)" spelling handed a single argument
 * to the two-argument size_mul() macro and could not compile when used.
 */
#define	kmalloc_objs(_p, _n, ...)					\
    kmalloc(size_mul((_n), sizeof(typeof(_p))), default_gfp(__VA_ARGS__))
174 
175 static inline void *
176 krealloc(const void *ptr, size_t size, gfp_t flags)
177 {
178 	return (lkpi_krealloc(ptr, size, flags));
179 }
180 
/* Release memory obtained from the kmalloc() family. */
static inline void
kfree(const void *ptr)
{
	lkpi_kfree(ptr);
	return;
}
186 
/* Scope-based cleanup hook: pointers tagged __free(kfree) are released on exit. */
DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T))
188 
189 /*
190  * Other k*alloc() funtions using the above as underlying allocator.
191  */
192 /* kmalloc */
193 static inline void *
194 kmalloc_array(size_t n, size_t size, gfp_t flags)
195 {
196 	if (WOULD_OVERFLOW(n, size))
197 		panic("%s: %zu * %zu overflowed", __func__, n, size);
198 
199 	return (kmalloc(size * n, flags));
200 }
201 
202 static inline void *
203 kcalloc(size_t n, size_t size, gfp_t flags)
204 {
205 	flags |= __GFP_ZERO;
206 	return (kmalloc_array(n, size, flags));
207 }
208 
209 /* kmalloc_node */
210 static inline void *
211 kmalloc_array_node(size_t n, size_t size, gfp_t flags, int node)
212 {
213 	if (WOULD_OVERFLOW(n, size))
214 		panic("%s: %zu * %zu overflowed", __func__, n, size);
215 
216 	return (kmalloc_node(size * n, flags, node));
217 }
218 
219 static inline void *
220 kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
221 {
222 	flags |= __GFP_ZERO;
223 	return (kmalloc_array_node(n, size, flags, node));
224 }
225 
226 /* krealloc */
227 static inline void *
228 krealloc_array(void *ptr, size_t n, size_t size, gfp_t flags)
229 {
230 	if (WOULD_OVERFLOW(n, size))
231 		return NULL;
232 
233 	return (krealloc(ptr, n * size, flags));
234 }
235 
236 /*
237  * vmalloc/kvalloc functions.
238  */
239 static inline void *
240 __vmalloc(size_t size, gfp_t flags, int other)
241 {
242 	return (malloc(size, M_KMALLOC, linux_check_m_flags(flags)));
243 }
244 
245 static inline void *
246 __vmalloc_node(size_t size, gfp_t flags, int node)
247 {
248 	return (malloc_domainset(size, M_KMALLOC,
249 	    linux_get_vm_domain_set(node), linux_check_m_flags(flags)));
250 }
251 
252 static inline void *
253 vmalloc_32(size_t size)
254 {
255 	return (contigmalloc(size, M_KMALLOC, M_WAITOK, 0, UINT_MAX, 1, 1));
256 }
257 
258 /* May return non-contiguous memory. */
259 static inline void *
260 kvmalloc(size_t size, gfp_t flags)
261 {
262 	return (lkpi_kvmalloc(size, flags));
263 }
264 
265 static inline void *
266 kvmalloc_array(size_t n, size_t size, gfp_t flags)
267 {
268 	if (WOULD_OVERFLOW(n, size))
269 		panic("%s: %zu * %zu overflowed", __func__, n, size);
270 
271 	return (kvmalloc(size * n, flags));
272 }
273 
274 void * lkpi_kvrealloc(const void *ptr, size_t oldsize, size_t newsize, gfp_t flags);
275 
276 #if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION < 61200
277 static inline void *
278 kvrealloc(const void *ptr, size_t oldsize, size_t newsize, gfp_t flags)
279 {
280 	return (lkpi_kvrealloc(ptr, oldsize, newsize, flags));
281 }
282 #else
283 static inline void *
284 kvrealloc(const void *ptr, size_t newsize, gfp_t flags)
285 {
286 	size_t oldsize;
287 
288 	if (!ZERO_OR_NULL_PTR(ptr))
289 		oldsize = ksize(ptr);
290 	else
291 		oldsize = 0;
292 
293 	return (lkpi_kvrealloc(ptr, oldsize, newsize, flags));
294 }
295 #endif
296 
297 /*
298  * Misc.
299  */
300 
301 static __inline void
302 kfree_sensitive(const void *ptr)
303 {
304 	if (ZERO_OR_NULL_PTR(ptr))
305 		return;
306 
307 	zfree(__DECONST(void *, ptr), M_KMALLOC);
308 }
309 
/*
 * Report the size a kmalloc() of 'size' bytes would actually provide.
 * The degenerate inputs 0 and SIZE_MAX are passed through unchanged.
 */
static inline size_t
kmalloc_size_roundup(size_t size)
{
	if (likely(size != 0 && size != SIZE_MAX))
		return (malloc_size(size));
	return (size);
}
317 
318 #endif					/* _LINUXKPI_LINUX_SLAB_H_ */
319