/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2021 Mellanox Technologies, Ltd.
 * All rights reserved.
 * Copyright (c) 2024-2025 The FreeBSD Foundation
 *
 * Portions of this software were developed by Björn Zeeb
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef	_LINUXKPI_LINUX_SLAB_H_
#define	_LINUXKPI_LINUX_SLAB_H_

#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/limits.h>

#include <linux/compat.h>
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/err.h>
#include <linux/llist.h>
#include <linux/overflow.h>
#include <linux/cleanup.h>

MALLOC_DECLARE(M_KMALLOC);

#define	kvzalloc(size, flags)		kvmalloc(size, (flags) | __GFP_ZERO)
#define	kvcalloc(n, size, flags)	kvmalloc_array(n, size, (flags) | __GFP_ZERO)
#define	kzalloc(size, flags)		kmalloc(size, (flags) | __GFP_ZERO)
#define	kzalloc_node(size, flags, node)	kmalloc_node(size, (flags) | __GFP_ZERO, node)
#define	kfree_const(ptr)		kfree(ptr)
#define	kfree_async(ptr)		kfree(ptr)		/* drm-kmod 5.4 compat */
#define	vzalloc(size)			__vmalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 0)
#define	vfree(arg)			kfree(arg)
#define	kvfree(arg)			kfree(arg)
#define	vmalloc_node(size, node)	__vmalloc_node(size, GFP_KERNEL, node)
#define	vmalloc_user(size)		__vmalloc(size, GFP_KERNEL | __GFP_ZERO, 0)
#define	vmalloc(size)			__vmalloc(size, GFP_KERNEL, 0)
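
/*
 * Illustrative example (not part of the original header): kvzalloc() and
 * kvfree() pair up for a possibly large, zeroed buffer that need not be
 * physically contiguous.  "struct ring" and "nslots" are hypothetical.
 *
 *	struct ring *r;
 *
 *	r = kvzalloc(sizeof(*r) + nslots * sizeof(r->slot[0]), GFP_KERNEL);
 *	if (r == NULL)
 *		return (-ENOMEM);
 *	...
 *	kvfree(r);
 *
 * Note that vfree() and kvfree() are simply aliases for kfree(); the
 * lkpi_* backends are expected to cope with any allocation made through
 * this header.
 */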

/*
 * Prefix some functions with linux_ to avoid namespace conflict
 * with the OpenSolaris code in the kernel.
 */
#define	kmem_cache		linux_kmem_cache
#define	kmem_cache_create(...)	linux_kmem_cache_create(__VA_ARGS__)
#define	kmem_cache_alloc(...)	lkpi_kmem_cache_alloc(__VA_ARGS__)
#define	kmem_cache_zalloc(...)	lkpi_kmem_cache_zalloc(__VA_ARGS__)
#define	kmem_cache_free(...)	lkpi_kmem_cache_free(__VA_ARGS__)
#define	kmem_cache_destroy(...) linux_kmem_cache_destroy(__VA_ARGS__)
#define	kmem_cache_shrink(x)	(0)

#define	KMEM_CACHE(__struct, flags)					\
	linux_kmem_cache_create(#__struct, sizeof(struct __struct),	\
	__alignof(struct __struct), (flags), NULL)
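
/*
 * Illustrative example (not part of the original header): creating and
 * using a cache for a driver-private structure.  "struct foo" and
 * "foo_cache" are hypothetical names.
 *
 *	struct foo { int a; };
 *	struct kmem_cache *foo_cache;
 *
 *	foo_cache = KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN);
 *	struct foo *f = kmem_cache_zalloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 *	kmem_cache_destroy(foo_cache);
 */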

typedef void linux_kmem_ctor_t (void *);

struct linux_kmem_cache;

#define	SLAB_HWCACHE_ALIGN	(1 << 0)
#define	SLAB_TYPESAFE_BY_RCU	(1 << 1)
#define	SLAB_RECLAIM_ACCOUNT	(1 << 2)

#define	SLAB_DESTROY_BY_RCU \
	SLAB_TYPESAFE_BY_RCU

#define	ARCH_KMALLOC_MINALIGN \
	__alignof(unsigned long long)

#define	ZERO_SIZE_PTR		((void *)16)
#define	ZERO_OR_NULL_PTR(x)	((x) == NULL || (x) == ZERO_SIZE_PTR)

struct linux_kmem_cache *linux_kmem_cache_create(const char *name,
    size_t size, size_t align, unsigned flags, linux_kmem_ctor_t *ctor);
void *lkpi_kmem_cache_alloc(struct linux_kmem_cache *, gfp_t);
void *lkpi_kmem_cache_zalloc(struct linux_kmem_cache *, gfp_t);
void lkpi_kmem_cache_free(struct linux_kmem_cache *, void *);
void linux_kmem_cache_destroy(struct linux_kmem_cache *);

void *lkpi_kmalloc(size_t, gfp_t);
void *lkpi_kvmalloc(size_t, gfp_t);
void *lkpi___kmalloc(size_t, gfp_t);
void *lkpi___kmalloc_node(size_t, gfp_t, int);
void *lkpi_krealloc(void *, size_t, gfp_t);
void lkpi_kfree(const void *);

static inline gfp_t
linux_check_m_flags(gfp_t flags)
{
	const gfp_t m = M_NOWAIT | M_WAITOK;

	/* make sure either M_NOWAIT or M_WAITOK is set */
	if ((flags & m) == 0)
		flags |= M_NOWAIT;
	else if ((flags & m) == m)
		flags &= ~M_WAITOK;

	/* mask away LinuxKPI specific flags */
	return (flags & GFP_NATIVE_MASK);
}
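
/*
 * For illustration (the authoritative GFP to malloc(9) mapping lives in
 * <linux/gfp.h>): linux_check_m_flags() guarantees that exactly one of
 * M_NOWAIT and M_WAITOK reaches malloc(9).  A request carrying neither
 * defaults to M_NOWAIT, and a request carrying both has M_WAITOK
 * stripped, so an allocation can never be both sleeping and
 * non-sleeping.  LinuxKPI-only GFP bits are masked off at the end.
 */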

/*
 * Base functions with a native implementation.
 */
static inline void *
kmalloc(size_t size, gfp_t flags)
{
	return (lkpi_kmalloc(size, flags));
}

static inline void *
__kmalloc(size_t size, gfp_t flags)
{
	return (lkpi___kmalloc(size, flags));
}

static inline void *
kmalloc_node(size_t size, gfp_t flags, int node)
{
	return (lkpi___kmalloc_node(size, flags, node));
}

static inline void *
krealloc(void *ptr, size_t size, gfp_t flags)
{
	return (lkpi_krealloc(ptr, size, flags));
}

static inline void
kfree(const void *ptr)
{
	lkpi_kfree(ptr);
}

DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T))
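
/*
 * Illustrative example (not part of the original header): together with
 * <linux/cleanup.h>, the DEFINE_FREE() hook above enables scope-based
 * cleanup, e.g.:
 *
 *	void *buf __free(kfree) = kmalloc(len, GFP_KERNEL);
 *	if (buf == NULL)
 *		return (-ENOMEM);
 *
 * Here "buf" is kfree()d automatically when it goes out of scope.
 */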

/*
 * Other k*alloc() functions using the above as underlying allocator.
 */
/* kmalloc */
static inline void *
kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	if (WOULD_OVERFLOW(n, size))
		panic("%s: %zu * %zu overflowed", __func__, n, size);

	return (kmalloc(size * n, flags));
}

static inline void *
kcalloc(size_t n, size_t size, gfp_t flags)
{
	flags |= __GFP_ZERO;
	return (kmalloc_array(n, size, flags));
}
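
/*
 * Illustrative example (not part of the original header): allocating a
 * zeroed array of descriptors; "struct desc" and "ndesc" are
 * hypothetical.
 *
 *	struct desc *d = kcalloc(ndesc, sizeof(*d), GFP_KERNEL);
 *
 * Note that kmalloc_array(), kmalloc_array_node() and kvmalloc_array()
 * panic on multiplication overflow, while krealloc_array() below
 * returns NULL instead.
 */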

/* kmalloc_node */
static inline void *
kmalloc_array_node(size_t n, size_t size, gfp_t flags, int node)
{
	if (WOULD_OVERFLOW(n, size))
		panic("%s: %zu * %zu overflowed", __func__, n, size);

	return (kmalloc_node(size * n, flags, node));
}

static inline void *
kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
	flags |= __GFP_ZERO;
	return (kmalloc_array_node(n, size, flags, node));
}

/* krealloc */
static inline void *
krealloc_array(void *ptr, size_t n, size_t size, gfp_t flags)
{
	if (WOULD_OVERFLOW(n, size))
		return (NULL);

	return (krealloc(ptr, n * size, flags));
}

/*
 * vmalloc/kvmalloc functions.
 */
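
/*
 * Note (added, not from the original header): __vmalloc() and
 * __vmalloc_node() are plain malloc(9)/malloc_domainset(9) wrappers;
 * the trailing "other" argument of __vmalloc() is accepted only for
 * Linux API compatibility and is ignored.
 */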
static inline void *
__vmalloc(size_t size, gfp_t flags, int other)
{
	return (malloc(size, M_KMALLOC, linux_check_m_flags(flags)));
}

static inline void *
__vmalloc_node(size_t size, gfp_t flags, int node)
{
	return (malloc_domainset(size, M_KMALLOC,
	    linux_get_vm_domain_set(node), linux_check_m_flags(flags)));
}

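/*
 * Note (added, not from the original header): the UINT_MAX upper bound
 * passed to contigmalloc(9) keeps the allocation within the low 4 GiB
 * of physical memory, mirroring the 32-bit addressability contract of
 * Linux vmalloc_32().
 */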
static inline void *
vmalloc_32(size_t size)
{
	return (contigmalloc(size, M_KMALLOC, M_WAITOK, 0, UINT_MAX, 1, 1));
}

/* May return non-contiguous memory. */
static inline void *
kvmalloc(size_t size, gfp_t flags)
{
	return (lkpi_kvmalloc(size, flags));
}

static inline void *
kvmalloc_array(size_t n, size_t size, gfp_t flags)
{
	if (WOULD_OVERFLOW(n, size))
		panic("%s: %zu * %zu overflowed", __func__, n, size);

	return (kvmalloc(size * n, flags));
}

static inline void *
kvrealloc(const void *ptr, size_t oldsize, size_t newsize, gfp_t flags)
{
	void *newptr;

	if (newsize <= oldsize)
		return (__DECONST(void *, ptr));

	newptr = kvmalloc(newsize, flags);
	if (newptr != NULL) {
		memcpy(newptr, ptr, oldsize);
		kvfree(ptr);
	}

	return (newptr);
}
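
/*
 * Note (added, not from the original header): when shrinking,
 * kvrealloc() hands back the original pointer unchanged; when growing,
 * the old contents are copied and the old buffer is freed only if the
 * new allocation succeeds, so the caller still holds a valid buffer on
 * failure.
 */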

/*
 * Misc.
 */

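/*
 * Note (added, not from the original header): zfree(9) explicitly
 * zeroes the allocation before releasing it, which is what gives
 * kfree_sensitive() its "scrub secrets on free" semantics.
 */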
static __inline void
kfree_sensitive(const void *ptr)
{
	if (ZERO_OR_NULL_PTR(ptr))
		return;

	zfree(__DECONST(void *, ptr), M_KMALLOC);
}

static inline size_t
ksize(const void *ptr)
{
	return (malloc_usable_size(ptr));
}

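/*
 * Note (added, not from the original header): ksize() reports the usable
 * size of an existing allocation via malloc_usable_size(9);
 * kmalloc_size_roundup() is presumably the request-size counterpart,
 * reporting the size malloc(9) would actually provide for a request.
 */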
static inline size_t
kmalloc_size_roundup(size_t size)
{
	if (unlikely(size == 0 || size == SIZE_MAX))
		return (size);
	return (malloc_size(size));
}

#endif					/* _LINUXKPI_LINUX_SLAB_H_ */