1 /*-
2 * Copyright (c) 2010 Isilon Systems, Inc.
3 * Copyright (c) 2010 iX Systems, Inc.
4 * Copyright (c) 2010 Panasas, Inc.
5 * Copyright (c) 2013-2021 Mellanox Technologies, Ltd.
6 * All rights reserved.
7 * Copyright (c) 2024-2025 The FreeBSD Foundation
8 *
9 * Portions of this software were developed by Björn Zeeb
10 * under sponsorship from the FreeBSD Foundation.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice unmodified, this list of conditions, and the following
17 * disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33 #ifndef _LINUXKPI_LINUX_SLAB_H_
34 #define _LINUXKPI_LINUX_SLAB_H_
35
36 #include <sys/types.h>
37 #include <sys/malloc.h>
38 #include <sys/limits.h>
39
40 #include <linux/compat.h>
41 #include <linux/types.h>
42 #include <linux/gfp.h>
43 #include <linux/llist.h>
44 #include <linux/overflow.h>
45
/* malloc(9) type under which all LinuxKPI kmalloc-style memory is accounted. */
MALLOC_DECLARE(M_KMALLOC);

/*
 * Convenience aliases for the Linux allocation API.  The *zalloc()
 * variants simply OR in __GFP_ZERO; in this implementation the v*()
 * and kv*() ("virtual") variants forward to the regular kmalloc()/
 * kfree() family via __vmalloc()/kfree().
 */
#define	kvzalloc(size, flags)		kmalloc(size, (flags) | __GFP_ZERO)
#define	kvcalloc(n, size, flags)	kvmalloc_array(n, size, (flags) | __GFP_ZERO)
#define	kzalloc(size, flags)		kmalloc(size, (flags) | __GFP_ZERO)
#define	kzalloc_node(size, flags, node)	kmalloc_node(size, (flags) | __GFP_ZERO, node)
#define	kfree_const(ptr)		kfree(ptr)
#define	kfree_async(ptr)		kfree(ptr)	/* drm-kmod 5.4 compat */
#define	vzalloc(size)			__vmalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 0)
#define	vfree(arg)			kfree(arg)
#define	kvfree(arg)			kfree(arg)
#define	vmalloc_node(size, node)	__vmalloc_node(size, GFP_KERNEL, node)
#define	vmalloc_user(size)		__vmalloc(size, GFP_KERNEL | __GFP_ZERO, 0)
#define	vmalloc(size)			__vmalloc(size, GFP_KERNEL, 0)
60
61 /*
62 * Prefix some functions with linux_ to avoid namespace conflict
63 * with the OpenSolaris code in the kernel.
64 */
#define	kmem_cache		linux_kmem_cache
#define	kmem_cache_create(...)	linux_kmem_cache_create(__VA_ARGS__)
#define	kmem_cache_alloc(...)	lkpi_kmem_cache_alloc(__VA_ARGS__)
#define	kmem_cache_zalloc(...)	lkpi_kmem_cache_zalloc(__VA_ARGS__)
#define	kmem_cache_free(...)	lkpi_kmem_cache_free(__VA_ARGS__)
#define	kmem_cache_destroy(...)	linux_kmem_cache_destroy(__VA_ARGS__)
/* No-op: always reports success (0 == nothing could be shrunk). */
#define	kmem_cache_shrink(x)	(0)

/*
 * Linux convenience macro: create a cache for objects of type
 * "struct __struct", named after the struct, with no constructor.
 */
#define	KMEM_CACHE(__struct, flags)					\
	linux_kmem_cache_create(#__struct, sizeof(struct __struct),	\
	    __alignof(struct __struct), (flags), NULL)
76
/* Optional per-object constructor invoked by the cache implementation. */
typedef void linux_kmem_ctor_t (void *);

/* Opaque; layout is private to the LinuxKPI cache implementation. */
struct linux_kmem_cache;

/* Cache-creation flags accepted by linux_kmem_cache_create(). */
#define	SLAB_HWCACHE_ALIGN	(1 << 0)
#define	SLAB_TYPESAFE_BY_RCU	(1 << 1)
#define	SLAB_RECLAIM_ACCOUNT	(1 << 2)

/* Pre-4.12 Linux name for SLAB_TYPESAFE_BY_RCU. */
#define	SLAB_DESTROY_BY_RCU \
	SLAB_TYPESAFE_BY_RCU

#define	ARCH_KMALLOC_MINALIGN \
	__alignof(unsigned long long)

/*
 * Non-NULL sentinel that may be handed out for zero-sized allocations;
 * it must never be dereferenced.  ZERO_OR_NULL_PTR() matches both the
 * sentinel and NULL.
 */
#define	ZERO_SIZE_PTR		((void *)16)
#define	ZERO_OR_NULL_PTR(x)	((x) == NULL || (x) == ZERO_SIZE_PTR)
93
/*
 * Backing implementations; defined outside this header (presumably in
 * the LinuxKPI .c sources — the static inline wrappers below forward
 * to them).
 */
struct linux_kmem_cache *linux_kmem_cache_create(const char *name,
    size_t size, size_t align, unsigned flags, linux_kmem_ctor_t *ctor);
void *lkpi_kmem_cache_alloc(struct linux_kmem_cache *, gfp_t);
void *lkpi_kmem_cache_zalloc(struct linux_kmem_cache *, gfp_t);
void lkpi_kmem_cache_free(struct linux_kmem_cache *, void *);
void linux_kmem_cache_destroy(struct linux_kmem_cache *);

void *lkpi_kmalloc(size_t, gfp_t);
void *lkpi___kmalloc(size_t, gfp_t);
void *lkpi___kmalloc_node(size_t, gfp_t, int);
void *lkpi_krealloc(void *, size_t, gfp_t);
void lkpi_kfree(const void *);
106
107 static inline gfp_t
linux_check_m_flags(gfp_t flags)108 linux_check_m_flags(gfp_t flags)
109 {
110 const gfp_t m = M_NOWAIT | M_WAITOK;
111
112 /* make sure either M_NOWAIT or M_WAITOK is set */
113 if ((flags & m) == 0)
114 flags |= M_NOWAIT;
115 else if ((flags & m) == m)
116 flags &= ~M_WAITOK;
117
118 /* mask away LinuxKPI specific flags */
119 return (flags & GFP_NATIVE_MASK);
120 }
121
122 /*
123 * Base functions with a native implementation.
124 */
125 static inline void *
kmalloc(size_t size,gfp_t flags)126 kmalloc(size_t size, gfp_t flags)
127 {
128 return (lkpi_kmalloc(size, flags));
129 }
130
131 static inline void *
__kmalloc(size_t size,gfp_t flags)132 __kmalloc(size_t size, gfp_t flags)
133 {
134 return (lkpi___kmalloc(size, flags));
135 }
136
137 static inline void *
kmalloc_node(size_t size,gfp_t flags,int node)138 kmalloc_node(size_t size, gfp_t flags, int node)
139 {
140 return (lkpi___kmalloc_node(size, flags, node));
141 }
142
143 static inline void *
krealloc(void * ptr,size_t size,gfp_t flags)144 krealloc(void *ptr, size_t size, gfp_t flags)
145 {
146 return (lkpi_krealloc(ptr, size, flags));
147 }
148
/* Release memory obtained from the kmalloc() family. */
static inline void
kfree(const void *ptr)
{
	lkpi_kfree(ptr);
}
154
155 /*
156 * Other k*alloc() funtions using the above as underlying allocator.
157 */
158 /* kmalloc */
159 static inline void *
kmalloc_array(size_t n,size_t size,gfp_t flags)160 kmalloc_array(size_t n, size_t size, gfp_t flags)
161 {
162 if (WOULD_OVERFLOW(n, size))
163 panic("%s: %zu * %zu overflowed", __func__, n, size);
164
165 return (kmalloc(size * n, flags));
166 }
167
168 static inline void *
kcalloc(size_t n,size_t size,gfp_t flags)169 kcalloc(size_t n, size_t size, gfp_t flags)
170 {
171 flags |= __GFP_ZERO;
172 return (kmalloc_array(n, size, flags));
173 }
174
175 /* kmalloc_node */
176 static inline void *
kmalloc_array_node(size_t n,size_t size,gfp_t flags,int node)177 kmalloc_array_node(size_t n, size_t size, gfp_t flags, int node)
178 {
179 if (WOULD_OVERFLOW(n, size))
180 panic("%s: %zu * %zu overflowed", __func__, n, size);
181
182 return (kmalloc_node(size * n, flags, node));
183 }
184
185 static inline void *
kcalloc_node(size_t n,size_t size,gfp_t flags,int node)186 kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
187 {
188 flags |= __GFP_ZERO;
189 return (kmalloc_array_node(n, size, flags, node));
190 }
191
192 /* krealloc */
193 static inline void *
krealloc_array(void * ptr,size_t n,size_t size,gfp_t flags)194 krealloc_array(void *ptr, size_t n, size_t size, gfp_t flags)
195 {
196 if (WOULD_OVERFLOW(n, size))
197 return NULL;
198
199 return (krealloc(ptr, n * size, flags));
200 }
201
202 /*
203 * vmalloc/kvalloc functions.
204 */
205 static inline void *
__vmalloc(size_t size,gfp_t flags,int other)206 __vmalloc(size_t size, gfp_t flags, int other)
207 {
208 return (malloc(size, M_KMALLOC, linux_check_m_flags(flags)));
209 }
210
211 static inline void *
__vmalloc_node(size_t size,gfp_t flags,int node)212 __vmalloc_node(size_t size, gfp_t flags, int node)
213 {
214 return (malloc_domainset(size, M_KMALLOC,
215 linux_get_vm_domain_set(node), linux_check_m_flags(flags)));
216 }
217
218 static inline void *
vmalloc_32(size_t size)219 vmalloc_32(size_t size)
220 {
221 return (contigmalloc(size, M_KMALLOC, M_WAITOK, 0, UINT_MAX, 1, 1));
222 }
223
224 /* May return non-contiguous memory. */
225 static inline void *
kvmalloc(size_t size,gfp_t flags)226 kvmalloc(size_t size, gfp_t flags)
227 {
228 return (malloc(size, M_KMALLOC, linux_check_m_flags(flags)));
229 }
230
231 static inline void *
kvmalloc_array(size_t n,size_t size,gfp_t flags)232 kvmalloc_array(size_t n, size_t size, gfp_t flags)
233 {
234 if (WOULD_OVERFLOW(n, size))
235 panic("%s: %zu * %zu overflowed", __func__, n, size);
236
237 return (kvmalloc(size * n, flags));
238 }
239
240 static inline void *
kvrealloc(const void * ptr,size_t oldsize,size_t newsize,gfp_t flags)241 kvrealloc(const void *ptr, size_t oldsize, size_t newsize, gfp_t flags)
242 {
243 void *newptr;
244
245 if (newsize <= oldsize)
246 return (__DECONST(void *, ptr));
247
248 newptr = kvmalloc(newsize, flags);
249 if (newptr != NULL) {
250 memcpy(newptr, ptr, oldsize);
251 kvfree(ptr);
252 }
253
254 return (newptr);
255 }
256
257 /*
258 * Misc.
259 */
260
261 static __inline void
kfree_sensitive(const void * ptr)262 kfree_sensitive(const void *ptr)
263 {
264 if (ZERO_OR_NULL_PTR(ptr))
265 return;
266
267 zfree(__DECONST(void *, ptr), M_KMALLOC);
268 }
269
/* Report the usable size of an allocation, as malloc(9) tracks it. */
static inline size_t
ksize(const void *ptr)
{
	size_t sz;

	sz = malloc_usable_size(ptr);
	return (sz);
}
275
/*
 * Round a request up to the size kmalloc() would actually provide.
 * 0 and SIZE_MAX pass through unchanged.
 */
static inline size_t
kmalloc_size_roundup(size_t size)
{
	const bool passthrough = (size == 0 || size == SIZE_MAX);

	return (unlikely(passthrough) ? size : malloc_size(size));
}
283
284 #endif /* _LINUXKPI_LINUX_SLAB_H_ */
285