xref: /freebsd/sys/compat/linuxkpi/common/include/linux/slab.h (revision 6be3386466ab79a84b48429ae66244f21526d3df)
/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2021 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef	_LINUX_SLAB_H_
#define	_LINUX_SLAB_H_

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/limits.h>
#include <sys/proc.h>
#include <vm/uma.h>

#include <linux/compat.h>
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/llist.h>

MALLOC_DECLARE(M_KMALLOC);

#define	kvmalloc(size, flags)		kmalloc(size, flags)
#define	kvzalloc(size, flags)		kmalloc(size, (flags) | __GFP_ZERO)
#define	kvcalloc(n, size, flags)	kvmalloc_array(n, size, (flags) | __GFP_ZERO)
#define	kzalloc(size, flags)		kmalloc(size, (flags) | __GFP_ZERO)
#define	kzalloc_node(size, flags, node)	kmalloc_node(size, (flags) | __GFP_ZERO, node)
#define	kfree_const(ptr)		kfree(ptr)
#define	vzalloc(size)			__vmalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 0)
#define	vfree(arg)			kfree(arg)
#define	kvfree(arg)			kfree(arg)
#define	vmalloc_node(size, node)	__vmalloc_node(size, GFP_KERNEL, node)
#define	vmalloc_user(size)		__vmalloc(size, GFP_KERNEL | __GFP_ZERO, 0)
#define	vmalloc(size)			__vmalloc(size, GFP_KERNEL, 0)
#define	__kmalloc(...)			kmalloc(__VA_ARGS__)

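/*
 * Illustrative sketch only, not part of the KPI: with the aliases above,
 * the k*alloc()/v*alloc() wrappers in this file resolve to malloc(9) and
 * mallocarray(9) calls using the M_KMALLOC type (vmalloc_32() below being
 * the contigmalloc(9) exception), so the usual Linux idiom carries over
 * unchanged.  The "struct foo" type is hypothetical.
 *
 *	struct foo *f;
 *
 *	f = kzalloc(sizeof(*f), GFP_KERNEL);
 *	if (f == NULL)
 *		return (-ENOMEM);
 *	...
 *	kfree(f);
 */
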
/*
 * Prefix some functions with linux_ to avoid namespace conflict
 * with the OpenSolaris code in the kernel.
 */
#define	kmem_cache		linux_kmem_cache
#define	kmem_cache_create(...)	linux_kmem_cache_create(__VA_ARGS__)
#define	kmem_cache_alloc(...)	linux_kmem_cache_alloc(__VA_ARGS__)
#define	kmem_cache_free(...)	linux_kmem_cache_free(__VA_ARGS__)
#define	kmem_cache_destroy(...) linux_kmem_cache_destroy(__VA_ARGS__)

#define	KMEM_CACHE(__struct, flags)					\
	linux_kmem_cache_create(#__struct, sizeof(struct __struct),	\
	__alignof(struct __struct), (flags), NULL)

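/*
 * Illustrative sketch only: KMEM_CACHE() derives the cache name, object
 * size and alignment from the structure type.  With a hypothetical
 * "struct foo",
 *
 *	foo_cache = KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN);
 *
 * expands to roughly
 *
 *	foo_cache = linux_kmem_cache_create("foo", sizeof(struct foo),
 *	    __alignof(struct foo), SLAB_HWCACHE_ALIGN, NULL);
 */
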
typedef void linux_kmem_ctor_t (void *);

struct linux_kmem_cache {
	uma_zone_t cache_zone;
	linux_kmem_ctor_t *cache_ctor;
	unsigned cache_flags;
	unsigned cache_size;
};

#define	SLAB_HWCACHE_ALIGN	(1 << 0)
#define	SLAB_TYPESAFE_BY_RCU	(1 << 1)
#define	SLAB_RECLAIM_ACCOUNT	(1 << 2)

#define	SLAB_DESTROY_BY_RCU \
	SLAB_TYPESAFE_BY_RCU

#define	ARCH_KMALLOC_MINALIGN \
	__alignof(unsigned long long)

/*
 * Critical section-friendly version of kfree().  The object's size must
 * be known at compile time and must be at least as large as a struct
 * llist_node, because the object's memory is reused to queue it for a
 * deferred free when called from within a critical section.
 */
#define kfree_async(ptr)	do {					\
	_Static_assert(sizeof(*(ptr)) >= sizeof(struct llist_node),	\
	    "Size of object to free is unknown or too small");		\
	if (curthread->td_critnest != 0)				\
		linux_kfree_async(ptr);					\
	else								\
		kfree(ptr);						\
} while (0)

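/*
 * Illustrative sketch only, "obj" being a hypothetical pointer freed with
 * kfree_async().  free(9) may acquire regular mutexes, which is not
 * permitted inside a critical section, so the macro defers the free via
 * linux_kfree_async() whenever td_critnest is non-zero:
 *
 *	critical_enter();
 *	...
 *	kfree_async(obj);	(queued, freed later outside the section)
 *	critical_exit();
 *
 * Outside a critical section it is equivalent to a plain kfree().
 */
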
static inline gfp_t
linux_check_m_flags(gfp_t flags)
{
	const gfp_t m = M_NOWAIT | M_WAITOK;

	/* make sure either M_NOWAIT or M_WAITOK is set */
	if ((flags & m) == 0)
		flags |= M_NOWAIT;
	else if ((flags & m) == m)
		flags &= ~M_WAITOK;

	/* mask away LinuxKPI specific flags */
	return (flags & GFP_NATIVE_MASK);
}

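/*
 * Net effect of the normalization above: exactly one of M_NOWAIT or
 * M_WAITOK survives (M_NOWAIT is the default and also wins when both are
 * set), and LinuxKPI-only __GFP_* bits are masked off by GFP_NATIVE_MASK
 * before the flags reach malloc(9).  Assuming the usual LinuxKPI
 * <linux/gfp.h> definitions, this means for example:
 *
 *	kmalloc(size, GFP_KERNEL)	sleeping allocation (M_WAITOK)
 *	kmalloc(size, GFP_ATOMIC)	non-sleeping allocation (M_NOWAIT)
 *	kzalloc(size, GFP_KERNEL)	as above, plus zeroed memory
 */
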
static inline void *
kmalloc(size_t size, gfp_t flags)
{
	return (malloc(size, M_KMALLOC, linux_check_m_flags(flags)));
}

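/*
 * The *_node() allocators below take a Linux NUMA node id; it is mapped
 * to a FreeBSD domainset by linux_get_vm_domain_set() and passed to the
 * malloc_domainset(9) family, so the request targets the matching memory
 * domain where possible.
 */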
static inline void *
kmalloc_node(size_t size, gfp_t flags, int node)
{
	return (malloc_domainset(size, M_KMALLOC,
	    linux_get_vm_domain_set(node), linux_check_m_flags(flags)));
}

static inline void *
kcalloc(size_t n, size_t size, gfp_t flags)
{
	flags |= __GFP_ZERO;
	return (mallocarray(n, size, M_KMALLOC, linux_check_m_flags(flags)));
}

static inline void *
kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
	flags |= __GFP_ZERO;
	return (mallocarray_domainset(n, size, M_KMALLOC,
	    linux_get_vm_domain_set(node), linux_check_m_flags(flags)));
}

static inline void *
__vmalloc(size_t size, gfp_t flags, int other)
{
	return (malloc(size, M_KMALLOC, linux_check_m_flags(flags)));
}

static inline void *
__vmalloc_node(size_t size, gfp_t flags, int node)
{
	return (malloc_domainset(size, M_KMALLOC,
	    linux_get_vm_domain_set(node), linux_check_m_flags(flags)));
}

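/*
 * vmalloc_32() must return memory addressable with 32 bits.  Here this is
 * provided by contigmalloc(9) bounded to [0, UINT_MAX], which also makes
 * the allocation physically contiguous and always sleeps (M_WAITOK).
 */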
static inline void *
vmalloc_32(size_t size)
{
	return (contigmalloc(size, M_KMALLOC, M_WAITOK, 0, UINT_MAX, 1, 1));
}

static inline void *
kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	return (mallocarray(n, size, M_KMALLOC, linux_check_m_flags(flags)));
}

static inline void *
kmalloc_array_node(size_t n, size_t size, gfp_t flags, int node)
{
	return (mallocarray_domainset(n, size, M_KMALLOC,
	    linux_get_vm_domain_set(node), linux_check_m_flags(flags)));
}

static inline void *
kvmalloc_array(size_t n, size_t size, gfp_t flags)
{
	return (mallocarray(n, size, M_KMALLOC, linux_check_m_flags(flags)));
}

static inline void *
krealloc(void *ptr, size_t size, gfp_t flags)
{
	return (realloc(ptr, size, M_KMALLOC, linux_check_m_flags(flags)));
}

static inline void
kfree(const void *ptr)
{
	free(__DECONST(void *, ptr), M_KMALLOC);
}

static inline size_t
ksize(const void *ptr)
{
	return (malloc_usable_size(ptr));
}

extern struct linux_kmem_cache *linux_kmem_cache_create(const char *name,
    size_t size, size_t align, unsigned flags, linux_kmem_ctor_t *ctor);

static inline void *
linux_kmem_cache_alloc(struct linux_kmem_cache *c, gfp_t flags)
{
	return (uma_zalloc_arg(c->cache_zone, c,
	    linux_check_m_flags(flags)));
}

static inline void *
kmem_cache_zalloc(struct linux_kmem_cache *c, gfp_t flags)
{
	return (uma_zalloc_arg(c->cache_zone, c,
	    linux_check_m_flags(flags | M_ZERO)));
}

extern void linux_kmem_cache_free_rcu(struct linux_kmem_cache *, void *);

static inline void
linux_kmem_cache_free(struct linux_kmem_cache *c, void *m)
{
	if (unlikely(c->cache_flags & SLAB_TYPESAFE_BY_RCU))
		linux_kmem_cache_free_rcu(c, m);
	else
		uma_zfree(c->cache_zone, m);
}

extern void linux_kmem_cache_destroy(struct linux_kmem_cache *);
void linux_kfree_async(void *);

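/*
 * Illustrative sketch only, using a hypothetical "struct foo": the usual
 * Linux cache lifecycle maps directly onto the UMA-backed implementation
 * above.
 *
 *	c = KMEM_CACHE(foo, 0);
 *	obj = kmem_cache_zalloc(c, GFP_KERNEL);
 *	...
 *	kmem_cache_free(c, obj);
 *	kmem_cache_destroy(c);
 *
 * Caches created with SLAB_TYPESAFE_BY_RCU route frees through
 * linux_kmem_cache_free_rcu(), deferring the UMA free until after an RCU
 * grace period.
 */
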
#endif					/* _LINUX_SLAB_H_ */