/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2021 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef	_LINUX_SLAB_H_
#define	_LINUX_SLAB_H_

#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/limits.h>

#include <linux/compat.h>
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/llist.h>

MALLOC_DECLARE(M_KMALLOC);

#define	kvmalloc(size, flags)		kmalloc(size, flags)
#define	kvzalloc(size, flags)		kmalloc(size, (flags) | __GFP_ZERO)
#define	kvcalloc(n, size, flags)	kvmalloc_array(n, size, (flags) | __GFP_ZERO)
#define	kzalloc(size, flags)		kmalloc(size, (flags) | __GFP_ZERO)
#define	kzalloc_node(size, flags, node)	kmalloc_node(size, (flags) | __GFP_ZERO, node)
#define	kfree_const(ptr)		kfree(ptr)
#define	vzalloc(size)			__vmalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 0)
#define	vfree(arg)			kfree(arg)
#define	kvfree(arg)			kfree(arg)
#define	vmalloc_node(size, node)	__vmalloc_node(size, GFP_KERNEL, node)
#define	vmalloc_user(size)		__vmalloc(size, GFP_KERNEL | __GFP_ZERO, 0)
#define	vmalloc(size)			__vmalloc(size, GFP_KERNEL, 0)
#define	__kmalloc(...)			kmalloc(__VA_ARGS__)

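/*
 * Usage sketch (editorial illustration, not part of the upstream header):
 * the aliases above let LinuxKPI consumers use the familiar Linux calls,
 * all of which end up in kmalloc()/kfree() below.  The "foo" structure is
 * hypothetical.
 *
 *	struct foo *f;
 *
 *	f = kzalloc(sizeof(*f), GFP_KERNEL);	зeroed allocation, may sleep
 *	if (f == NULL)
 *		return (-ENOMEM);
 *	...
 *	kfree(f);				same free path as kvfree()/vfree()
 */
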
/*
 * Prefix some functions with linux_ to avoid namespace conflict
 * with the OpenSolaris code in the kernel.
 */
#define	kmem_cache		linux_kmem_cache
#define	kmem_cache_create(...)	linux_kmem_cache_create(__VA_ARGS__)
#define	kmem_cache_alloc(...)	lkpi_kmem_cache_alloc(__VA_ARGS__)
#define	kmem_cache_zalloc(...)	lkpi_kmem_cache_zalloc(__VA_ARGS__)
#define	kmem_cache_free(...)	lkpi_kmem_cache_free(__VA_ARGS__)
#define	kmem_cache_destroy(...) linux_kmem_cache_destroy(__VA_ARGS__)

#define	KMEM_CACHE(__struct, flags)					\
	linux_kmem_cache_create(#__struct, sizeof(struct __struct),	\
	__alignof(struct __struct), (flags), NULL)

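/*
 * Expansion sketch (illustrative; "struct foo" is hypothetical): KMEM_CACHE()
 * derives the cache name, object size and alignment from the structure, so
 *
 *	KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN)
 *
 * expands to
 *
 *	linux_kmem_cache_create("foo", sizeof(struct foo),
 *	    __alignof(struct foo), (SLAB_HWCACHE_ALIGN), NULL)
 */
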
typedef void linux_kmem_ctor_t (void *);

struct linux_kmem_cache;

#define	SLAB_HWCACHE_ALIGN	(1 << 0)
#define	SLAB_TYPESAFE_BY_RCU	(1 << 1)
#define	SLAB_RECLAIM_ACCOUNT	(1 << 2)

#define	SLAB_DESTROY_BY_RCU \
	SLAB_TYPESAFE_BY_RCU

#define	ARCH_KMALLOC_MINALIGN \
	__alignof(unsigned long long)

/*
 * Critical section-friendly version of kfree().
 * Requires knowledge of the allocation size at build time.
 */
#define kfree_async(ptr)	do {					\
	_Static_assert(sizeof(*(ptr)) >= sizeof(struct llist_node),	\
	    "Size of object to free is unknown or too small");		\
	if (curthread->td_critnest != 0)				\
		linux_kfree_async(ptr);					\
	else								\
		kfree(ptr);						\
} while (0)

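/*
 * Usage sketch (illustrative; "struct foo" is hypothetical and assumed to be
 * at least sizeof(struct llist_node)): kfree_async() hands the pointer to
 * linux_kfree_async() instead of kfree() while the caller holds a critical
 * section (curthread->td_critnest != 0).  The _Static_assert() above is why
 * the argument must point to a complete type of known size.
 *
 *	struct foo *f;
 *	...
 *	critical_enter();
 *	...
 *	kfree_async(f);		freed via linux_kfree_async() here
 *	critical_exit();
 */
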
static inline gfp_t
linux_check_m_flags(gfp_t flags)
{
	const gfp_t m = M_NOWAIT | M_WAITOK;

	/* make sure either M_NOWAIT or M_WAITOK is set */
	if ((flags & m) == 0)
		flags |= M_NOWAIT;
	else if ((flags & m) == m)
		flags &= ~M_WAITOK;

	/* mask away LinuxKPI specific flags */
	return (flags & GFP_NATIVE_MASK);
}

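/*
 * Behaviour sketch for linux_check_m_flags(), following directly from the
 * code above: a request with neither M_NOWAIT nor M_WAITOK gains M_NOWAIT,
 * a request with both keeps only M_NOWAIT, and LinuxKPI-specific GFP bits
 * are masked off before the flags reach malloc(9).
 *
 *	linux_check_m_flags(0)				returns M_NOWAIT
 *	linux_check_m_flags(M_NOWAIT | M_WAITOK)	returns M_NOWAIT
 *	linux_check_m_flags(M_WAITOK)			returns M_WAITOK
 */
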
static inline void *
kmalloc(size_t size, gfp_t flags)
{
	return (malloc(size, M_KMALLOC, linux_check_m_flags(flags)));
}

static inline void *
kmalloc_node(size_t size, gfp_t flags, int node)
{
	return (malloc_domainset(size, M_KMALLOC,
	    linux_get_vm_domain_set(node), linux_check_m_flags(flags)));
}

static inline void *
kcalloc(size_t n, size_t size, gfp_t flags)
{
	flags |= __GFP_ZERO;
	return (mallocarray(n, size, M_KMALLOC, linux_check_m_flags(flags)));
}

static inline void *
kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
	flags |= __GFP_ZERO;
	return (mallocarray_domainset(n, size, M_KMALLOC,
	    linux_get_vm_domain_set(node), linux_check_m_flags(flags)));
}

static inline void *
__vmalloc(size_t size, gfp_t flags, int other)
{
	return (malloc(size, M_KMALLOC, linux_check_m_flags(flags)));
}

static inline void *
__vmalloc_node(size_t size, gfp_t flags, int node)
{
	return (malloc_domainset(size, M_KMALLOC,
	    linux_get_vm_domain_set(node), linux_check_m_flags(flags)));
}

static inline void *
vmalloc_32(size_t size)
{
	return (contigmalloc(size, M_KMALLOC, M_WAITOK, 0, UINT_MAX, 1, 1));
}

static inline void *
kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	return (mallocarray(n, size, M_KMALLOC, linux_check_m_flags(flags)));
}

static inline void *
kmalloc_array_node(size_t n, size_t size, gfp_t flags, int node)
{
	return (mallocarray_domainset(n, size, M_KMALLOC,
	    linux_get_vm_domain_set(node), linux_check_m_flags(flags)));
}

static inline void *
kvmalloc_array(size_t n, size_t size, gfp_t flags)
{
	return (mallocarray(n, size, M_KMALLOC, linux_check_m_flags(flags)));
}

static inline void *
krealloc(void *ptr, size_t size, gfp_t flags)
{
	return (realloc(ptr, size, M_KMALLOC, linux_check_m_flags(flags)));
}

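/*
 * Usage sketch (illustrative; "buf" and "new_len" are hypothetical):
 * krealloc() maps to realloc(9), so a failed resize leaves the original
 * buffer valid; keep the old pointer until the new one is known good.
 *
 *	void *tmp;
 *
 *	tmp = krealloc(buf, new_len, GFP_KERNEL);
 *	if (tmp == NULL)
 *		goto err;	"buf" is still valid and must be freed later
 *	buf = tmp;
 */
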
static inline void
kfree(const void *ptr)
{
	free(__DECONST(void *, ptr), M_KMALLOC);
}

static __inline void
kfree_sensitive(const void *ptr)
{
	zfree(__DECONST(void *, ptr), M_KMALLOC);
}

static inline size_t
ksize(const void *ptr)
{
	return (malloc_usable_size(ptr));
}

extern struct linux_kmem_cache *linux_kmem_cache_create(const char *name,
    size_t size, size_t align, unsigned flags, linux_kmem_ctor_t *ctor);
extern void *lkpi_kmem_cache_alloc(struct linux_kmem_cache *, gfp_t);
extern void *lkpi_kmem_cache_zalloc(struct linux_kmem_cache *, gfp_t);
extern void lkpi_kmem_cache_free(struct linux_kmem_cache *, void *);
extern void linux_kmem_cache_destroy(struct linux_kmem_cache *);
void linux_kfree_async(void *);
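
/*
 * Lifecycle sketch (illustrative; "struct foo" is hypothetical): the
 * kmem_cache_* aliases defined earlier route to the functions declared
 * above, giving the usual Linux-style cache lifecycle.
 *
 *	struct kmem_cache *foo_cache;
 *	struct foo *f;
 *
 *	foo_cache = KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN);
 *	f = kmem_cache_zalloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 *	kmem_cache_destroy(foo_cache);
 */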

#endif					/* _LINUX_SLAB_H_ */