1 /*- 2 * Copyright (c) 2010 Isilon Systems, Inc. 3 * Copyright (c) 2010 iX Systems, Inc. 4 * Copyright (c) 2010 Panasas, Inc. 5 * Copyright (c) 2013-2021 Mellanox Technologies, Ltd. 6 * All rights reserved. 7 * Copyright (c) 2024-2026 The FreeBSD Foundation 8 * 9 * Portions of this software were developed by Björn Zeeb 10 * under sponsorship from the FreeBSD Foundation. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice unmodified, this list of conditions, and the following 17 * disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */
#ifndef	_LINUXKPI_LINUX_SLAB_H_
#define	_LINUXKPI_LINUX_SLAB_H_

#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/limits.h>

#include <linux/compat.h>
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/err.h>
#include <linux/llist.h>
#include <linux/overflow.h>
#include <linux/cleanup.h>

/* All LinuxKPI k*alloc() memory is accounted to this malloc(9) type. */
MALLOC_DECLARE(M_KMALLOC);

/* Zeroing variants are expressed via __GFP_ZERO on the base allocators. */
#define kvzalloc(size, flags)		kvmalloc(size, (flags) | __GFP_ZERO)
#define kvcalloc(n, size, flags)	kvmalloc_array(n, size, (flags) | __GFP_ZERO)
#define kzalloc(size, flags)		kmalloc(size, (flags) | __GFP_ZERO)
#define kzalloc_node(size, flags, node)	kmalloc_node(size, (flags) | __GFP_ZERO, node)
/* Allocate one zeroed object sized for the type of pointer/lvalue _p. */
#define kzalloc_obj(_p, ...)						\
    kzalloc(sizeof(typeof(_p)), default_gfp(__VA_ARGS__))
/* Allocate _n zeroed objects; size_mul() saturates on overflow. */
#define kzalloc_objs(_p, _n, ...)					\
    kzalloc(size_mul((_n), sizeof(typeof(_p))), default_gfp(__VA_ARGS__))
/*
 * Allocate a zeroed structure with a flexible array member `_field`
 * sized for _n elements, recording _n in the flex counter on success.
 */
#define kzalloc_flex(_p, _field, _n, ...)				\
({									\
	const size_t __n = (_n);					\
	const size_t __psize = struct_size_t(typeof(_p), _field, __n);	\
	typeof(_p) *__p_obj;						\
									\
	__p_obj = kzalloc(__psize, default_gfp(__VA_ARGS__));		\
	if (__p_obj != NULL)						\
		__set_flex_counter(__p_obj->_field, __n);		\
									\
	__p_obj;							\
})
#define kfree_const(ptr)		kfree(ptr)
#define kfree_async(ptr)		kfree(ptr)	/* drm-kmod 5.4 compat */
#define vzalloc(size)			__vmalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 0)
/* vmalloc()-family memory is not guaranteed contiguous; kfree() handles both. */
#define vfree(arg)			kfree(arg)
#define kvfree(arg)			kfree(arg)
#define vmalloc_node(size, node)	__vmalloc_node(size, GFP_KERNEL, node)
#define vmalloc_user(size)		__vmalloc(size, GFP_KERNEL | __GFP_ZERO, 0)
#define vmalloc(size)			__vmalloc(size, GFP_KERNEL, 0)

/*
 * Prefix some functions with linux_ to avoid namespace conflict
 * with the OpenSolaris code in the kernel.
 */
#define kmem_cache		linux_kmem_cache
#define kmem_cache_create(...)	linux_kmem_cache_create(__VA_ARGS__)
#define kmem_cache_alloc(...)	lkpi_kmem_cache_alloc(__VA_ARGS__)
#define kmem_cache_zalloc(...)	lkpi_kmem_cache_zalloc(__VA_ARGS__)
#define kmem_cache_free(...)	lkpi_kmem_cache_free(__VA_ARGS__)
#define kmem_cache_destroy(...)	linux_kmem_cache_destroy(__VA_ARGS__)
/* Cache shrinking is a no-op in this implementation; report 0 freed. */
#define kmem_cache_shrink(x)	(0)

#define KMEM_CACHE(__struct, flags)					\
    linux_kmem_cache_create(#__struct, sizeof(struct __struct),		\
    __alignof(struct __struct), (flags), NULL)

/* Per-object constructor invoked when a cache slot is (re)initialized. */
typedef void linux_kmem_ctor_t (void *);

struct linux_kmem_cache;

#define SLAB_HWCACHE_ALIGN	(1 << 0)
#define SLAB_TYPESAFE_BY_RCU	(1 << 1)
#define SLAB_RECLAIM_ACCOUNT	(1 << 2)

/* Old Linux name for SLAB_TYPESAFE_BY_RCU. */
#define SLAB_DESTROY_BY_RCU \
	SLAB_TYPESAFE_BY_RCU

#define ARCH_KMALLOC_MINALIGN \
	__alignof(unsigned long long)

/*
 * Sentinel returned for zero-sized allocations: non-NULL so callers can
 * distinguish "empty" from "failed", but never dereferenceable.
 */
#define ZERO_SIZE_PTR		((void *)16)
#define ZERO_OR_NULL_PTR(x)	((x) == NULL || (x) == ZERO_SIZE_PTR)

struct linux_kmem_cache *linux_kmem_cache_create(const char *name,
    size_t size, size_t align, unsigned flags, linux_kmem_ctor_t *ctor);
void *lkpi_kmem_cache_alloc(struct linux_kmem_cache *, gfp_t);
void *lkpi_kmem_cache_zalloc(struct linux_kmem_cache *, gfp_t);
void lkpi_kmem_cache_free(struct linux_kmem_cache *, void *);
void linux_kmem_cache_destroy(struct linux_kmem_cache *);

void *lkpi_kmalloc(size_t, gfp_t);
void *lkpi_kvmalloc(size_t, gfp_t);
void *lkpi___kmalloc(size_t, gfp_t);
void *lkpi___kmalloc_node(size_t, gfp_t, int);
void *lkpi_krealloc(void *, size_t, gfp_t);
void lkpi_kfree(const void *);

/*
 * Translate Linux GFP flags into a valid malloc(9) flag set:
 * exactly one of M_NOWAIT/M_WAITOK must end up set (M_NOWAIT wins
 * when both or neither are present), and LinuxKPI-only bits are
 * masked away before handing the flags to native malloc(9).
 */
static inline gfp_t
linux_check_m_flags(gfp_t flags)
{
	const gfp_t m = M_NOWAIT | M_WAITOK;

	/* make sure either M_NOWAIT or M_WAITOK is set */
	if ((flags & m) == 0)
		flags |= M_NOWAIT;
	else if ((flags & m) == m)
		flags &= ~M_WAITOK;

	/* mask away LinuxKPI specific flags */
	return (flags & GFP_NATIVE_MASK);
}

/*
 * Base functions with a native implementation.
143 */ 144 static inline void * 145 kmalloc(size_t size, gfp_t flags) 146 { 147 return (lkpi_kmalloc(size, flags)); 148 } 149 150 static inline void * 151 __kmalloc(size_t size, gfp_t flags) 152 { 153 return (lkpi___kmalloc(size, flags)); 154 } 155 156 static inline void * 157 kmalloc_node(size_t size, gfp_t flags, int node) 158 { 159 return (lkpi___kmalloc_node(size, flags, node)); 160 } 161 162 #define kmalloc_obj(_p, ...) \ 163 kmalloc(sizeof(typeof(_p)), default_gfp(__VA_ARGS__)) 164 165 #define kmalloc_objs(_p, _n, ...) \ 166 kmalloc(size_mul((_n) * sizeof(typeof(_p))), default_gfp(__VA_ARGS__)) 167 168 static inline void * 169 krealloc(void *ptr, size_t size, gfp_t flags) 170 { 171 return (lkpi_krealloc(ptr, size, flags)); 172 } 173 174 static inline void 175 kfree(const void *ptr) 176 { 177 lkpi_kfree(ptr); 178 } 179 180 DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T)) 181 182 /* 183 * Other k*alloc() funtions using the above as underlying allocator. 184 */ 185 /* kmalloc */ 186 static inline void * 187 kmalloc_array(size_t n, size_t size, gfp_t flags) 188 { 189 if (WOULD_OVERFLOW(n, size)) 190 panic("%s: %zu * %zu overflowed", __func__, n, size); 191 192 return (kmalloc(size * n, flags)); 193 } 194 195 static inline void * 196 kcalloc(size_t n, size_t size, gfp_t flags) 197 { 198 flags |= __GFP_ZERO; 199 return (kmalloc_array(n, size, flags)); 200 } 201 202 /* kmalloc_node */ 203 static inline void * 204 kmalloc_array_node(size_t n, size_t size, gfp_t flags, int node) 205 { 206 if (WOULD_OVERFLOW(n, size)) 207 panic("%s: %zu * %zu overflowed", __func__, n, size); 208 209 return (kmalloc_node(size * n, flags, node)); 210 } 211 212 static inline void * 213 kcalloc_node(size_t n, size_t size, gfp_t flags, int node) 214 { 215 flags |= __GFP_ZERO; 216 return (kmalloc_array_node(n, size, flags, node)); 217 } 218 219 /* krealloc */ 220 static inline void * 221 krealloc_array(void *ptr, size_t n, size_t size, gfp_t flags) 222 { 223 if 
(WOULD_OVERFLOW(n, size)) 224 return NULL; 225 226 return (krealloc(ptr, n * size, flags)); 227 } 228 229 /* 230 * vmalloc/kvalloc functions. 231 */ 232 static inline void * 233 __vmalloc(size_t size, gfp_t flags, int other) 234 { 235 return (malloc(size, M_KMALLOC, linux_check_m_flags(flags))); 236 } 237 238 static inline void * 239 __vmalloc_node(size_t size, gfp_t flags, int node) 240 { 241 return (malloc_domainset(size, M_KMALLOC, 242 linux_get_vm_domain_set(node), linux_check_m_flags(flags))); 243 } 244 245 static inline void * 246 vmalloc_32(size_t size) 247 { 248 return (contigmalloc(size, M_KMALLOC, M_WAITOK, 0, UINT_MAX, 1, 1)); 249 } 250 251 /* May return non-contiguous memory. */ 252 static inline void * 253 kvmalloc(size_t size, gfp_t flags) 254 { 255 return (lkpi_kvmalloc(size, flags)); 256 } 257 258 static inline void * 259 kvmalloc_array(size_t n, size_t size, gfp_t flags) 260 { 261 if (WOULD_OVERFLOW(n, size)) 262 panic("%s: %zu * %zu overflowed", __func__, n, size); 263 264 return (kvmalloc(size * n, flags)); 265 } 266 267 static inline void * 268 kvrealloc(const void *ptr, size_t oldsize, size_t newsize, gfp_t flags) 269 { 270 void *newptr; 271 272 if (newsize <= oldsize) 273 return (__DECONST(void *, ptr)); 274 275 newptr = kvmalloc(newsize, flags); 276 if (newptr != NULL) { 277 memcpy(newptr, ptr, oldsize); 278 kvfree(ptr); 279 } 280 281 return (newptr); 282 } 283 284 /* 285 * Misc. 286 */ 287 288 static __inline void 289 kfree_sensitive(const void *ptr) 290 { 291 if (ZERO_OR_NULL_PTR(ptr)) 292 return; 293 294 zfree(__DECONST(void *, ptr), M_KMALLOC); 295 } 296 297 static inline size_t 298 ksize(const void *ptr) 299 { 300 return (malloc_usable_size(ptr)); 301 } 302 303 static inline size_t 304 kmalloc_size_roundup(size_t size) 305 { 306 if (unlikely(size == 0 || size == SIZE_MAX)) 307 return (size); 308 return (malloc_size(size)); 309 } 310 311 #endif /* _LINUXKPI_LINUX_SLAB_H_ */ 312