/*-
 * Copyright (c) 2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 * Copyright (c) 2024-2025 The FreeBSD Foundation
 *
 * Portions of this software were developed by Björn Zeeb
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/irq_work.h>
#include <linux/llist.h>

#include <sys/param.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>

struct linux_kmem_rcu {
	struct rcu_head rcu_head;
	struct linux_kmem_cache *cache;
};

struct linux_kmem_cache {
	uma_zone_t cache_zone;
	linux_kmem_ctor_t *cache_ctor;
	unsigned cache_flags;
	unsigned cache_size;
	struct llist_head cache_items;
	struct task cache_task;
};

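/*
 * Items in a SLAB_TYPESAFE_BY_RCU cache carry a struct linux_kmem_rcu
 * trailer at their end; the macros below convert between a pointer to
 * an item and a pointer to its trailer.
 */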
#define	LINUX_KMEM_TO_RCU(c, m)					\
	((struct linux_kmem_rcu *)((char *)(m) +		\
	(c)->cache_size - sizeof(struct linux_kmem_rcu)))

#define	LINUX_RCU_TO_KMEM(r)					\
	((void *)((char *)(r) + sizeof(struct linux_kmem_rcu) - \
	(r)->cache->cache_size))

static LLIST_HEAD(linux_kfree_async_list);

static void	lkpi_kmem_cache_free_async_fn(void *, int);

void *
lkpi_kmem_cache_alloc(struct linux_kmem_cache *c, gfp_t flags)
{
	return (uma_zalloc_arg(c->cache_zone, c,
	    linux_check_m_flags(flags)));
}

void *
lkpi_kmem_cache_zalloc(struct linux_kmem_cache *c, gfp_t flags)
{
	return (uma_zalloc_arg(c->cache_zone, c,
	    linux_check_m_flags(flags | M_ZERO)));
}

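/*
 * UMA item constructor; records the owning cache in the RCU trailer for
 * SLAB_TYPESAFE_BY_RCU caches and dispatches to the Linux-style
 * constructor, if one was given.
 */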
static int
linux_kmem_ctor(void *mem, int size, void *arg, int flags)
{
	struct linux_kmem_cache *c = arg;

	if (unlikely(c->cache_flags & SLAB_TYPESAFE_BY_RCU)) {
		struct linux_kmem_rcu *rcu = LINUX_KMEM_TO_RCU(c, mem);

		/* duplicate cache pointer */
		rcu->cache = c;
	}

	/* check for constructor */
	if (likely(c->cache_ctor != NULL))
		c->cache_ctor(mem);

	return (0);
}

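/* RCU callback: the grace period has expired, return the item to UMA. */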
static void
linux_kmem_cache_free_rcu_callback(struct rcu_head *head)
{
	struct linux_kmem_rcu *rcu =
	    container_of(head, struct linux_kmem_rcu, rcu_head);

	uma_zfree(rcu->cache->cache_zone, LINUX_RCU_TO_KMEM(rcu));
}

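/*
 * Linux expresses alignment as a byte count while UMA expects a mask,
 * hence the "align--" below.  SLAB_TYPESAFE_BY_RCU caches reserve room
 * for the RCU trailer at the end of every item; all other caches only
 * need each item to be large enough to hold the struct llist_node used
 * by the deferred-free path.
 */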
struct linux_kmem_cache *
linux_kmem_cache_create(const char *name, size_t size, size_t align,
    unsigned flags, linux_kmem_ctor_t *ctor)
{
	struct linux_kmem_cache *c;

	c = malloc(sizeof(*c), M_KMALLOC, M_WAITOK);

	if (flags & SLAB_HWCACHE_ALIGN)
		align = UMA_ALIGN_CACHE;
	else if (align != 0)
		align--;

	if (flags & SLAB_TYPESAFE_BY_RCU) {
		/* make room for RCU structure */
		size = ALIGN(size, sizeof(void *));
		size += sizeof(struct linux_kmem_rcu);

		/* create cache_zone */
		c->cache_zone = uma_zcreate(name, size,
		    linux_kmem_ctor, NULL, NULL, NULL,
		    align, UMA_ZONE_ZINIT);
	} else {
		/* make room for async task list items */
		size = MAX(size, sizeof(struct llist_node));

		/* create cache_zone */
		c->cache_zone = uma_zcreate(name, size,
		    ctor ? linux_kmem_ctor : NULL, NULL,
		    NULL, NULL, align, 0);
	}

	c->cache_flags = flags;
	c->cache_ctor = ctor;
	c->cache_size = size;
	init_llist_head(&c->cache_items);
	TASK_INIT(&c->cache_task, 0, lkpi_kmem_cache_free_async_fn, c);
	return (c);
}

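/*
 * Defer the free until after an RCU grace period; the callback above
 * performs the actual uma_zfree().
 */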
static inline void
lkpi_kmem_cache_free_rcu(struct linux_kmem_cache *c, void *m)
{
	struct linux_kmem_rcu *rcu = LINUX_KMEM_TO_RCU(c, m);

	call_rcu(&rcu->rcu_head, linux_kmem_cache_free_rcu_callback);
}

static inline void
lkpi_kmem_cache_free_sync(struct linux_kmem_cache *c, void *m)
{
	uma_zfree(c->cache_zone, m);
}

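/*
 * Taskqueue callback: drain items that were freed from a critical
 * section, in a context where taking the UMA zone locks is permitted.
 */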
static void
lkpi_kmem_cache_free_async_fn(void *context, int pending)
{
	struct linux_kmem_cache *c = context;
	struct llist_node *freed, *next;

	llist_for_each_safe(freed, next, llist_del_all(&c->cache_items))
		lkpi_kmem_cache_free_sync(c, freed);
}

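/*
 * Queue the item for a deferred free; its first bytes are reused as the
 * llist linkage, which linux_kmem_cache_create() sized every item to be
 * able to hold.
 */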
static inline void
lkpi_kmem_cache_free_async(struct linux_kmem_cache *c, void *m)
{
	if (m == NULL)
		return;

	llist_add(m, &c->cache_items);
	taskqueue_enqueue(linux_irq_work_tq, &c->cache_task);
}

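/*
 * Free dispatch: RCU-typesafe caches wait out a grace period, frees
 * issued from a critical section are punted to the taskqueue, and
 * everything else is returned to UMA directly.
 */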
void
lkpi_kmem_cache_free(struct linux_kmem_cache *c, void *m)
{
	if (unlikely(c->cache_flags & SLAB_TYPESAFE_BY_RCU))
		lkpi_kmem_cache_free_rcu(c, m);
	else if (unlikely(curthread->td_critnest != 0))
		lkpi_kmem_cache_free_async(c, m);
	else
		lkpi_kmem_cache_free_sync(c, m);
}

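/*
 * Wait for outstanding RCU callbacks and drain any frees still queued
 * on the async path before tearing the zone down.
 */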
void
linux_kmem_cache_destroy(struct linux_kmem_cache *c)
{
	if (c == NULL)
		return;

	if (unlikely(c->cache_flags & SLAB_TYPESAFE_BY_RCU)) {
		/* make sure all free callbacks have been called */
		rcu_barrier();
	}

	if (!llist_empty(&c->cache_items))
		taskqueue_enqueue(linux_irq_work_tq, &c->cache_task);
	taskqueue_drain(linux_irq_work_tq, &c->cache_task);
	uma_zdestroy(c->cache_zone);
	free(c, M_KMALLOC);
}
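
/*
 * Sketch of typical consumer usage through the kmem_cache_*() wrappers
 * in <linux/slab.h> (hypothetical driver code):
 *
 *	struct kmem_cache *zone;
 *
 *	zone = kmem_cache_create("lkpi_foo", sizeof(struct foo), 0, 0, NULL);
 *	foo = kmem_cache_zalloc(zone, GFP_KERNEL);
 *	...
 *	kmem_cache_free(zone, foo);
 *	kmem_cache_destroy(zone);
 */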
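/*
 * kmalloc() memory must be physically contiguous, so allocations above
 * PAGE_SIZE are routed through contigmalloc(9).
 */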
void *
lkpi___kmalloc_node(size_t size, gfp_t flags, int node)
{
	if (size <= PAGE_SIZE)
		return (malloc_domainset(size, M_KMALLOC,
		    linux_get_vm_domain_set(node), linux_check_m_flags(flags)));
	else
		return (contigmalloc_domainset(size, M_KMALLOC,
		    linux_get_vm_domain_set(node), linux_check_m_flags(flags),
		    0, -1UL, PAGE_SIZE, 0));
}

void *
lkpi___kmalloc(size_t size, gfp_t flags)
{
	size_t _s;

	/* sizeof(struct llist_node) is used for kfree_async(). */
	_s = MAX(size, sizeof(struct llist_node));

	if (_s <= PAGE_SIZE)
		return (malloc(_s, M_KMALLOC, linux_check_m_flags(flags)));
	else
		return (contigmalloc(_s, M_KMALLOC, linux_check_m_flags(flags),
		    0, -1UL, PAGE_SIZE, 0));
}

void *
lkpi_krealloc(void *ptr, size_t size, gfp_t flags)
{
	void *nptr;
	size_t osize;

	/*
	 * First handle invariants based on function arguments.
	 */
	if (ptr == NULL)
		return (kmalloc(size, flags));

	osize = ksize(ptr);
	if (size <= osize)
		return (ptr);

	/*
	 * We know the new size > original size.  realloc(9) does not (and
	 * cannot) know about our requirements for physically contiguous
	 * memory, so we can only call it for sizes up to and including
	 * PAGE_SIZE, and otherwise have to replicate its functionality
	 * using kmalloc to get the contigmalloc(9) backing.
	 */
	if (size <= PAGE_SIZE)
		return (realloc(ptr, size, M_KMALLOC,
		    linux_check_m_flags(flags)));

	nptr = kmalloc(size, flags);
	if (nptr == NULL)
		return (NULL);

	memcpy(nptr, ptr, osize);
	kfree(ptr);
	return (nptr);
}

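/*
 * kmalloc() may be invoked from within a kernel FPU section (e.g. by
 * drm drivers); lkpi_fpu_safe_exec() runs the allocation callback in a
 * context where that is safe.
 */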
struct lkpi_kmalloc_ctx {
	size_t size;
	gfp_t flags;
	void *addr;
};

static void
lkpi_kmalloc_cb(void *ctx)
{
	struct lkpi_kmalloc_ctx *lmc = ctx;

	lmc->addr = __kmalloc(lmc->size, lmc->flags);
}

void *
lkpi_kmalloc(size_t size, gfp_t flags)
{
	struct lkpi_kmalloc_ctx lmc = { .size = size, .flags = flags };

	lkpi_fpu_safe_exec(&lkpi_kmalloc_cb, &lmc);
	return (lmc.addr);
}

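/*
 * kfree() requests issued from a critical section cannot call free(9)
 * directly; they are queued on a lock-free llist and drained later from
 * the irq-work taskqueue.
 */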
static void
linux_kfree_async_fn(void *context, int pending)
{
	struct llist_node *freed;

	while ((freed = llist_del_first(&linux_kfree_async_list)) != NULL)
		kfree(freed);
}

static struct task linux_kfree_async_task =
    TASK_INITIALIZER(0, linux_kfree_async_fn, &linux_kfree_async_task);

static void
linux_kfree_async(void *addr)
{
	if (addr == NULL)
		return;
	llist_add(addr, &linux_kfree_async_list);
	taskqueue_enqueue(linux_irq_work_tq, &linux_kfree_async_task);
}
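
/*
 * kfree() entry point: NULL and ZERO_SIZE_PTR are no-ops; frees from a
 * critical section take the async path above.
 */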
void
lkpi_kfree(const void *ptr)
{
	if (ZERO_OR_NULL_PTR(ptr))
		return;

	if (curthread->td_critnest != 0)
		linux_kfree_async(__DECONST(void *, ptr));
	else
		free(__DECONST(void *, ptr), M_KMALLOC);
}