/*-
 * Copyright (c) 2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/irq_work.h>
#include <linux/llist.h>

#include <sys/param.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>

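/*
 * Trailer appended to every item of a SLAB_TYPESAFE_BY_RCU cache.  It
 * carries the rcu_head used by call_rcu() and a back-pointer to the
 * owning cache so the item can be returned to the right UMA zone once
 * the RCU grace period has elapsed.
 */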
struct linux_kmem_rcu {
	struct rcu_head rcu_head;
	struct linux_kmem_cache *cache;
};

struct linux_kmem_cache {
	uma_zone_t cache_zone;
	linux_kmem_ctor_t *cache_ctor;
	unsigned cache_flags;
	unsigned cache_size;
	struct llist_head cache_items;
	struct task cache_task;
};

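/*
 * The RCU trailer occupies the last bytes of each item.  These macros
 * convert between a pointer to an item and a pointer to its embedded
 * struct linux_kmem_rcu, and back again.
 */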
#define	LINUX_KMEM_TO_RCU(c, m)					\
	((struct linux_kmem_rcu *)((char *)(m) +		\
	(c)->cache_size - sizeof(struct linux_kmem_rcu)))

#define	LINUX_RCU_TO_KMEM(r)					\
	((void *)((char *)(r) + sizeof(struct linux_kmem_rcu) - \
	(r)->cache->cache_size))

static LLIST_HEAD(linux_kfree_async_list);

static void	lkpi_kmem_cache_free_async_fn(void *, int);

void *
lkpi_kmem_cache_alloc(struct linux_kmem_cache *c, gfp_t flags)
{
	return (uma_zalloc_arg(c->cache_zone, c,
	    linux_check_m_flags(flags)));
}

void *
lkpi_kmem_cache_zalloc(struct linux_kmem_cache *c, gfp_t flags)
{
	return (uma_zalloc_arg(c->cache_zone, c,
	    linux_check_m_flags(flags | M_ZERO)));
}

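/*
 * UMA item constructor.  For SLAB_TYPESAFE_BY_RCU caches the owning
 * cache pointer is recorded in the item's RCU trailer; the optional
 * Linux-style constructor, if any, is invoked afterwards.
 */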
static int
linux_kmem_ctor(void *mem, int size, void *arg, int flags)
{
	struct linux_kmem_cache *c = arg;

	if (unlikely(c->cache_flags & SLAB_TYPESAFE_BY_RCU)) {
		struct linux_kmem_rcu *rcu = LINUX_KMEM_TO_RCU(c, mem);

		/* duplicate cache pointer */
		rcu->cache = c;
	}

	/* check for constructor */
	if (likely(c->cache_ctor != NULL))
		c->cache_ctor(mem);

	return (0);
}

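/*
 * Executed after an RCU grace period: map the rcu_head back to the
 * start of the item and return it to the cache's UMA zone.
 */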
static void
linux_kmem_cache_free_rcu_callback(struct rcu_head *head)
{
	struct linux_kmem_rcu *rcu =
	    container_of(head, struct linux_kmem_rcu, rcu_head);

	uma_zfree(rcu->cache->cache_zone, LINUX_RCU_TO_KMEM(rcu));
}

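/*
 * Create the UMA zone backing a Linux kmem cache.  SLAB_HWCACHE_ALIGN
 * maps to cache-line alignment.  For SLAB_TYPESAFE_BY_RCU caches the
 * item size is grown to hold the RCU trailer; otherwise the item must
 * at least fit a struct llist_node, which the deferred-free path reuses
 * as list linkage.
 */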
struct linux_kmem_cache *
linux_kmem_cache_create(const char *name, size_t size, size_t align,
    unsigned flags, linux_kmem_ctor_t *ctor)
{
	struct linux_kmem_cache *c;

	c = malloc(sizeof(*c), M_KMALLOC, M_WAITOK);

	if (flags & SLAB_HWCACHE_ALIGN)
		align = UMA_ALIGN_CACHE;
	else if (align != 0)
		align--;

	if (flags & SLAB_TYPESAFE_BY_RCU) {
		/* make room for RCU structure */
		size = ALIGN(size, sizeof(void *));
		size += sizeof(struct linux_kmem_rcu);

		/* create cache_zone */
		c->cache_zone = uma_zcreate(name, size,
		    linux_kmem_ctor, NULL, NULL, NULL,
		    align, UMA_ZONE_ZINIT);
	} else {
		/* make room for async task list items */
		size = MAX(size, sizeof(struct llist_node));

		/* create cache_zone */
		c->cache_zone = uma_zcreate(name, size,
		    ctor ? linux_kmem_ctor : NULL, NULL,
		    NULL, NULL, align, 0);
	}

	c->cache_flags = flags;
	c->cache_ctor = ctor;
	c->cache_size = size;
	init_llist_head(&c->cache_items);
	TASK_INIT(&c->cache_task, 0, lkpi_kmem_cache_free_async_fn, c);
	return (c);
}

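/*
 * Free an item from a SLAB_TYPESAFE_BY_RCU cache by deferring the
 * uma_zfree() until after the current RCU grace period.
 */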
static inline void
lkpi_kmem_cache_free_rcu(struct linux_kmem_cache *c, void *m)
{
	struct linux_kmem_rcu *rcu = LINUX_KMEM_TO_RCU(c, m);

	call_rcu(&rcu->rcu_head, linux_kmem_cache_free_rcu_callback);
}

static inline void
lkpi_kmem_cache_free_sync(struct linux_kmem_cache *c, void *m)
{
	uma_zfree(c->cache_zone, m);
}

static void
lkpi_kmem_cache_free_async_fn(void *context, int pending)
{
	struct linux_kmem_cache *c = context;
	struct llist_node *freed, *next;

	llist_for_each_safe(freed, next, llist_del_all(&c->cache_items))
		lkpi_kmem_cache_free_sync(c, freed);
}

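/*
 * Queue an item for deferred freeing.  The item's storage is reused as
 * a struct llist_node and the actual uma_zfree() happens later in
 * lkpi_kmem_cache_free_async_fn() on the linux_irq_work taskqueue.
 */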
static inline void
lkpi_kmem_cache_free_async(struct linux_kmem_cache *c, void *m)
{
	if (m == NULL)
		return;

	llist_add(m, &c->cache_items);
	taskqueue_enqueue(linux_irq_work_tq, &c->cache_task);
}

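/*
 * Free an item, choosing the appropriate path: RCU-deferred for
 * SLAB_TYPESAFE_BY_RCU caches, taskqueue-deferred when the caller is
 * inside a critical section where freeing to UMA is not safe, and an
 * immediate uma_zfree() otherwise.
 */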
void
lkpi_kmem_cache_free(struct linux_kmem_cache *c, void *m)
{
	if (unlikely(c->cache_flags & SLAB_TYPESAFE_BY_RCU))
		lkpi_kmem_cache_free_rcu(c, m);
	else if (unlikely(curthread->td_critnest != 0))
		lkpi_kmem_cache_free_async(c, m);
	else
		lkpi_kmem_cache_free_sync(c, m);
}

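/*
 * Destroy a cache: wait for outstanding RCU free callbacks, flush any
 * items still queued for asynchronous freeing, then tear down the UMA
 * zone.
 */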
void
linux_kmem_cache_destroy(struct linux_kmem_cache *c)
{
	if (c == NULL)
		return;

	if (unlikely(c->cache_flags & SLAB_TYPESAFE_BY_RCU)) {
		/* make sure all free callbacks have been called */
		rcu_barrier();
	}

	if (!llist_empty(&c->cache_items))
		taskqueue_enqueue(linux_irq_work_tq, &c->cache_task);
	taskqueue_drain(linux_irq_work_tq, &c->cache_task);
	uma_zdestroy(c->cache_zone);
	free(c, M_KMALLOC);
}

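/*
 * Linux kmalloc() returns physically contiguous memory, which plain
 * malloc(9) does not guarantee beyond a page; requests larger than a
 * page are therefore served by contigmalloc*().
 */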
void *
lkpi___kmalloc_node(size_t size, gfp_t flags, int node)
{
	if (size <= PAGE_SIZE)
		return (malloc_domainset(size, M_KMALLOC,
		    linux_get_vm_domain_set(node), linux_check_m_flags(flags)));
	else
		return (contigmalloc_domainset(size, M_KMALLOC,
		    linux_get_vm_domain_set(node), linux_check_m_flags(flags),
		    0, -1UL, PAGE_SIZE, 0));
}

void *
lkpi___kmalloc(size_t size, gfp_t flags)
{
	size_t _s;

	/* sizeof(struct llist_node) is used for kfree_async(). */
	_s = MAX(size, sizeof(struct llist_node));

	if (_s <= PAGE_SIZE)
		return (malloc(_s, M_KMALLOC, linux_check_m_flags(flags)));
	else
		return (contigmalloc(_s, M_KMALLOC, linux_check_m_flags(flags),
		    0, -1UL, PAGE_SIZE, 0));
}

struct lkpi_kmalloc_ctx {
	size_t size;
	gfp_t flags;
	void *addr;
};

static void
lkpi_kmalloc_cb(void *ctx)
{
	struct lkpi_kmalloc_ctx *lmc = ctx;

	lmc->addr = __kmalloc(lmc->size, lmc->flags);
}

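/*
 * Run __kmalloc() through lkpi_fpu_safe_exec() so that the allocation
 * is also safe when the calling thread is inside a kernel FPU section.
 */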
void *
lkpi_kmalloc(size_t size, gfp_t flags)
{
	struct lkpi_kmalloc_ctx lmc = { .size = size, .flags = flags };

	lkpi_fpu_safe_exec(&lkpi_kmalloc_cb, &lmc);
	return (lmc.addr);
}

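/*
 * Asynchronous kfree() support: addresses passed to linux_kfree_async()
 * are collected on a lock-free list (reusing the allocation itself as
 * the llist_node, see lkpi___kmalloc()) and freed later from the
 * linux_irq_work taskqueue.
 */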
static void
linux_kfree_async_fn(void *context, int pending)
{
	struct llist_node *freed;

	while ((freed = llist_del_first(&linux_kfree_async_list)) != NULL)
		kfree(freed);
}

static struct task linux_kfree_async_task =
    TASK_INITIALIZER(0, linux_kfree_async_fn, &linux_kfree_async_task);

void
linux_kfree_async(void *addr)
{
	if (addr == NULL)
		return;
	llist_add(addr, &linux_kfree_async_list);
	taskqueue_enqueue(linux_irq_work_tq, &linux_kfree_async_task);
}