xref: /freebsd/sys/compat/linuxkpi/common/src/linux_slab.c (revision 3110d4ebd6c0848cf5e25890d01791bb407e2a9b)
/*-
 * Copyright (c) 2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/irq_work.h>
#include <linux/llist.h>

#include <sys/param.h>
#include <sys/taskqueue.h>

/* RCU trailer stored in the last bytes of every SLAB_TYPESAFE_BY_RCU object. */
struct linux_kmem_rcu {
	struct rcu_head rcu_head;
	struct linux_kmem_cache *cache;
};

/* Map an object pointer to its RCU trailer. */
#define	LINUX_KMEM_TO_RCU(c, m)					\
	((struct linux_kmem_rcu *)((char *)(m) +		\
	(c)->cache_size - sizeof(struct linux_kmem_rcu)))

/* Map an RCU trailer back to its object pointer. */
#define	LINUX_RCU_TO_KMEM(r)					\
	((void *)((char *)(r) + sizeof(struct linux_kmem_rcu) - \
	(r)->cache->cache_size))
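
/*
 * Illustrative sketch (not compiled in): how the two macros above relate
 * for a SLAB_TYPESAFE_BY_RCU cache.  "c" is assumed to be a cache created
 * by linux_kmem_cache_create() below and "m" an object allocated from it.
 */
#if 0
static void
linux_kmem_rcu_layout_sketch(struct linux_kmem_cache *c, void *m)
{
	struct linux_kmem_rcu *rcu;

	/* The RCU trailer occupies the last bytes of the object. */
	rcu = LINUX_KMEM_TO_RCU(c, m);
	/* linux_kmem_ctor() below fills in the back-pointer on allocation. */
	MPASS(rcu->cache == c);
	/* The two macros are inverses of each other. */
	MPASS(LINUX_RCU_TO_KMEM(rcu) == m);
}
#endif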

static LLIST_HEAD(linux_kfree_async_list);

static int
linux_kmem_ctor(void *mem, int size, void *arg, int flags)
{
	struct linux_kmem_cache *c = arg;

	if (unlikely(c->cache_flags & SLAB_TYPESAFE_BY_RCU)) {
		struct linux_kmem_rcu *rcu = LINUX_KMEM_TO_RCU(c, mem);

		/* duplicate cache pointer */
		rcu->cache = c;
	}

	/* check for constructor */
	if (likely(c->cache_ctor != NULL))
		c->cache_ctor(mem);

	return (0);
}

/* RCU callback: a grace period has elapsed, return the object to UMA. */
static void
linux_kmem_cache_free_rcu_callback(struct rcu_head *head)
{
	struct linux_kmem_rcu *rcu =
	    container_of(head, struct linux_kmem_rcu, rcu_head);

	uma_zfree(rcu->cache->cache_zone, LINUX_RCU_TO_KMEM(rcu));
}

struct linux_kmem_cache *
linux_kmem_cache_create(const char *name, size_t size, size_t align,
    unsigned flags, linux_kmem_ctor_t *ctor)
{
	struct linux_kmem_cache *c;

	c = malloc(sizeof(*c), M_KMALLOC, M_WAITOK);

	if (flags & SLAB_HWCACHE_ALIGN)
		align = UMA_ALIGN_CACHE;
	else if (align != 0)
		align--;	/* UMA expects the alignment as a mask */

	if (flags & SLAB_TYPESAFE_BY_RCU) {
		/* make room for RCU structure */
		size = ALIGN(size, sizeof(void *));
		size += sizeof(struct linux_kmem_rcu);

		/*
		 * Create cache_zone; the constructor is always installed
		 * here so that the RCU trailer's back-pointer gets set.
		 */
		c->cache_zone = uma_zcreate(name, size,
		    linux_kmem_ctor, NULL, NULL, NULL,
		    align, UMA_ZONE_ZINIT);
	} else {
		/* create cache_zone */
		c->cache_zone = uma_zcreate(name, size,
		    ctor ? linux_kmem_ctor : NULL, NULL,
		    NULL, NULL, align, 0);
	}

	c->cache_flags = flags;
	c->cache_ctor = ctor;
	c->cache_size = size;
	return (c);
}
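
/*
 * Sketch of typical consumer usage, assuming the usual kmem_cache_*()
 * wrappers from <linux/slab.h>; "struct foo" and foo_ctor() are
 * hypothetical.
 */
#if 0
struct foo {
	int state;
};

static void
foo_ctor(void *mem)
{
	struct foo *f = mem;

	f->state = 0;
}

static void
foo_cache_example(void)
{
	struct kmem_cache *cache;
	struct foo *f;

	cache = kmem_cache_create("foo", sizeof(struct foo), 0,
	    SLAB_HWCACHE_ALIGN, foo_ctor);
	f = kmem_cache_alloc(cache, GFP_KERNEL);
	kmem_cache_free(cache, f);
	kmem_cache_destroy(cache);
}
#endif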

void
linux_kmem_cache_free_rcu(struct linux_kmem_cache *c, void *m)
{
	struct linux_kmem_rcu *rcu = LINUX_KMEM_TO_RCU(c, m);

	/* defer the actual free until after an RCU grace period */
	call_rcu(&rcu->rcu_head, linux_kmem_cache_free_rcu_callback);
}

void
linux_kmem_cache_destroy(struct linux_kmem_cache *c)
{
	if (unlikely(c->cache_flags & SLAB_TYPESAFE_BY_RCU)) {
		/* make sure all free callbacks have been called */
		rcu_barrier();
	}

	uma_zdestroy(c->cache_zone);
	free(c, M_KMALLOC);
}
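
/*
 * Sketch of the SLAB_TYPESAFE_BY_RCU pattern served by the two functions
 * above, assuming a cache created with that flag: an object may be freed
 * and reused while a reader holds rcu_read_lock(), but its memory is not
 * returned to UMA before a grace period, so the reader can still inspect
 * it and revalidate.  "struct obj" and the generation check are
 * hypothetical.
 */
#if 0
struct obj {
	unsigned int gen;
};

static bool
obj_still_current(struct obj *o, unsigned int expected_gen)
{
	bool valid;

	rcu_read_lock();
	/* The object stays type-stable, even if it was recycled. */
	valid = (o->gen == expected_gen);
	rcu_read_unlock();
	return (valid);
}
#endif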

static void
linux_kfree_async_fn(void *context, int pending)
{
	struct llist_node *freed;

	while ((freed = llist_del_first(&linux_kfree_async_list)) != NULL)
		kfree(freed);
}

static struct task linux_kfree_async_task =
    TASK_INITIALIZER(0, linux_kfree_async_fn, &linux_kfree_async_task);

/*
 * The memory being freed doubles as the llist_node, so deferring the
 * free requires no extra allocation.
 */
void
linux_kfree_async(void *addr)
{
	if (addr == NULL)
		return;
	llist_add(addr, &linux_kfree_async_list);
	taskqueue_enqueue(linux_irq_work_tq, &linux_kfree_async_task);
}
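
/*
 * Sketch of a hypothetical caller of linux_kfree_async() above: the free
 * is deferred to linux_irq_work_tq, which helps in contexts where calling
 * kfree() directly is undesirable (e.g. while holding locks that must not
 * be held across a free).
 */
#if 0
static void
example_deferred_release(void *buf)
{
	/* Queue the buffer; kfree() runs later from taskqueue context. */
	linux_kfree_async(buf);
}
#endif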