xref: /freebsd/sys/compat/linuxkpi/common/src/linux_slab.c (revision a64729f5077d77e13b9497cb33ecb3c82e606ee8)
/*-
 * Copyright (c) 2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/irq_work.h>
#include <linux/llist.h>

#include <sys/param.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>

/* Trailer embedded at the end of each SLAB_TYPESAFE_BY_RCU item. */
struct linux_kmem_rcu {
	struct rcu_head rcu_head;
	struct linux_kmem_cache *cache;	/* owning cache, for the RCU callback */
};

struct linux_kmem_cache {
	uma_zone_t cache_zone;
	linux_kmem_ctor_t *cache_ctor;
	unsigned cache_flags;		/* SLAB_* creation flags */
	unsigned cache_size;		/* item size, including any RCU trailer */
	struct llist_head cache_items;	/* items pending asynchronous free */
	struct task cache_task;		/* drains cache_items */
};

#define	LINUX_KMEM_TO_RCU(c, m)					\
	((struct linux_kmem_rcu *)((char *)(m) +		\
	(c)->cache_size - sizeof(struct linux_kmem_rcu)))

#define	LINUX_RCU_TO_KMEM(r)					\
	((void *)((char *)(r) + sizeof(struct linux_kmem_rcu) - \
	(r)->cache->cache_size))
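
/*
 * Item layout implied by the two macros above (a sketch, derived from
 * the pointer arithmetic): the struct linux_kmem_rcu trailer sits at
 * the very end of each item, so item and trailer pointers convert by
 * a fixed offset:
 *
 *	+--------------------------------+------------------------+
 *	| object payload                 | struct linux_kmem_rcu  |
 *	+--------------------------------+------------------------+
 *	^ item (m)                       ^ LINUX_KMEM_TO_RCU(c, m)
 *	|<---------------------- cache_size -------------------->|
 */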

static LLIST_HEAD(linux_kfree_async_list);

static void	lkpi_kmem_cache_free_async_fn(void *, int);

void *
lkpi_kmem_cache_alloc(struct linux_kmem_cache *c, gfp_t flags)
{
	return (uma_zalloc_arg(c->cache_zone, c,
	    linux_check_m_flags(flags)));
}

void *
lkpi_kmem_cache_zalloc(struct linux_kmem_cache *c, gfp_t flags)
{
	return (uma_zalloc_arg(c->cache_zone, c,
	    linux_check_m_flags(flags | M_ZERO)));
}
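
/*
 * Example (hypothetical consumer; "foo_cache" and "struct foo" are
 * placeholders): drivers normally reach the functions above through
 * the kmem_cache_alloc()/kmem_cache_zalloc() wrappers provided by
 * <linux/slab.h>:
 *
 *	struct foo *f;
 *
 *	f = kmem_cache_zalloc(foo_cache, GFP_KERNEL);
 *	if (f == NULL)
 *		return (-ENOMEM);
 */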

static int
linux_kmem_ctor(void *mem, int size, void *arg, int flags)
{
	struct linux_kmem_cache *c = arg;

	if (unlikely(c->cache_flags & SLAB_TYPESAFE_BY_RCU)) {
		struct linux_kmem_rcu *rcu = LINUX_KMEM_TO_RCU(c, mem);

		/* duplicate cache pointer */
		rcu->cache = c;
	}

	/* check for constructor */
	if (likely(c->cache_ctor != NULL))
		c->cache_ctor(mem);

	return (0);
}

static void
linux_kmem_cache_free_rcu_callback(struct rcu_head *head)
{
	struct linux_kmem_rcu *rcu =
	    container_of(head, struct linux_kmem_rcu, rcu_head);

	uma_zfree(rcu->cache->cache_zone, LINUX_RCU_TO_KMEM(rcu));
}

struct linux_kmem_cache *
linux_kmem_cache_create(const char *name, size_t size, size_t align,
    unsigned flags, linux_kmem_ctor_t *ctor)
{
	struct linux_kmem_cache *c;

	c = malloc(sizeof(*c), M_KMALLOC, M_WAITOK);

	if (flags & SLAB_HWCACHE_ALIGN)
		align = UMA_ALIGN_CACHE;
	else if (align != 0)
		align--;

	if (flags & SLAB_TYPESAFE_BY_RCU) {
		/* make room for RCU structure */
		size = ALIGN(size, sizeof(void *));
		size += sizeof(struct linux_kmem_rcu);

		/* create cache_zone */
		c->cache_zone = uma_zcreate(name, size,
		    linux_kmem_ctor, NULL, NULL, NULL,
		    align, UMA_ZONE_ZINIT);
	} else {
		/* make room for async task list items */
		size = MAX(size, sizeof(struct llist_node));

		/* create cache_zone */
		c->cache_zone = uma_zcreate(name, size,
		    ctor ? linux_kmem_ctor : NULL, NULL,
		    NULL, NULL, align, 0);
	}

	c->cache_flags = flags;
	c->cache_ctor = ctor;
	c->cache_size = size;
	init_llist_head(&c->cache_items);
	TASK_INIT(&c->cache_task, 0, lkpi_kmem_cache_free_async_fn, c);
	return (c);
}
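
/*
 * Example (a sketch with a hypothetical "foo" cache): consumers call
 * the function above through the Linux-facing kmem_cache_create()
 * wrapper in <linux/slab.h>:
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo),
 *	    0, SLAB_HWCACHE_ALIGN, NULL);
 *	...
 *	kmem_cache_destroy(foo_cache);
 */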

static inline void
lkpi_kmem_cache_free_rcu(struct linux_kmem_cache *c, void *m)
{
	struct linux_kmem_rcu *rcu = LINUX_KMEM_TO_RCU(c, m);

	call_rcu(&rcu->rcu_head, linux_kmem_cache_free_rcu_callback);
}
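
/*
 * Note: with SLAB_TYPESAFE_BY_RCU the item is only handed back to UMA
 * after an RCU grace period (see the callback above), so readers may
 * still dereference it in the meantime; the guarantee is type
 * stability, not content validity.  A hypothetical reader (foo_* are
 * placeholders) therefore revalidates the object after the lookup:
 *
 *	rcu_read_lock();
 *	f = rcu_dereference(foo_table[i]);
 *	if (f != NULL && foo_still_valid(f))
 *		foo_use(f);
 *	rcu_read_unlock();
 */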

static inline void
lkpi_kmem_cache_free_sync(struct linux_kmem_cache *c, void *m)
{
	uma_zfree(c->cache_zone, m);
}

static void
lkpi_kmem_cache_free_async_fn(void *context, int pending)
{
	struct linux_kmem_cache *c = context;
	struct llist_node *freed, *next;

	llist_for_each_safe(freed, next, llist_del_all(&c->cache_items))
		lkpi_kmem_cache_free_sync(c, freed);
}

static inline void
lkpi_kmem_cache_free_async(struct linux_kmem_cache *c, void *m)
{
	if (m == NULL)
		return;

	llist_add(m, &c->cache_items);
	taskqueue_enqueue(linux_irq_work_tq, &c->cache_task);
}

/*
 * Choose the free path: RCU-typesafe caches defer through call_rcu();
 * otherwise, when the caller runs inside a critical section (where
 * uma_zfree() must not be called), the item is queued for the
 * taskqueue; else it is freed synchronously.
 */
void
lkpi_kmem_cache_free(struct linux_kmem_cache *c, void *m)
{
	if (unlikely(c->cache_flags & SLAB_TYPESAFE_BY_RCU))
		lkpi_kmem_cache_free_rcu(c, m);
	else if (unlikely(curthread->td_critnest != 0))
		lkpi_kmem_cache_free_async(c, m);
	else
		lkpi_kmem_cache_free_sync(c, m);
}
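
/*
 * Example (hypothetical): a free issued while preemption is disabled
 * (curthread->td_critnest != 0) takes the deferred path above and is
 * completed later on linux_irq_work_tq:
 *
 *	critical_enter();
 *	lkpi_kmem_cache_free(foo_cache, f);
 *	critical_exit();
 */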

void
linux_kmem_cache_destroy(struct linux_kmem_cache *c)
{
	if (c == NULL)
		return;

	if (unlikely(c->cache_flags & SLAB_TYPESAFE_BY_RCU)) {
		/* make sure all free callbacks have been called */
		rcu_barrier();
	}

	if (!llist_empty(&c->cache_items))
		taskqueue_enqueue(linux_irq_work_tq, &c->cache_task);
	taskqueue_drain(linux_irq_work_tq, &c->cache_task);
	uma_zdestroy(c->cache_zone);
	free(c, M_KMALLOC);
}

void *
lkpi___kmalloc(size_t size, gfp_t flags)
{
	size_t _s;

	/* sizeof(struct llist_node) is used for kfree_async(). */
	_s = MAX(size, sizeof(struct llist_node));

	return (malloc(_s, M_KMALLOC, linux_check_m_flags(flags)));
}

struct lkpi_kmalloc_ctx {
	size_t size;
	gfp_t flags;
	void *addr;
};

static void
lkpi_kmalloc_cb(void *ctx)
{
	struct lkpi_kmalloc_ctx *lmc = ctx;

	lmc->addr = __kmalloc(lmc->size, lmc->flags);
}

void *
lkpi_kmalloc(size_t size, gfp_t flags)
{
	struct lkpi_kmalloc_ctx lmc = { .size = size, .flags = flags };

	lkpi_fpu_safe_exec(&lkpi_kmalloc_cb, &lmc);
	return (lmc.addr);
}
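
/*
 * Note (an assumption from the structure above, not a statement of
 * the wrapper's full contract): lkpi_kmalloc() backs the Linux-facing
 * kmalloc() wrapper, and the lkpi_fpu_safe_exec() indirection exists
 * so the allocation can run safely even when the caller is inside a
 * kernel FPU section.  Hypothetical usage:
 *
 *	buf = kmalloc(len, GFP_KERNEL);
 *	...
 *	kfree(buf);
 */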

static void
linux_kfree_async_fn(void *context, int pending)
{
	struct llist_node *freed;

	while ((freed = llist_del_first(&linux_kfree_async_list)) != NULL)
		kfree(freed);
}

static struct task linux_kfree_async_task =
    TASK_INITIALIZER(0, linux_kfree_async_fn, &linux_kfree_async_task);

void
linux_kfree_async(void *addr)
{
	if (addr == NULL)
		return;
	llist_add(addr, &linux_kfree_async_list);
	taskqueue_enqueue(linux_irq_work_tq, &linux_kfree_async_task);
}
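
/*
 * Note: linux_kfree_async() serves contexts where a synchronous
 * kfree() is unsafe; the memory is queued and released later from the
 * linux_irq_work_tq taskqueue.  This is also why lkpi___kmalloc()
 * never allocates less than sizeof(struct llist_node): the buffer
 * itself doubles as the llist node while it waits to be freed.
 */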
263