xref: /freebsd/sys/compat/linuxkpi/common/src/linux_slab.c (revision 1c95d401ebe5075ebb38b57638830713a496f107)
/*-
 * Copyright (c) 2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/irq_work.h>
#include <linux/llist.h>

#include <sys/param.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>

struct linux_kmem_rcu {
	struct rcu_head rcu_head;
	struct linux_kmem_cache *cache;
};

struct linux_kmem_cache {
	uma_zone_t cache_zone;
	linux_kmem_ctor_t *cache_ctor;
	unsigned cache_flags;
	unsigned cache_size;
	struct llist_head cache_items;
	struct task cache_task;
};

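/*
 * For SLAB_TYPESAFE_BY_RCU caches a struct linux_kmem_rcu is stored at the
 * end of every item.  These macros convert between a pointer to an item and
 * a pointer to its embedded RCU header.
 */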
#define	LINUX_KMEM_TO_RCU(c, m)					\
	((struct linux_kmem_rcu *)((char *)(m) +		\
	(c)->cache_size - sizeof(struct linux_kmem_rcu)))

#define	LINUX_RCU_TO_KMEM(r)					\
	((void *)((char *)(r) + sizeof(struct linux_kmem_rcu) - \
	(r)->cache->cache_size))

static LLIST_HEAD(linux_kfree_async_list);

static void	lkpi_kmem_cache_free_async_fn(void *, int);

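/*
 * Allocation entry points.  The cache pointer is passed as the UMA
 * allocation argument so that linux_kmem_ctor() can find it.
 */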
void *
lkpi_kmem_cache_alloc(struct linux_kmem_cache *c, gfp_t flags)
{
	return (uma_zalloc_arg(c->cache_zone, c,
	    linux_check_m_flags(flags)));
}

void *
lkpi_kmem_cache_zalloc(struct linux_kmem_cache *c, gfp_t flags)
{
	return (uma_zalloc_arg(c->cache_zone, c,
	    linux_check_m_flags(flags | M_ZERO)));
}

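/*
 * UMA item constructor.  For SLAB_TYPESAFE_BY_RCU caches it records a
 * back-pointer to the owning cache in the item's RCU header, which the RCU
 * free callback later uses to find the zone.  The optional Linux-style
 * constructor is invoked afterwards.
 */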
static int
linux_kmem_ctor(void *mem, int size, void *arg, int flags)
{
	struct linux_kmem_cache *c = arg;

	if (unlikely(c->cache_flags & SLAB_TYPESAFE_BY_RCU)) {
		struct linux_kmem_rcu *rcu = LINUX_KMEM_TO_RCU(c, mem);

		/* duplicate cache pointer */
		rcu->cache = c;
	}

	/* check for constructor */
	if (likely(c->cache_ctor != NULL))
		c->cache_ctor(mem);

	return (0);
}

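/*
 * RCU callback scheduled by lkpi_kmem_cache_free_rcu().  It runs after a
 * grace period has elapsed and returns the item to its UMA zone.
 */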
static void
linux_kmem_cache_free_rcu_callback(struct rcu_head *head)
{
	struct linux_kmem_rcu *rcu =
	    container_of(head, struct linux_kmem_rcu, rcu_head);

	uma_zfree(rcu->cache->cache_zone, LINUX_RCU_TO_KMEM(rcu));
}

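/*
 * Create a Linux-style kmem cache backed by a UMA zone.  For
 * SLAB_TYPESAFE_BY_RCU caches the item size is grown to make room for the
 * trailing struct linux_kmem_rcu; otherwise the item must at least be able
 * to hold a struct llist_node so it can be queued for asynchronous freeing.
 */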
struct linux_kmem_cache *
linux_kmem_cache_create(const char *name, size_t size, size_t align,
    unsigned flags, linux_kmem_ctor_t *ctor)
{
	struct linux_kmem_cache *c;

	c = malloc(sizeof(*c), M_KMALLOC, M_WAITOK);

	if (flags & SLAB_HWCACHE_ALIGN)
		align = UMA_ALIGN_CACHE;
	else if (align != 0)
		align--;

	if (flags & SLAB_TYPESAFE_BY_RCU) {
		/* make room for RCU structure */
		size = ALIGN(size, sizeof(void *));
		size += sizeof(struct linux_kmem_rcu);

		/* create cache_zone */
		c->cache_zone = uma_zcreate(name, size,
		    linux_kmem_ctor, NULL, NULL, NULL,
		    align, UMA_ZONE_ZINIT);
	} else {
		/* make room for async task list items */
		size = MAX(size, sizeof(struct llist_node));

		/* create cache_zone */
		c->cache_zone = uma_zcreate(name, size,
		    ctor ? linux_kmem_ctor : NULL, NULL,
		    NULL, NULL, align, 0);
	}

	c->cache_flags = flags;
	c->cache_ctor = ctor;
	c->cache_size = size;
	init_llist_head(&c->cache_items);
	TASK_INIT(&c->cache_task, 0, lkpi_kmem_cache_free_async_fn, c);
	return (c);
}

static inline void
lkpi_kmem_cache_free_rcu(struct linux_kmem_cache *c, void *m)
{
	struct linux_kmem_rcu *rcu = LINUX_KMEM_TO_RCU(c, m);

	call_rcu(&rcu->rcu_head, linux_kmem_cache_free_rcu_callback);
}

static inline void
lkpi_kmem_cache_free_sync(struct linux_kmem_cache *c, void *m)
{
	uma_zfree(c->cache_zone, m);
}

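/*
 * Deferred freeing.  Items freed from a critical section are not returned
 * to UMA directly; they are queued on cache_items and released later from
 * the linux_irq_work_tq taskqueue.
 */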
static void
lkpi_kmem_cache_free_async_fn(void *context, int pending)
{
	struct linux_kmem_cache *c = context;
	struct llist_node *freed, *next;

	llist_for_each_safe(freed, next, llist_del_all(&c->cache_items))
		lkpi_kmem_cache_free_sync(c, freed);
}

static inline void
lkpi_kmem_cache_free_async(struct linux_kmem_cache *c, void *m)
{
	if (m == NULL)
		return;

	llist_add(m, &c->cache_items);
	taskqueue_enqueue(linux_irq_work_tq, &c->cache_task);
}

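/*
 * Free an item, choosing one of three paths: defer via RCU for
 * SLAB_TYPESAFE_BY_RCU caches, defer to the taskqueue when called from a
 * critical section, or free synchronously otherwise.
 */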
void
lkpi_kmem_cache_free(struct linux_kmem_cache *c, void *m)
{
	if (unlikely(c->cache_flags & SLAB_TYPESAFE_BY_RCU))
		lkpi_kmem_cache_free_rcu(c, m);
	else if (unlikely(curthread->td_critnest != 0))
		lkpi_kmem_cache_free_async(c, m);
	else
		lkpi_kmem_cache_free_sync(c, m);
}

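/*
 * Destroy a cache.  Outstanding RCU free callbacks are waited for with
 * rcu_barrier() and any asynchronously queued items are drained before the
 * UMA zone is destroyed.
 */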
void
linux_kmem_cache_destroy(struct linux_kmem_cache *c)
{
	if (c == NULL)
		return;

	if (unlikely(c->cache_flags & SLAB_TYPESAFE_BY_RCU)) {
		/* make sure all free callbacks have been called */
		rcu_barrier();
	}

	if (!llist_empty(&c->cache_items))
		taskqueue_enqueue(linux_irq_work_tq, &c->cache_task);
	taskqueue_drain(linux_irq_work_tq, &c->cache_task);
	uma_zdestroy(c->cache_zone);
	free(c, M_KMALLOC);
}

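/*
 * kmalloc() backends.  Requests up to PAGE_SIZE are served by malloc(9);
 * larger requests use contigmalloc(9) so that the memory is physically
 * contiguous, as Linux callers expect from kmalloc().
 */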
void *
lkpi___kmalloc_node(size_t size, gfp_t flags, int node)
{
	if (size <= PAGE_SIZE)
		return (malloc_domainset(size, M_KMALLOC,
		    linux_get_vm_domain_set(node), linux_check_m_flags(flags)));
	else
		return (contigmalloc_domainset(size, M_KMALLOC,
		    linux_get_vm_domain_set(node), linux_check_m_flags(flags),
		    0, -1UL, PAGE_SIZE, 0));
}

void *
lkpi___kmalloc(size_t size, gfp_t flags)
{
	size_t _s;

	/* sizeof(struct llist_node) is used for kfree_async(). */
	_s = MAX(size, sizeof(struct llist_node));

	if (_s <= PAGE_SIZE)
		return (malloc(_s, M_KMALLOC, linux_check_m_flags(flags)));
	else
		return (contigmalloc(_s, M_KMALLOC, linux_check_m_flags(flags),
		    0, -1UL, PAGE_SIZE, 0));
}

void *
lkpi_krealloc(void *ptr, size_t size, gfp_t flags)
{
	void *nptr;
	size_t osize;

	/*
	 * First handle invariants based on function arguments.
	 */
	if (ptr == NULL)
		return (kmalloc(size, flags));

	osize = ksize(ptr);
	if (size <= osize)
		return (ptr);

	/*
	 * We know the new size > original size.  realloc(9) does not (and
	 * cannot) know about our requirements for physically contiguous
	 * memory, so we can only call it for sizes up to and including
	 * PAGE_SIZE, and otherwise have to replicate its functionality
	 * using kmalloc to get the contigmalloc(9) backing.
	 */
	if (size <= PAGE_SIZE)
		return (realloc(ptr, size, M_KMALLOC,
		    linux_check_m_flags(flags)));

	nptr = kmalloc(size, flags);
	if (nptr == NULL)
		return (NULL);

	memcpy(nptr, ptr, osize);
	kfree(ptr);
	return (nptr);
}

struct lkpi_kmalloc_ctx {
	size_t size;
	gfp_t flags;
	void *addr;
};

static void
lkpi_kmalloc_cb(void *ctx)
{
	struct lkpi_kmalloc_ctx *lmc = ctx;

	lmc->addr = __kmalloc(lmc->size, lmc->flags);
}

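/*
 * FPU-safe kmalloc() wrapper: the allocation runs through
 * lkpi_fpu_safe_exec() so that it may be called from a context that is
 * currently using the kernel FPU.
 */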
void *
lkpi_kmalloc(size_t size, gfp_t flags)
{
	struct lkpi_kmalloc_ctx lmc = { .size = size, .flags = flags };

	lkpi_fpu_safe_exec(&lkpi_kmalloc_cb, &lmc);
	return (lmc.addr);
}

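/*
 * Asynchronous kfree().  Addresses are linked onto a global llist (every
 * kmalloc() allocation is large enough to hold a struct llist_node) and
 * freed later from the linux_irq_work_tq taskqueue.
 */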
static void
linux_kfree_async_fn(void *context, int pending)
{
	struct llist_node *freed;

	while ((freed = llist_del_first(&linux_kfree_async_list)) != NULL)
		kfree(freed);
}

static struct task linux_kfree_async_task =
    TASK_INITIALIZER(0, linux_kfree_async_fn, &linux_kfree_async_task);

void
linux_kfree_async(void *addr)
{
	if (addr == NULL)
		return;
	llist_add(addr, &linux_kfree_async_list);
	taskqueue_enqueue(linux_irq_work_tq, &linux_kfree_async_task);
}