/*
 *  linux/mm/mempool.c
 *
 *  memory buffer pool support. Such pools are mostly used
 *  for guaranteed, deadlock-free memory allocations during
 *  extreme VM load.
 *
 *  started by Ingo Molnar, Copyright (C) 2001
 *  debugging by David Rientjes, Copyright (C) 2015
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include "slab.h"

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
static void poison_error(mempool_t *pool, void *element, size_t size,
			 size_t byte)
{
	const int nr = pool->curr_nr;
	const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
	const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
	int i;

	pr_err("BUG: mempool element poison mismatch\n");
	pr_err("Mempool %p size %zu\n", pool, size);
	pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
	for (i = start; i < end; i++)
		pr_cont("%x ", *(u8 *)(element + i));
	pr_cont("%s\n", end < size ? "..." : "");
	dump_stack();
}

static void __check_element(mempool_t *pool, void *element, size_t size)
{
	u8 *obj = element;
	size_t i;

	for (i = 0; i < size; i++) {
		u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;

		if (obj[i] != exp) {
			poison_error(pool, element, size, i);
			return;
		}
	}
	memset(obj, POISON_INUSE, size);
}

static void check_element(mempool_t *pool, void *element)
{
	/* Mempools backed by slab allocator */
	if (pool->free == mempool_free_slab || pool->free == mempool_kfree)
		__check_element(pool, element, ksize(element));

	/* Mempools backed by page allocator */
	if (pool->free == mempool_free_pages) {
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_atomic((struct page *)element);

		__check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}

static void __poison_element(void *element, size_t size)
{
	u8 *obj = element;

	memset(obj, POISON_FREE, size - 1);
	obj[size - 1] = POISON_END;
}

static void poison_element(mempool_t *pool, void *element)
{
	/* Mempools backed by slab allocator */
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		__poison_element(element, ksize(element));

	/* Mempools backed by page allocator */
	if (pool->alloc == mempool_alloc_pages) {
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_atomic((struct page *)element);

		__poison_element(addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}
#else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
static inline void check_element(mempool_t *pool, void *element)
{
}
static inline void poison_element(mempool_t *pool, void *element)
{
}
#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */

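/*
 * Illustrative note (worked example, not a new mechanism): for a slab-backed
 * element of size 8, __poison_element() above leaves the bytes as
 *
 *	6b 6b 6b 6b 6b 6b 6b a5
 *
 * i.e. POISON_FREE (0x6b) in every byte except the last one, which is
 * POISON_END (0xa5).  On removal, __check_element() verifies exactly this
 * pattern and then fills the object with POISON_INUSE (0x5a) so that stale
 * reads of pool-resident data stand out.
 */
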
/*
 * Elements sitting unused in the pool are marked as freed for KASAN, so any
 * access to them while they are in the reserve is reported; they are marked
 * as allocated again right before being handed back out.
 */
static void kasan_poison_element(mempool_t *pool, void *element)
{
	if (pool->alloc == mempool_alloc_slab)
		kasan_slab_free(pool->pool_data, element);
	if (pool->alloc == mempool_kmalloc)
		kasan_kfree(element);
	if (pool->alloc == mempool_alloc_pages)
		kasan_free_pages(element, (unsigned long)pool->pool_data);
}

static void kasan_unpoison_element(mempool_t *pool, void *element)
{
	if (pool->alloc == mempool_alloc_slab)
		kasan_slab_alloc(pool->pool_data, element);
	if (pool->alloc == mempool_kmalloc)
		kasan_krealloc(element, (size_t)pool->pool_data);
	if (pool->alloc == mempool_alloc_pages)
		kasan_alloc_pages(element, (unsigned long)pool->pool_data);
}

/*
 * pool->elements is used as a LIFO stack of spare elements, with
 * pool->curr_nr counting how many are currently parked in it.
 */
static void add_element(mempool_t *pool, void *element)
{
	BUG_ON(pool->curr_nr >= pool->min_nr);
	poison_element(pool, element);
	kasan_poison_element(pool, element);
	pool->elements[pool->curr_nr++] = element;
}

static void *remove_element(mempool_t *pool)
{
	void *element = pool->elements[--pool->curr_nr];

	BUG_ON(pool->curr_nr < 0);
	check_element(pool, element);
	kasan_unpoison_element(pool, element);
	return element;
}

/**
 * mempool_destroy - deallocate a memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * Free all reserved elements in @pool and @pool itself.  This function
 * only sleeps if the free_fn() function sleeps.
 */
void mempool_destroy(mempool_t *pool)
{
	if (unlikely(!pool))
		return;

	while (pool->curr_nr) {
		void *element = remove_element(pool);
		pool->free(element, pool->pool_data);
	}
	kfree(pool->elements);
	kfree(pool);
}
EXPORT_SYMBOL(mempool_destroy);

/**
 * mempool_create - create a memory pool
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * This function creates and allocates a guaranteed-size, preallocated
 * memory pool.  The pool can then be used with the mempool_alloc() and
 * mempool_free() functions.  This function might sleep.  Both alloc_fn()
 * and free_fn() may sleep as well, as long as mempool_alloc() is never
 * called from IRQ context.
 */
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
				mempool_free_t *free_fn, void *pool_data)
{
	return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data,
				   GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_create);

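/*
 * Illustrative example (sketch only; "struct example_obj", "example_cache"
 * and "example_pool" are hypothetical names): a typical slab-backed pool
 * built on the mempool_alloc_slab()/mempool_free_slab() helpers defined at
 * the bottom of this file.
 */
#if 0	/* example only, not compiled into this file */
struct example_obj {
	struct list_head list;
	unsigned long data;
};

static struct kmem_cache *example_cache;
static mempool_t *example_pool;

static int __init example_init(void)
{
	example_cache = KMEM_CACHE(example_obj, 0);
	if (!example_cache)
		return -ENOMEM;

	/* Keep at least 16 objects in reserve to guarantee forward progress. */
	example_pool = mempool_create(16, mempool_alloc_slab,
				      mempool_free_slab, example_cache);
	if (!example_pool) {
		kmem_cache_destroy(example_cache);
		return -ENOMEM;
	}
	return 0;
}

static void __exit example_exit(void)
{
	/* Frees the 16 reserved objects and then the pool itself. */
	mempool_destroy(example_pool);
	kmem_cache_destroy(example_cache);
}
#endif
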
mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
			       mempool_free_t *free_fn, void *pool_data,
			       gfp_t gfp_mask, int node_id)
{
	mempool_t *pool;
	pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
	if (!pool)
		return NULL;
	pool->elements = kmalloc_node(min_nr * sizeof(void *),
				      gfp_mask, node_id);
	if (!pool->elements) {
		kfree(pool);
		return NULL;
	}
	spin_lock_init(&pool->lock);
	pool->min_nr = min_nr;
	pool->pool_data = pool_data;
	init_waitqueue_head(&pool->wait);
	pool->alloc = alloc_fn;
	pool->free = free_fn;

	/*
	 * First pre-allocate the guaranteed number of buffers.
	 */
	while (pool->curr_nr < pool->min_nr) {
		void *element;

		element = pool->alloc(gfp_mask, pool->pool_data);
		if (unlikely(!element)) {
			mempool_destroy(pool);
			return NULL;
		}
		add_element(pool, element);
	}
	return pool;
}
EXPORT_SYMBOL(mempool_create_node);

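/*
 * Illustrative example (sketch only; reuses the hypothetical "example_cache"
 * from the example above): the _node variant lets the caller choose the
 * gfp_mask used for the initial preallocation and the NUMA node that the
 * pool's bookkeeping structures are allocated on.
 */
#if 0	/* example only, not compiled into this file */
static mempool_t *example_create_on_node(int nid)
{
	return mempool_create_node(16, mempool_alloc_slab, mempool_free_slab,
				   example_cache, GFP_KERNEL, nid);
}
#endif
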
/**
 * mempool_resize - resize an existing memory pool
 * @pool:       pointer to the memory pool which was allocated via
 *              mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 * This function may sleep.
 *
 * Note, the caller must guarantee that no mempool_destroy() is called
 * while this function is running. mempool_alloc() & mempool_free()
 * might be called (e.g. from IRQ contexts) while this function executes.
 */
int mempool_resize(mempool_t *pool, int new_min_nr)
{
	void *element;
	void **new_elements;
	unsigned long flags;

	BUG_ON(new_min_nr <= 0);
	might_sleep();

	spin_lock_irqsave(&pool->lock, flags);
	if (new_min_nr <= pool->min_nr) {
		while (new_min_nr < pool->curr_nr) {
			element = remove_element(pool);
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);
			spin_lock_irqsave(&pool->lock, flags);
		}
		pool->min_nr = new_min_nr;
		goto out_unlock;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* Grow the pool */
	new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
				     GFP_KERNEL);
	if (!new_elements)
		return -ENOMEM;

	spin_lock_irqsave(&pool->lock, flags);
	if (unlikely(new_min_nr <= pool->min_nr)) {
		/* Raced, other resize will do our work */
		spin_unlock_irqrestore(&pool->lock, flags);
		kfree(new_elements);
		goto out;
	}
	memcpy(new_elements, pool->elements,
			pool->curr_nr * sizeof(*new_elements));
	kfree(pool->elements);
	pool->elements = new_elements;
	pool->min_nr = new_min_nr;

	while (pool->curr_nr < pool->min_nr) {
		spin_unlock_irqrestore(&pool->lock, flags);
		element = pool->alloc(GFP_KERNEL, pool->pool_data);
		if (!element)
			goto out;
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
		} else {
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);	/* Raced */
			goto out;
		}
	}
out_unlock:
	spin_unlock_irqrestore(&pool->lock, flags);
out:
	return 0;
}
EXPORT_SYMBOL(mempool_resize);

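/*
 * Illustrative example (sketch only; reuses the hypothetical "example_pool"
 * from the earlier example): scale the reserve with the number of users.
 * Shrinking takes effect immediately; growing may be completed lazily by
 * later mempool_free() calls.
 */
#if 0	/* example only, not compiled into this file */
static int example_set_nr_users(int nr_users)
{
	/*
	 * mempool_resize() only returns -ENOMEM if the new element array
	 * cannot be allocated; nr_users must be at least 1.
	 */
	return mempool_resize(example_pool, 16 * nr_users);
}
#endif
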
/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 * @gfp_mask:  the usual allocation bitmask.
 *
 * This function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (It might
 * fail if called from an IRQ context.)
 * Note: using __GFP_ZERO is not supported.
 */
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
	void *element;
	unsigned long flags;
	wait_queue_t wait;
	gfp_t gfp_temp;

	VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
	might_sleep_if(gfp_mask & __GFP_WAIT);

	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
	gfp_mask |= __GFP_NOWARN;	/* failures are OK */

	gfp_temp = gfp_mask & ~(__GFP_WAIT|__GFP_IO);

repeat_alloc:

	element = pool->alloc(gfp_temp, pool->pool_data);
	if (likely(element != NULL))
		return element;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool);
		spin_unlock_irqrestore(&pool->lock, flags);
		/* paired with rmb in mempool_free(), read comment there */
		smp_wmb();
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(element);
		return element;
	}

	/*
	 * We use gfp mask w/o __GFP_WAIT or IO for the first round.  If
	 * alloc failed with that and @pool was empty, retry immediately.
	 */
	if (gfp_temp != gfp_mask) {
		spin_unlock_irqrestore(&pool->lock, flags);
		gfp_temp = gfp_mask;
		goto repeat_alloc;
	}

	/* We must not sleep if !__GFP_WAIT */
	if (!(gfp_mask & __GFP_WAIT)) {
		spin_unlock_irqrestore(&pool->lock, flags);
		return NULL;
	}

	/* Let's wait for someone else to return an element to @pool */
	init_wait(&wait);
	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);

	spin_unlock_irqrestore(&pool->lock, flags);

	/*
	 * FIXME: this should be io_schedule().  The timeout is there as a
	 * workaround for some DM problems in 2.6.18.
	 */
	io_schedule_timeout(5*HZ);

	finish_wait(&pool->wait, &wait);
	goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);

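/*
 * Illustrative example (sketch only; reuses the hypothetical "example_pool"
 * and "struct example_obj" from above): with a sleeping gfp_mask such as
 * GFP_NOIO, mempool_alloc() cannot fail - if the underlying allocator has
 * nothing, it falls back to the preallocated reserve and, if that is empty
 * too, waits for another user to call mempool_free().
 */
#if 0	/* example only, not compiled into this file */
static void example_submit(void)
{
	struct example_obj *obj;

	obj = mempool_alloc(example_pool, GFP_NOIO);

	/* ... drive the I/O with obj ... */

	mempool_free(obj, example_pool);
}
#endif
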
/**
 * mempool_free - return an element to the pool.
 * @element:   pool element pointer.
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * This function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
	unsigned long flags;

	if (unlikely(element == NULL))
		return;

	/*
	 * Paired with the wmb in mempool_alloc().  The preceding read is
	 * for @element and the following @pool->curr_nr.  This ensures
	 * that the visible value of @pool->curr_nr is from after the
	 * allocation of @element.  This is necessary for fringe cases
	 * where @element was passed to this task without going through
	 * barriers.
	 *
	 * For example, assume @p is %NULL at the beginning and one task
	 * performs "p = mempool_alloc(...);" while another task is doing
	 * "while (!p) cpu_relax(); mempool_free(p, ...);".  This function
	 * may end up using curr_nr value which is from before allocation
	 * of @p without the following rmb.
	 */
	smp_rmb();

	/*
	 * For correctness, we need a test which is guaranteed to trigger
	 * if curr_nr + #allocated == min_nr.  Testing curr_nr < min_nr
	 * without locking achieves that and refilling as soon as possible
	 * is desirable.
	 *
	 * Because curr_nr visible here is always a value after the
	 * allocation of @element, any task which decremented curr_nr below
	 * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
	 * incremented to min_nr afterwards.  If curr_nr gets incremented
	 * to min_nr after the allocation of @element, the elements
	 * allocated after that are subject to the same guarantee.
	 *
	 * Waiters happen iff curr_nr is 0 and the above guarantee also
	 * ensures that there will be frees which return elements to the
	 * pool waking up the waiters.
	 */
	if (unlikely(pool->curr_nr < pool->min_nr)) {
		spin_lock_irqsave(&pool->lock, flags);
		if (likely(pool->curr_nr < pool->min_nr)) {
			add_element(pool, element);
			spin_unlock_irqrestore(&pool->lock, flags);
			wake_up(&pool->wait);
			return;
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}
	pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);

/*
 * A commonly used alloc and free fn.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	VM_BUG_ON(mem->ctor);
	return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);

void mempool_free_slab(void *element, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);

/*
 * A commonly used alloc and free fn pair that kmalloc()/kfree() the amount
 * of memory specified by pool_data.
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)pool_data;
	return kmalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);

void mempool_kfree(void *element, void *pool_data)
{
	kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);

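/*
 * Illustrative example (sketch only; "example_buf_pool" is a hypothetical
 * name): a pool of fixed-size kmalloc() buffers.  pool_data carries the
 * buffer size, cast to and from a pointer.  include/linux/mempool.h also
 * provides mempool_create_kmalloc_pool() as a shorthand for this pattern.
 */
#if 0	/* example only, not compiled into this file */
static mempool_t *example_buf_pool;

static int example_buf_pool_init(void)
{
	/* Reserve eight 256-byte buffers. */
	example_buf_pool = mempool_create(8, mempool_kmalloc, mempool_kfree,
					  (void *)(unsigned long)256);
	return example_buf_pool ? 0 : -ENOMEM;
}
#endif
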
/*
 * A simple mempool-backed page allocator that allocates pages
 * of the order specified by pool_data.
 */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
	int order = (int)(long)pool_data;
	return alloc_pages(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);

void mempool_free_pages(void *element, void *pool_data)
{
	int order = (int)(long)pool_data;
	__free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);

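/*
 * Illustrative example (sketch only; "example_page_pool" is a hypothetical
 * name): a pool of order-0 pages.  pool_data carries the allocation order,
 * cast to and from a pointer.  include/linux/mempool.h also provides
 * mempool_create_page_pool() as a shorthand for this pattern.
 */
#if 0	/* example only, not compiled into this file */
static mempool_t *example_page_pool;

static int example_page_pool_init(void)
{
	/* Reserve 32 single (order-0) pages. */
	example_page_pool = mempool_create(32, mempool_alloc_pages,
					   mempool_free_pages,
					   (void *)(long)0);
	return example_page_pool ? 0 : -ENOMEM;
}
#endif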