// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/mempool.c
 *
 *  memory buffer pool support. Such pools are mostly used
 *  for guaranteed, deadlock-free memory allocations during
 *  extreme VM load.
 *
 *  started by Ingo Molnar, Copyright (C) 2001
 *  debugging by David Rientjes, Copyright (C) 2015
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/writeback.h>
#include "slab.h"

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
static void poison_error(mempool_t *pool, void *element, size_t size,
			 size_t byte)
{
	const int nr = pool->curr_nr;
	const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
	const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
	int i;

	pr_err("BUG: mempool element poison mismatch\n");
	pr_err("Mempool %p size %zu\n", pool, size);
	pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
	for (i = start; i < end; i++)
		pr_cont("%x ", *(u8 *)(element + i));
	pr_cont("%s\n", end < size ? "..." : "");
	dump_stack();
}

static void __check_element(mempool_t *pool, void *element, size_t size)
{
	u8 *obj = element;
	size_t i;

	for (i = 0; i < size; i++) {
		u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;

		if (obj[i] != exp) {
			poison_error(pool, element, size, i);
			return;
		}
	}
	memset(obj, POISON_INUSE, size);
}

static void check_element(mempool_t *pool, void *element)
{
	/* Mempools backed by slab allocator */
	if (pool->free == mempool_kfree) {
		__check_element(pool, element, (size_t)pool->pool_data);
	} else if (pool->free == mempool_free_slab) {
		__check_element(pool, element, kmem_cache_size(pool->pool_data));
	} else if (pool->free == mempool_free_pages) {
		/* Mempools backed by page allocator */
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_atomic((struct page *)element);

		__check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}

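/*
 * While an element sits unused in the pool it is poisoned: every byte is
 * POISON_FREE (0x6b) except the last, which is POISON_END (0xa5), so a free
 * 4-byte element reads 6b 6b 6b a5.  check_element() above verifies this
 * pattern before an element is handed out and then fills it with
 * POISON_INUSE.
 */
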
static void __poison_element(void *element, size_t size)
{
	u8 *obj = element;

	memset(obj, POISON_FREE, size - 1);
	obj[size - 1] = POISON_END;
}

static void poison_element(mempool_t *pool, void *element)
{
	/* Mempools backed by slab allocator */
	if (pool->alloc == mempool_kmalloc) {
		__poison_element(element, (size_t)pool->pool_data);
	} else if (pool->alloc == mempool_alloc_slab) {
		__poison_element(element, kmem_cache_size(pool->pool_data));
	} else if (pool->alloc == mempool_alloc_pages) {
		/* Mempools backed by page allocator */
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_atomic((struct page *)element);

		__poison_element(addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}
#else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
static inline void check_element(mempool_t *pool, void *element)
{
}
static inline void poison_element(mempool_t *pool, void *element)
{
}
#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */

static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
{
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		kasan_slab_free_mempool(element);
	else if (pool->alloc == mempool_alloc_pages)
		kasan_poison_pages(element, (unsigned long)pool->pool_data,
				   false);
}

static void kasan_unpoison_element(mempool_t *pool, void *element)
{
	if (pool->alloc == mempool_kmalloc)
		kasan_unpoison_range(element, (size_t)pool->pool_data);
	else if (pool->alloc == mempool_alloc_slab)
		kasan_unpoison_range(element, kmem_cache_size(pool->pool_data));
	else if (pool->alloc == mempool_alloc_pages)
		kasan_unpoison_pages(element, (unsigned long)pool->pool_data,
				     false);
}

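/*
 * The preallocated reserve lives in pool->elements[], used as a LIFO stack:
 * add_element() pushes an element (poisoning it for its idle time in the
 * pool) and remove_element() pops one (unpoisoning it and checking the
 * poison pattern before it is handed back to the caller).
 */
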
static __always_inline void add_element(mempool_t *pool, void *element)
{
	BUG_ON(pool->curr_nr >= pool->min_nr);
	poison_element(pool, element);
	kasan_poison_element(pool, element);
	pool->elements[pool->curr_nr++] = element;
}

static void *remove_element(mempool_t *pool)
{
	void *element = pool->elements[--pool->curr_nr];

	BUG_ON(pool->curr_nr < 0);
	kasan_unpoison_element(pool, element);
	check_element(pool, element);
	return element;
}

/**
 * mempool_exit - exit a mempool initialized with mempool_init()
 * @pool:      pointer to the memory pool which was initialized with
 *             mempool_init().
 *
 * Free all reserved elements in @pool.  Unlike mempool_destroy(), this does
 * not free @pool itself, which may be embedded in another structure.  This
 * function only sleeps if the free_fn() function sleeps.
 *
 * May be called on a zeroed but uninitialized mempool (i.e. allocated with
 * kzalloc()).
 */
void mempool_exit(mempool_t *pool)
{
	while (pool->curr_nr) {
		void *element = remove_element(pool);
		pool->free(element, pool->pool_data);
	}
	kfree(pool->elements);
	pool->elements = NULL;
}
EXPORT_SYMBOL(mempool_exit);

/**
 * mempool_destroy - deallocate a memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * Free all reserved elements in @pool and @pool itself.  This function
 * only sleeps if the free_fn() function sleeps.
 */
void mempool_destroy(mempool_t *pool)
{
	if (unlikely(!pool))
		return;

	mempool_exit(pool);
	kfree(pool);
}
EXPORT_SYMBOL(mempool_destroy);

int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
		      mempool_free_t *free_fn, void *pool_data,
		      gfp_t gfp_mask, int node_id)
{
	spin_lock_init(&pool->lock);
	pool->min_nr	= min_nr;
	pool->pool_data = pool_data;
	pool->alloc	= alloc_fn;
	pool->free	= free_fn;
	init_waitqueue_head(&pool->wait);

	pool->elements = kmalloc_array_node(min_nr, sizeof(void *),
					    gfp_mask, node_id);
	if (!pool->elements)
		return -ENOMEM;

	/*
	 * First pre-allocate the guaranteed number of buffers.
	 */
	while (pool->curr_nr < pool->min_nr) {
		void *element;

		element = pool->alloc(gfp_mask, pool->pool_data);
		if (unlikely(!element)) {
			mempool_exit(pool);
			return -ENOMEM;
		}
		add_element(pool, element);
	}

	return 0;
}
EXPORT_SYMBOL(mempool_init_node);

/**
 * mempool_init - initialize a memory pool
 * @pool:      pointer to the memory pool that should be initialized
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * Like mempool_create(), but initializes the pool in place (i.e. embedded in
 * another structure).
 *
 * Return: %0 on success, negative error code otherwise.
 */
int mempool_init(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
		 mempool_free_t *free_fn, void *pool_data)
{
	return mempool_init_node(pool, min_nr, alloc_fn, free_fn,
				 pool_data, GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_init);
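
/*
 * Illustrative sketch (not part of this file): a driver that embeds a
 * mempool in its own state pairs mempool_init() with mempool_exit().
 * "my_dev", "my_dev_setup" and "my_cache" are hypothetical names.
 *
 *	struct my_dev {
 *		mempool_t pool;
 *	};
 *
 *	int my_dev_setup(struct my_dev *dev, struct kmem_cache *my_cache)
 *	{
 *		return mempool_init(&dev->pool, 16, mempool_alloc_slab,
 *				    mempool_free_slab, my_cache);
 *	}
 *
 *	void my_dev_teardown(struct my_dev *dev)
 *	{
 *		mempool_exit(&dev->pool);  (frees the reserve, not dev)
 *	}
 */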

/**
 * mempool_create - create a memory pool
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * This function creates and allocates a guaranteed size, preallocated
 * memory pool. The pool can be used from the mempool_alloc() and mempool_free()
 * functions. This function might sleep. Both the alloc_fn() and the free_fn()
 * functions might sleep - as long as the mempool_alloc() function is not called
 * from IRQ contexts.
 *
 * Return: pointer to the created memory pool object or %NULL on error.
 */
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
				mempool_free_t *free_fn, void *pool_data)
{
	return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data,
				   GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_create);
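
/*
 * Illustrative sketch (not part of this file): creating and destroying a
 * standalone pool of eight kmalloc'd 256-byte buffers.  The mempool calls
 * below are the real API; the surrounding code is hypothetical.
 *
 *	mempool_t *pool;
 *
 *	pool = mempool_create(8, mempool_kmalloc, mempool_kfree,
 *			      (void *)(size_t)256);
 *	if (!pool)
 *		return -ENOMEM;
 *	...
 *	mempool_destroy(pool);
 */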

mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
			       mempool_free_t *free_fn, void *pool_data,
			       gfp_t gfp_mask, int node_id)
{
	mempool_t *pool;

	pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
	if (!pool)
		return NULL;

	if (mempool_init_node(pool, min_nr, alloc_fn, free_fn, pool_data,
			      gfp_mask, node_id)) {
		kfree(pool);
		return NULL;
	}

	return pool;
}
EXPORT_SYMBOL(mempool_create_node);

/**
 * mempool_resize - resize an existing memory pool
 * @pool:       pointer to the memory pool which was allocated via
 *              mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 * This function may sleep.
 *
 * Note that the caller must guarantee that no mempool_destroy() is called
 * while this function is running. mempool_alloc() & mempool_free()
 * might be called (e.g. from IRQ contexts) while this function executes.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int mempool_resize(mempool_t *pool, int new_min_nr)
{
	void *element;
	void **new_elements;
	unsigned long flags;

	BUG_ON(new_min_nr <= 0);
	might_sleep();

	spin_lock_irqsave(&pool->lock, flags);
	if (new_min_nr <= pool->min_nr) {
		while (new_min_nr < pool->curr_nr) {
			element = remove_element(pool);
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);
			spin_lock_irqsave(&pool->lock, flags);
		}
		pool->min_nr = new_min_nr;
		goto out_unlock;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* Grow the pool */
	new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
				     GFP_KERNEL);
	if (!new_elements)
		return -ENOMEM;

	spin_lock_irqsave(&pool->lock, flags);
	if (unlikely(new_min_nr <= pool->min_nr)) {
		/* Raced, other resize will do our work */
		spin_unlock_irqrestore(&pool->lock, flags);
		kfree(new_elements);
		goto out;
	}
	memcpy(new_elements, pool->elements,
			pool->curr_nr * sizeof(*new_elements));
	kfree(pool->elements);
	pool->elements = new_elements;
	pool->min_nr = new_min_nr;

	while (pool->curr_nr < pool->min_nr) {
		spin_unlock_irqrestore(&pool->lock, flags);
		element = pool->alloc(GFP_KERNEL, pool->pool_data);
		if (!element)
			goto out;
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
		} else {
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);	/* Raced */
			goto out;
		}
	}
out_unlock:
	spin_unlock_irqrestore(&pool->lock, flags);
out:
	return 0;
}
EXPORT_SYMBOL(mempool_resize);

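/*
 * Illustrative sketch (not part of this file): growing a pool's reserve
 * when more in-flight requests must be guaranteed, e.g. after a device is
 * added.  "pool" and "nr_devices" are hypothetical.
 *
 *	if (mempool_resize(pool, 2 * nr_devices))
 *		pr_warn("could not grow reserve; old minimum still applies\n");
 */
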
/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 * @gfp_mask:  the usual allocation bitmask.
 *
 * This function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (It might
 * fail if called from an IRQ context.)
 * Note: using __GFP_ZERO is not supported.
 *
 * Return: pointer to the allocated element or %NULL on error.
 */
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
	void *element;
	unsigned long flags;
	wait_queue_entry_t wait;
	gfp_t gfp_temp;

	VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
	might_alloc(gfp_mask);

	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
	gfp_mask |= __GFP_NOWARN;	/* failures are OK */

	gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO);

repeat_alloc:

	element = pool->alloc(gfp_temp, pool->pool_data);
	if (likely(element != NULL))
		return element;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool);
		spin_unlock_irqrestore(&pool->lock, flags);
		/* paired with rmb in mempool_free(), read comment there */
		smp_wmb();
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(element);
		return element;
	}

	/*
	 * We use gfp mask w/o direct reclaim or IO for the first round.  If
	 * alloc failed with that and @pool was empty, retry immediately.
	 */
	if (gfp_temp != gfp_mask) {
		spin_unlock_irqrestore(&pool->lock, flags);
		gfp_temp = gfp_mask;
		goto repeat_alloc;
	}

	/* We must not sleep if !__GFP_DIRECT_RECLAIM */
	if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
		spin_unlock_irqrestore(&pool->lock, flags);
		return NULL;
	}

	/* Let's wait for someone else to return an element to @pool */
	init_wait(&wait);
	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);

	spin_unlock_irqrestore(&pool->lock, flags);

	/*
	 * FIXME: this should be io_schedule().  The timeout is there as a
	 * workaround for some DM problems in 2.6.18.
	 */
	io_schedule_timeout(5*HZ);

	finish_wait(&pool->wait, &wait);
	goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);

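/*
 * Illustrative sketch (not part of this file): the common alloc/free
 * pairing.  With GFP_KERNEL (which includes __GFP_DIRECT_RECLAIM) the
 * allocation below never fails; when the underlying allocator is out of
 * memory it falls back to the reserve and, if that too is empty, waits for
 * another user to call mempool_free().  "pool" and "submit_buf" are
 * hypothetical.
 *
 *	void *buf = mempool_alloc(pool, GFP_KERNEL);
 *
 *	submit_buf(buf);
 *	...
 *	mempool_free(buf, pool);  (refills the reserve if it is depleted)
 */
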
/**
 * mempool_free - return an element to the pool.
 * @element:   pool element pointer.
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * This function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
	unsigned long flags;

	if (unlikely(element == NULL))
		return;

	/*
	 * Paired with the wmb in mempool_alloc().  The preceding read is
	 * for @element and the following @pool->curr_nr.  This ensures
	 * that the visible value of @pool->curr_nr is from after the
	 * allocation of @element.  This is necessary for fringe cases
	 * where @element was passed to this task without going through
	 * barriers.
	 *
	 * For example, assume @p is %NULL at the beginning and one task
	 * performs "p = mempool_alloc(...);" while another task is doing
	 * "while (!p) cpu_relax(); mempool_free(p, ...);".  This function
	 * may end up using curr_nr value which is from before allocation
	 * of @p without the following rmb.
	 */
	smp_rmb();

	/*
	 * For correctness, we need a test which is guaranteed to trigger
	 * if curr_nr + #allocated == min_nr.  Testing curr_nr < min_nr
	 * without locking achieves that and refilling as soon as possible
	 * is desirable.
	 *
	 * Because curr_nr visible here is always a value after the
	 * allocation of @element, any task which decremented curr_nr below
	 * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
	 * incremented to min_nr afterwards.  If curr_nr gets incremented
	 * to min_nr after the allocation of @element, the elements
	 * allocated after that are subject to the same guarantee.
	 *
	 * Waiters happen iff curr_nr is 0 and the above guarantee also
	 * ensures that there will be frees which return elements to the
	 * pool waking up the waiters.
	 */
	if (unlikely(READ_ONCE(pool->curr_nr) < pool->min_nr)) {
		spin_lock_irqsave(&pool->lock, flags);
		if (likely(pool->curr_nr < pool->min_nr)) {
			add_element(pool, element);
			spin_unlock_irqrestore(&pool->lock, flags);
			wake_up(&pool->wait);
			return;
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}
	pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);

/*
 * A commonly used alloc and free fn.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	VM_BUG_ON(mem->ctor);
	return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);

void mempool_free_slab(void *element, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);

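/*
 * Illustrative sketch (not part of this file): a slab-backed pool passes
 * its kmem_cache as pool_data.  include/linux/mempool.h also provides the
 * mempool_create_slab_pool() convenience wrapper for exactly this pairing.
 * "my_cache" is hypothetical.
 *
 *	mempool_t *pool = mempool_create(16, mempool_alloc_slab,
 *					 mempool_free_slab, my_cache);
 */
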
/*
 * A commonly used alloc and free fn that kmallocs/kfrees the amount of memory
 * specified by pool_data.
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)pool_data;
	return kmalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);

void mempool_kfree(void *element, void *pool_data)
{
	kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);

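/*
 * Illustrative sketch (not part of this file): a kmalloc-backed pool encodes
 * the element size directly in pool_data.  include/linux/mempool.h provides
 * the mempool_create_kmalloc_pool() wrapper for this pairing.
 *
 *	mempool_t *pool = mempool_create(8, mempool_kmalloc, mempool_kfree,
 *					 (void *)(size_t)512);
 */
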
/*
 * A simple mempool-backed page allocator that allocates pages
 * of the order specified by pool_data.
 */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
	int order = (int)(long)pool_data;
	return alloc_pages(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);

void mempool_free_pages(void *element, void *pool_data)
{
	int order = (int)(long)pool_data;
	__free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);
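
/*
 * Illustrative sketch (not part of this file): a page-backed pool encodes
 * the allocation order in pool_data, and elements are struct page pointers.
 * include/linux/mempool.h provides the mempool_create_page_pool() wrapper.
 *
 *	mempool_t *pool = mempool_create(4, mempool_alloc_pages,
 *					 mempool_free_pages, (void *)(long)0);
 *	struct page *page = mempool_alloc(pool, GFP_KERNEL);
 */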