xref: /linux/mm/cma.c (revision 340e983ab8afd02b59d698dd1365d7773bf136b3)
/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <trace/events/cma.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the first PFN at or above base_pfn that is aligned to align_order,
 * and return its offset from base_pfn expressed in order_per_bit units.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;

	return (ALIGN(cma->base_pfn, (1UL << align_order))
		- cma->base_pfn) >> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
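
/*
 * Worked example for the three helpers above (illustrative only, assuming
 * order_per_bit == 2, i.e. one bitmap bit covers four pages, and
 * base_pfn == 1000):
 *
 *	cma_bitmap_aligned_mask(cma, 4)   == (1 << (4 - 2)) - 1 == 3
 *	cma_bitmap_aligned_offset(cma, 4) == (ALIGN(1000, 16) - 1000) >> 2 == 2
 *	cma_bitmap_pages_to_bits(cma, 9)  == ALIGN(9, 4) >> 2 == 3
 *
 * So a request for 9 pages at order-4 alignment consumes 3 bitmap bits, and
 * cma_alloc() passes the 2-bit offset to the bitmap search so that aligned
 * bitmap positions map back to properly aligned PFNs.
 */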

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto err;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return 0;

err:
	kfree(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}
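
/*
 * Worked example for the activation loop above (illustrative only, assuming
 * 4 KiB pages and pageblock_order == 10, i.e. 4 MiB pageblocks): a 16 MiB
 * CMA area has cma->count == 4096 pages, so cma->count >> pageblock_order
 * is 4 and the do/while loop runs four times, checking that every page of
 * each pageblock sits in the same zone and then handing that pageblock to
 * the buddy allocator as MIGRATE_CMA via init_cma_reserved_pageblock().
 */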

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}
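
/*
 * Example (an illustrative sketch, not part of this file): handing an
 * already-reserved physical range over to CMA. The names my_cma and
 * my_cma_setup() are hypothetical; the range must already be reserved in
 * memblock (e.g. via memblock_reserve()) and satisfy the alignment checks
 * above, otherwise cma_init_reserved_mem() returns -EINVAL.
 *
 *	static struct cma *my_cma;
 *
 *	static int __init my_cma_setup(phys_addr_t base, phys_addr_t size)
 *	{
 *		return cma_init_reserved_mem(base, size, 0, &my_cma);
 *	}
 *
 * Passing 0 for order_per_bit makes each bitmap bit track a single page.
 */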

/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be a power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. It allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base.  If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

#ifdef CONFIG_X86
	/*
	 * high_memory is not part of the direct mapping, so taking its
	 * physical address is not strictly valid. We only need the physical
	 * address of the highmem boundary, though, so it is acceptable here.
	 * x86 has a validation check that would trip on such an address, so
	 * use the __pa_nodebug() variant to avoid it.
	 */
	highmem_start = __pa_nodebug(high_memory);
#else
	highmem_start = __pa(high_memory);
#endif
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable migratetype pages by the page allocator's buddy algorithm.
	 * In that case a later contiguous allocation could fail, which is not
	 * what we want, so enforce at least pageblock/MAX_ORDER alignment.
	 */
	alignment = max(alignment,  (phys_addr_t)PAGE_SIZE <<
			  max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the requested region must not cross
	 * the low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range(size, alignment,
						    highmem_start, limit,
						    MEMBLOCK_NONE);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range(size, alignment, base,
						    limit,
						    MEMBLOCK_NONE);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects but this address isn't mapped and accessible.
		 */
		kmemleak_ignore(phys_to_virt(addr));
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
	if (ret)
		goto err;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
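
/*
 * Example (an illustrative sketch, not part of this file): reserving a
 * 64 MiB CMA area from an early, arch-specific reserve hook while memblock
 * is still active. The names my_cma and my_board_reserve() are hypothetical.
 *
 *	static struct cma *my_cma;
 *
 *	void __init my_board_reserve(void)
 *	{
 *		int err;
 *
 *		err = cma_declare_contiguous(0, SZ_64M, 0, 0, 0, false, &my_cma);
 *		if (err)
 *			pr_warn("my_cma: CMA reservation failed: %d\n", err);
 *	}
 *
 * Passing base == 0 and limit == 0 lets memblock pick any suitable range;
 * the area is only activated later by cma_init_reserved_areas().
 */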

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates a contiguous run of pages from the specified
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc(pfn, page, count, align);

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}

/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(pfn, pages, count);

	return true;
}
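
/*
 * Example (an illustrative sketch, not part of this file): a driver
 * allocating and later freeing 16 contiguous pages (64 KiB with 4 KiB
 * pages) from an area declared earlier. The name my_cma is hypothetical;
 * get_order(SZ_64K) supplies the requested alignment as a page order.
 *
 *	struct page *page;
 *
 *	page = cma_alloc(my_cma, 16, get_order(SZ_64K));
 *	if (!page)
 *		return -ENOMEM;
 *
 *	... use the 16 contiguous pages starting at page ...
 *
 *	if (!cma_release(my_cma, page, 16))
 *		pr_warn("pages did not come from my_cma\n");
 *
 * cma_alloc() retries internally on -EBUSY, so callers only see NULL on
 * failure; cma_release() returns false if the pages lie outside the area.
 */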