xref: /linux/mm/cma.c (revision 7f71507851fc7764b36a3221839607d3a45c2025)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Contiguous Memory Allocator
4  *
5  * Copyright (c) 2010-2011 by Samsung Electronics.
6  * Copyright IBM Corporation, 2013
7  * Copyright LG Electronics Inc., 2014
8  * Written by:
9  *	Marek Szyprowski <m.szyprowski@samsung.com>
10  *	Michal Nazarewicz <mina86@mina86.com>
11  *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
12  *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
13  */
14 
15 #define pr_fmt(fmt) "cma: " fmt
16 
17 #define CREATE_TRACE_POINTS
18 
19 #include <linux/memblock.h>
20 #include <linux/err.h>
21 #include <linux/mm.h>
22 #include <linux/sizes.h>
23 #include <linux/slab.h>
24 #include <linux/log2.h>
25 #include <linux/cma.h>
26 #include <linux/highmem.h>
27 #include <linux/io.h>
28 #include <linux/kmemleak.h>
29 #include <trace/events/cma.h>
30 
31 #include "internal.h"
32 #include "cma.h"
33 
34 struct cma cma_areas[MAX_CMA_AREAS];
35 unsigned int cma_area_count;
36 static DEFINE_MUTEX(cma_mutex);
37 
38 phys_addr_t cma_get_base(const struct cma *cma)
39 {
40 	return PFN_PHYS(cma->base_pfn);
41 }
42 
43 unsigned long cma_get_size(const struct cma *cma)
44 {
45 	return cma->count << PAGE_SHIFT;
46 }
47 
48 const char *cma_get_name(const struct cma *cma)
49 {
50 	return cma->name;
51 }
52 
53 static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
54 					     unsigned int align_order)
55 {
56 	if (align_order <= cma->order_per_bit)
57 		return 0;
58 	return (1UL << (align_order - cma->order_per_bit)) - 1;
59 }
60 
61 /*
62  * Find the offset of the base PFN from the specified align_order.
63  * The value returned is expressed in units of order_per_bit.
64  */
65 static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
66 					       unsigned int align_order)
67 {
68 	return (cma->base_pfn & ((1UL << align_order) - 1))
69 		>> cma->order_per_bit;
70 }
71 
72 static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
73 					      unsigned long pages)
74 {
75 	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
76 }
77 
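/*
 * Worked example (illustrative, not part of the upstream file): with
 * order_per_bit = 2 each bitmap bit covers 4 pages.  For align_order = 4
 * (a 16-page alignment request) cma_bitmap_aligned_mask() returns
 * (1UL << (4 - 2)) - 1 = 3, so the bitmap search rounds the candidate index
 * such that (index + offset) is a multiple of 4 bits, i.e. the allocation
 * starts on a 16-page physical boundary.  With base_pfn = 0x12344,
 * cma_bitmap_aligned_offset() returns (0x12344 & 0xf) >> 2 = 1, the bit
 * offset of the area's base within such a 16-page window.  Finally,
 * cma_bitmap_pages_to_bits(cma, 10) rounds 10 pages up to 12 and returns 3.
 */
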
78 static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
79 			     unsigned long count)
80 {
81 	unsigned long bitmap_no, bitmap_count;
82 	unsigned long flags;
83 
84 	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
85 	bitmap_count = cma_bitmap_pages_to_bits(cma, count);
86 
87 	spin_lock_irqsave(&cma->lock, flags);
88 	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
89 	spin_unlock_irqrestore(&cma->lock, flags);
90 }
91 
92 static void __init cma_activate_area(struct cma *cma)
93 {
94 	unsigned long base_pfn = cma->base_pfn, pfn;
95 	struct zone *zone;
96 
97 	cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
98 	if (!cma->bitmap)
99 		goto out_error;
100 
101 	/*
102 	 * alloc_contig_range() requires the pfn range specified to be in the
103 	 * same zone. Simplify by forcing the entire CMA resv range to be in the
104 	 * same zone.
105 	 */
106 	WARN_ON_ONCE(!pfn_valid(base_pfn));
107 	zone = page_zone(pfn_to_page(base_pfn));
108 	for (pfn = base_pfn + 1; pfn < base_pfn + cma->count; pfn++) {
109 		WARN_ON_ONCE(!pfn_valid(pfn));
110 		if (page_zone(pfn_to_page(pfn)) != zone)
111 			goto not_in_zone;
112 	}
113 
114 	for (pfn = base_pfn; pfn < base_pfn + cma->count;
115 	     pfn += pageblock_nr_pages)
116 		init_cma_reserved_pageblock(pfn_to_page(pfn));
117 
118 	spin_lock_init(&cma->lock);
119 
120 #ifdef CONFIG_CMA_DEBUGFS
121 	INIT_HLIST_HEAD(&cma->mem_head);
122 	spin_lock_init(&cma->mem_head_lock);
123 #endif
124 
125 	return;
126 
127 not_in_zone:
128 	bitmap_free(cma->bitmap);
129 out_error:
130 	/* Expose all pages to the buddy; they are useless for CMA. */
131 	if (!cma->reserve_pages_on_error) {
132 		for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn++)
133 			free_reserved_page(pfn_to_page(pfn));
134 	}
135 	totalcma_pages -= cma->count;
136 	cma->count = 0;
137 	pr_err("CMA area %s could not be activated\n", cma->name);
138 }
139 
140 static int __init cma_init_reserved_areas(void)
141 {
142 	int i;
143 
144 	for (i = 0; i < cma_area_count; i++)
145 		cma_activate_area(&cma_areas[i]);
146 
147 	return 0;
148 }
149 core_initcall(cma_init_reserved_areas);
150 
151 void __init cma_reserve_pages_on_error(struct cma *cma)
152 {
153 	cma->reserve_pages_on_error = true;
154 }
155 
156 /**
157  * cma_init_reserved_mem() - create custom contiguous area from reserved memory
158  * @base: Base address of the reserved area
159  * @size: Size of the reserved area (in bytes).
160  * @order_per_bit: Order of pages represented by one bit in the bitmap.
161  * @name: The name of the area. If this parameter is NULL, the name of
162  *        the area will be set to "cmaN", where N is a running counter of
163  *        used areas.
164  * @res_cma: Pointer to store the created cma region.
165  *
166  * This function creates a custom contiguous area from already reserved memory.
167  */
168 int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
169 				 unsigned int order_per_bit,
170 				 const char *name,
171 				 struct cma **res_cma)
172 {
173 	struct cma *cma;
174 
175 	/* Sanity checks */
176 	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
177 		pr_err("Not enough slots for CMA reserved regions!\n");
178 		return -ENOSPC;
179 	}
180 
181 	if (!size || !memblock_is_region_reserved(base, size))
182 		return -EINVAL;
183 
184 	/*
185 	 * CMA uses CMA_MIN_ALIGNMENT_BYTES as alignment requirement which
186 	 * needs pageblock_order to be initialized. Let's enforce it.
187 	 */
188 	if (!pageblock_order) {
189 		pr_err("pageblock_order not yet initialized. Called during early boot?\n");
190 		return -EINVAL;
191 	}
192 
193 	/* ensure minimal alignment required by mm core */
194 	if (!IS_ALIGNED(base | size, CMA_MIN_ALIGNMENT_BYTES))
195 		return -EINVAL;
196 
197 	/*
198 	 * Each reserved area must be initialised later, when more kernel
199 	 * subsystems (like slab allocator) are available.
200 	 */
201 	cma = &cma_areas[cma_area_count];
202 
203 	if (name)
204 		snprintf(cma->name, CMA_MAX_NAME, name);
205 	else
206 		snprintf(cma->name, CMA_MAX_NAME,  "cma%d\n", cma_area_count);
207 
208 	cma->base_pfn = PFN_DOWN(base);
209 	cma->count = size >> PAGE_SHIFT;
210 	cma->order_per_bit = order_per_bit;
211 	*res_cma = cma;
212 	cma_area_count++;
213 	totalcma_pages += cma->count;
214 
215 	return 0;
216 }
217 
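/*
 * Illustrative sketch (not part of the upstream file): registering a range
 * that early platform code has already carved out with memblock_reserve().
 * The example_* names and the range are hypothetical; the range must be
 * CMA_MIN_ALIGNMENT_BYTES aligned and still reserved in memblock, otherwise
 * cma_init_reserved_mem() returns -EINVAL.
 */
static struct cma *example_rmem_area;

static int __init example_register_reserved(phys_addr_t base, phys_addr_t size)
{
	int err;

	/* order_per_bit = 0: one bitmap bit tracks one page. */
	err = cma_init_reserved_mem(base, size, 0, "example-rmem",
				    &example_rmem_area);
	if (err)
		pr_err("example-rmem: registration failed: %d\n", err);
	return err;
}
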
218 /**
219  * cma_declare_contiguous_nid() - reserve custom contiguous area
220  * @base: Base address of the reserved area (optional, use 0 for any).
221  * @size: Size of the reserved area (in bytes).
222  * @limit: End address of the reserved memory (optional, 0 for any).
223  * @alignment: Alignment for the CMA area, should be a power of 2 or zero.
224  * @order_per_bit: Order of pages represented by one bit in the bitmap.
225  * @fixed: Hint about where to place the reserved area.
226  * @name: The name of the area. See function cma_init_reserved_mem()
227  * @res_cma: Pointer to store the created cma region.
228  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
229  *
230  * This function reserves memory from the early allocator. It should be
231  * called by arch-specific code once the early allocator (memblock or bootmem)
232  * has been activated and all other subsystems have already allocated/reserved
233  * memory. This function allows the creation of custom reserved areas.
234  *
235  * If @fixed is true, reserve contiguous area at exactly @base.  If false,
236  * reserve in range from @base to @limit.
237  */
238 int __init cma_declare_contiguous_nid(phys_addr_t base,
239 			phys_addr_t size, phys_addr_t limit,
240 			phys_addr_t alignment, unsigned int order_per_bit,
241 			bool fixed, const char *name, struct cma **res_cma,
242 			int nid)
243 {
244 	phys_addr_t memblock_end = memblock_end_of_DRAM();
245 	phys_addr_t highmem_start;
246 	int ret;
247 
248 	/*
249 	 * We can't use __pa(high_memory) directly, since high_memory
250 	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
251 	 * complain. Find the boundary by adding one to the last valid
252 	 * address.
253 	 */
254 	highmem_start = __pa(high_memory - 1) + 1;
255 	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
256 		__func__, &size, &base, &limit, &alignment);
257 
258 	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
259 		pr_err("Not enough slots for CMA reserved regions!\n");
260 		return -ENOSPC;
261 	}
262 
263 	if (!size)
264 		return -EINVAL;
265 
266 	if (alignment && !is_power_of_2(alignment))
267 		return -EINVAL;
268 
269 	if (!IS_ENABLED(CONFIG_NUMA))
270 		nid = NUMA_NO_NODE;
271 
272 	/* Sanitise input arguments. */
273 	alignment = max_t(phys_addr_t, alignment, CMA_MIN_ALIGNMENT_BYTES);
274 	if (fixed && base & (alignment - 1)) {
275 		ret = -EINVAL;
276 		pr_err("Region at %pa must be aligned to %pa bytes\n",
277 			&base, &alignment);
278 		goto err;
279 	}
280 	base = ALIGN(base, alignment);
281 	size = ALIGN(size, alignment);
282 	limit &= ~(alignment - 1);
283 
284 	if (!base)
285 		fixed = false;
286 
287 	/* size should be aligned with order_per_bit */
288 	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
289 		return -EINVAL;
290 
291 	/*
292 	 * If allocating at a fixed base the request region must not cross the
293 	 * low/high memory boundary.
294 	 */
295 	if (fixed && base < highmem_start && base + size > highmem_start) {
296 		ret = -EINVAL;
297 		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
298 			&base, &highmem_start);
299 		goto err;
300 	}
301 
302 	/*
303 	 * If the limit is unspecified or above the memblock end, its effective
304 	 * value will be the memblock end. Set it explicitly to simplify further
305 	 * checks.
306 	 */
307 	if (limit == 0 || limit > memblock_end)
308 		limit = memblock_end;
309 
310 	if (base + size > limit) {
311 		ret = -EINVAL;
312 		pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
313 			&size, &base, &limit);
314 		goto err;
315 	}
316 
317 	/* Reserve memory */
318 	if (fixed) {
319 		if (memblock_is_region_reserved(base, size) ||
320 		    memblock_reserve(base, size) < 0) {
321 			ret = -EBUSY;
322 			goto err;
323 		}
324 	} else {
325 		phys_addr_t addr = 0;
326 
327 		/*
328 		 * If there is enough memory, try a bottom-up allocation first.
329 		 * It will place the new cma area close to the start of the node
330 		 * and guarantee that compaction moves pages out of the cma area
331 		 * and not into it.
332 		 * Avoid using the first 4GB so as not to interfere with constrained zones
333 		 * like DMA/DMA32.
334 		 */
335 #ifdef CONFIG_PHYS_ADDR_T_64BIT
336 		if (!memblock_bottom_up() && memblock_end >= SZ_4G + size) {
337 			memblock_set_bottom_up(true);
338 			addr = memblock_alloc_range_nid(size, alignment, SZ_4G,
339 							limit, nid, true);
340 			memblock_set_bottom_up(false);
341 		}
342 #endif
343 
344 		/*
345 		 * All pages in the reserved area must come from the same zone.
346 		 * If the requested region crosses the low/high memory boundary,
347 		 * try allocating from high memory first and fall back to low
348 		 * memory in case of failure.
349 		 */
350 		if (!addr && base < highmem_start && limit > highmem_start) {
351 			addr = memblock_alloc_range_nid(size, alignment,
352 					highmem_start, limit, nid, true);
353 			limit = highmem_start;
354 		}
355 
356 		if (!addr) {
357 			addr = memblock_alloc_range_nid(size, alignment, base,
358 					limit, nid, true);
359 			if (!addr) {
360 				ret = -ENOMEM;
361 				goto err;
362 			}
363 		}
364 
365 		/*
366 		 * kmemleak scans/reads tracked objects for pointers to other
367 		 * objects but this address isn't mapped and accessible
368 		 */
369 		kmemleak_ignore_phys(addr);
370 		base = addr;
371 	}
372 
373 	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
374 	if (ret)
375 		goto free_mem;
376 
377 	pr_info("Reserved %ld MiB at %pa on node %d\n", (unsigned long)size / SZ_1M,
378 		&base, nid);
379 	return 0;
380 
381 free_mem:
382 	memblock_phys_free(base, size);
383 err:
384 	pr_err("Failed to reserve %ld MiB on node %d\n", (unsigned long)size / SZ_1M,
385 	       nid);
386 	return ret;
387 }
388 
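/*
 * Illustrative sketch (not part of the upstream file): an early-boot caller,
 * e.g. arch setup code running while memblock is still the allocator, asking
 * for a 64 MiB area anywhere in memory.  All example_* names are hypothetical.
 */
static struct cma *example_area;

static void __init example_reserve_area(void)
{
	int err;

	/* base = 0, limit = 0, fixed = false: let memblock pick the range. */
	err = cma_declare_contiguous_nid(0, SZ_64M, 0, 0, 0, false,
					 "example", &example_area, NUMA_NO_NODE);
	if (err)
		pr_warn("example CMA reservation failed: %d\n", err);
}
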
389 static void cma_debug_show_areas(struct cma *cma)
390 {
391 	unsigned long next_zero_bit, next_set_bit, nr_zero;
392 	unsigned long start = 0;
393 	unsigned long nr_part, nr_total = 0;
394 	unsigned long nbits = cma_bitmap_maxno(cma);
395 
396 	spin_lock_irq(&cma->lock);
397 	pr_info("number of available pages: ");
398 	for (;;) {
399 		next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
400 		if (next_zero_bit >= nbits)
401 			break;
402 		next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
403 		nr_zero = next_set_bit - next_zero_bit;
404 		nr_part = nr_zero << cma->order_per_bit;
405 		pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
406 			next_zero_bit);
407 		nr_total += nr_part;
408 		start = next_zero_bit + nr_zero;
409 	}
410 	pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
411 	spin_unlock_irq(&cma->lock);
412 }
413 
414 static struct page *__cma_alloc(struct cma *cma, unsigned long count,
415 				unsigned int align, gfp_t gfp)
416 {
417 	unsigned long mask, offset;
418 	unsigned long pfn = -1;
419 	unsigned long start = 0;
420 	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
421 	unsigned long i;
422 	struct page *page = NULL;
423 	int ret = -ENOMEM;
424 	const char *name = cma ? cma->name : NULL;
425 
426 	trace_cma_alloc_start(name, count, align);
427 
428 	if (!cma || !cma->count || !cma->bitmap)
429 		return page;
430 
431 	pr_debug("%s(cma %p, name: %s, count %lu, align %d)\n", __func__,
432 		(void *)cma, cma->name, count, align);
433 
434 	if (!count)
435 		return page;
436 
437 	mask = cma_bitmap_aligned_mask(cma, align);
438 	offset = cma_bitmap_aligned_offset(cma, align);
439 	bitmap_maxno = cma_bitmap_maxno(cma);
440 	bitmap_count = cma_bitmap_pages_to_bits(cma, count);
441 
442 	if (bitmap_count > bitmap_maxno)
443 		return page;
444 
445 	for (;;) {
446 		spin_lock_irq(&cma->lock);
447 		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
448 				bitmap_maxno, start, bitmap_count, mask,
449 				offset);
450 		if (bitmap_no >= bitmap_maxno) {
451 			spin_unlock_irq(&cma->lock);
452 			break;
453 		}
454 		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
455 		/*
456 		 * It's safe to drop the lock here. We've marked this region for
457 		 * our exclusive use. If the migration fails we will take the
458 		 * lock again and unmark it.
459 		 */
460 		spin_unlock_irq(&cma->lock);
461 
462 		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
463 		mutex_lock(&cma_mutex);
464 		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA, gfp);
465 		mutex_unlock(&cma_mutex);
466 		if (ret == 0) {
467 			page = pfn_to_page(pfn);
468 			break;
469 		}
470 
471 		cma_clear_bitmap(cma, pfn, count);
472 		if (ret != -EBUSY)
473 			break;
474 
475 		pr_debug("%s(): memory range at pfn 0x%lx %p is busy, retrying\n",
476 			 __func__, pfn, pfn_to_page(pfn));
477 
478 		trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn),
479 					   count, align);
480 		/* try again with a bit different memory target */
481 		start = bitmap_no + mask + 1;
482 	}
483 
484 	/*
485 	 * CMA can allocate multiple page blocks, which results in different
486 	 * blocks being marked with different tags. Reset the tags to ignore
487 	 * those page blocks.
488 	 */
489 	if (page) {
490 		for (i = 0; i < count; i++)
491 			page_kasan_tag_reset(nth_page(page, i));
492 	}
493 
494 	if (ret && !(gfp & __GFP_NOWARN)) {
495 		pr_err_ratelimited("%s: %s: alloc failed, req-size: %lu pages, ret: %d\n",
496 				   __func__, cma->name, count, ret);
497 		cma_debug_show_areas(cma);
498 	}
499 
500 	pr_debug("%s(): returned %p\n", __func__, page);
501 	trace_cma_alloc_finish(name, pfn, page, count, align, ret);
502 	if (page) {
503 		count_vm_event(CMA_ALLOC_SUCCESS);
504 		cma_sysfs_account_success_pages(cma, count);
505 	} else {
506 		count_vm_event(CMA_ALLOC_FAIL);
507 		cma_sysfs_account_fail_pages(cma, count);
508 	}
509 
510 	return page;
511 }
512 
513 /**
514  * cma_alloc() - allocate pages from contiguous area
515  * @cma:   Contiguous memory region for which the allocation is performed.
516  * @count: Requested number of pages.
517  * @align: Requested alignment of pages (in PAGE_SIZE order).
518  * @no_warn: Avoid printing a message about a failed allocation.
519  *
520  * This function allocates pages from the specified contiguous memory
521  * area.
522  */
523 struct page *cma_alloc(struct cma *cma, unsigned long count,
524 		       unsigned int align, bool no_warn)
525 {
526 	return __cma_alloc(cma, count, align, GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
527 }
528 
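/*
 * Illustrative sketch (not part of the upstream file): a driver-style user
 * that grabs a physically contiguous 1 MiB buffer from a CMA area and hands
 * it back when done.  "area" is hypothetical and would typically come from a
 * boot-time reservation (e.g. cma_declare_contiguous_nid()) or from the
 * per-device default area.
 */
static int example_use_area(struct cma *area)
{
	unsigned long nr_pages = SZ_1M >> PAGE_SHIFT;
	struct page *page;

	/* align = 0: only page alignment is required. */
	page = cma_alloc(area, nr_pages, 0, false);
	if (!page)
		return -ENOMEM;

	/* ... map the pages or point a device at page_to_phys(page) ... */

	if (!cma_release(area, page, nr_pages))
		pr_warn("pages were not part of the CMA area\n");

	return 0;
}
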
529 struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp)
530 {
531 	struct page *page;
532 
533 	if (WARN_ON(!order || !(gfp & __GFP_COMP)))
534 		return NULL;
535 
536 	page = __cma_alloc(cma, 1 << order, order, gfp);
537 
538 	return page ? page_folio(page) : NULL;
539 }
540 
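/*
 * Illustrative sketch (not part of the upstream file): allocating a compound
 * page (folio) from a CMA area, e.g. for a huge-page sized buffer.  The
 * "area" pointer and order are hypothetical; order must be non-zero and
 * __GFP_COMP must be set, or cma_alloc_folio() warns and returns NULL.
 */
static void example_use_folio(struct cma *area, int order)
{
	struct folio *folio;

	folio = cma_alloc_folio(area, order, GFP_KERNEL | __GFP_COMP);
	if (!folio)
		return;

	/* ... use folio_address()/folio_nr_pages() as with any large folio ... */

	cma_free_folio(area, folio);
}
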
541 bool cma_pages_valid(struct cma *cma, const struct page *pages,
542 		     unsigned long count)
543 {
544 	unsigned long pfn;
545 
546 	if (!cma || !pages)
547 		return false;
548 
549 	pfn = page_to_pfn(pages);
550 
551 	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count) {
552 		pr_debug("%s(page %p, count %lu)\n", __func__,
553 						(void *)pages, count);
554 		return false;
555 	}
556 
557 	return true;
558 }
559 
560 /**
561  * cma_release() - release allocated pages
562  * @cma:   Contiguous memory region the pages were allocated from.
563  * @pages: Allocated pages.
564  * @count: Number of allocated pages.
565  *
566  * This function releases memory allocated by cma_alloc().
567  * It returns false when the provided pages do not belong to the contiguous
568  * area and true otherwise.
569  */
570 bool cma_release(struct cma *cma, const struct page *pages,
571 		 unsigned long count)
572 {
573 	unsigned long pfn;
574 
575 	if (!cma_pages_valid(cma, pages, count))
576 		return false;
577 
578 	pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);
579 
580 	pfn = page_to_pfn(pages);
581 
582 	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
583 
584 	free_contig_range(pfn, count);
585 	cma_clear_bitmap(cma, pfn, count);
586 	cma_sysfs_account_release_pages(cma, count);
587 	trace_cma_release(cma->name, pfn, pages, count);
588 
589 	return true;
590 }
591 
592 bool cma_free_folio(struct cma *cma, const struct folio *folio)
593 {
594 	if (WARN_ON(!folio_test_large(folio)))
595 		return false;
596 
597 	return cma_release(cma, &folio->page, folio_nr_pages(folio));
598 }
599 
600 int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
601 {
602 	int i;
603 
604 	for (i = 0; i < cma_area_count; i++) {
605 		int ret = it(&cma_areas[i], data);
606 
607 		if (ret)
608 			return ret;
609 	}
610 
611 	return 0;
612 }
613
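/*
 * Illustrative sketch (not part of the upstream file): walking all registered
 * areas with cma_for_each_area().  The example_* names are hypothetical; the
 * callback may stop the walk early by returning a non-zero value, which
 * cma_for_each_area() then passes back to the caller.
 */
static int example_sum_area(struct cma *cma, void *data)
{
	unsigned long *total = data;
	phys_addr_t base = cma_get_base(cma);

	pr_info("%s: base %pa, size %lu bytes\n",
		cma_get_name(cma), &base, cma_get_size(cma));
	*total += cma_get_size(cma);
	return 0;
}

static void example_sum_all_areas(void)
{
	unsigned long total = 0;

	cma_for_each_area(example_sum_area, &total);
	pr_info("total CMA reserved: %lu bytes\n", total);
}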