xref: /linux/mm/cma.c (revision beace86e61e465dba204a268ab3f3377153a4973)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Contiguous Memory Allocator
4  *
5  * Copyright (c) 2010-2011 by Samsung Electronics.
6  * Copyright IBM Corporation, 2013
7  * Copyright LG Electronics Inc., 2014
8  * Written by:
9  *	Marek Szyprowski <m.szyprowski@samsung.com>
10  *	Michal Nazarewicz <mina86@mina86.com>
11  *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
12  *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
13  */
14 
15 #define pr_fmt(fmt) "cma: " fmt
16 
17 #define CREATE_TRACE_POINTS
18 
19 #include <linux/memblock.h>
20 #include <linux/err.h>
21 #include <linux/list.h>
22 #include <linux/mm.h>
23 #include <linux/sizes.h>
24 #include <linux/slab.h>
25 #include <linux/string_choices.h>
26 #include <linux/log2.h>
27 #include <linux/cma.h>
28 #include <linux/highmem.h>
29 #include <linux/io.h>
30 #include <linux/kmemleak.h>
31 #include <trace/events/cma.h>
32 
33 #include "internal.h"
34 #include "cma.h"
35 
36 struct cma cma_areas[MAX_CMA_AREAS];
37 unsigned int cma_area_count;
38 
39 phys_addr_t cma_get_base(const struct cma *cma)
40 {
41 	WARN_ON_ONCE(cma->nranges != 1);
42 	return PFN_PHYS(cma->ranges[0].base_pfn);
43 }
44 
45 unsigned long cma_get_size(const struct cma *cma)
46 {
47 	return cma->count << PAGE_SHIFT;
48 }
49 
50 const char *cma_get_name(const struct cma *cma)
51 {
52 	return cma->name;
53 }
54 
55 static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
56 					     unsigned int align_order)
57 {
58 	if (align_order <= cma->order_per_bit)
59 		return 0;
60 	return (1UL << (align_order - cma->order_per_bit)) - 1;
61 }
62 
63 /*
64  * Find the offset of the base PFN from the specified align_order.
65  * The value returned is expressed in units of order_per_bit.
66  */
67 static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
68 					       const struct cma_memrange *cmr,
69 					       unsigned int align_order)
70 {
71 	return (cmr->base_pfn & ((1UL << align_order) - 1))
72 		>> cma->order_per_bit;
73 }
74 
75 static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
76 					      unsigned long pages)
77 {
78 	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
79 }
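
/*
 * Example (illustrative, with made-up values): with order_per_bit == 2 each
 * bitmap bit covers 1 << 2 == 4 pages, so the helpers above evaluate to:
 *
 *	cma_bitmap_aligned_mask(cma, 4)   == (1UL << (4 - 2)) - 1 == 3
 *	cma_bitmap_aligned_mask(cma, 1)   == 0  (align_order <= order_per_bit)
 *	cma_bitmap_pages_to_bits(cma, 10) == ALIGN(10, 4) >> 2   == 3
 *
 * i.e. a 10-page request is rounded up to three bitmap bits (12 pages).
 */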
80 
81 static void cma_clear_bitmap(struct cma *cma, const struct cma_memrange *cmr,
82 			     unsigned long pfn, unsigned long count)
83 {
84 	unsigned long bitmap_no, bitmap_count;
85 	unsigned long flags;
86 
87 	bitmap_no = (pfn - cmr->base_pfn) >> cma->order_per_bit;
88 	bitmap_count = cma_bitmap_pages_to_bits(cma, count);
89 
90 	spin_lock_irqsave(&cma->lock, flags);
91 	bitmap_clear(cmr->bitmap, bitmap_no, bitmap_count);
92 	cma->available_count += count;
93 	spin_unlock_irqrestore(&cma->lock, flags);
94 }
95 
96 /*
97  * Check if a CMA area contains no ranges that intersect with
98  * multiple zones. Store the result in the flags in case
99  * this gets called more than once.
100  */
101 bool cma_validate_zones(struct cma *cma)
102 {
103 	int r;
104 	unsigned long base_pfn;
105 	struct cma_memrange *cmr;
106 	bool valid_bit_set;
107 
108 	/*
109 	 * If already validated, return result of previous check.
110 	 * Either the valid or invalid bit will be set if this
111 	 * check has already been done. If neither is set, the
112 	 * check has not been performed yet.
113 	 */
114 	valid_bit_set = test_bit(CMA_ZONES_VALID, &cma->flags);
115 	if (valid_bit_set || test_bit(CMA_ZONES_INVALID, &cma->flags))
116 		return valid_bit_set;
117 
118 	for (r = 0; r < cma->nranges; r++) {
119 		cmr = &cma->ranges[r];
120 		base_pfn = cmr->base_pfn;
121 
122 		/*
123 		 * alloc_contig_range() requires the pfn range specified
124 		 * to be in the same zone. Simplify by forcing the entire
125 		 * CMA reserved range to be in the same zone.
126 		 */
127 		WARN_ON_ONCE(!pfn_valid(base_pfn));
128 		if (pfn_range_intersects_zones(cma->nid, base_pfn, cmr->count)) {
129 			set_bit(CMA_ZONES_INVALID, &cma->flags);
130 			return false;
131 		}
132 	}
133 
134 	set_bit(CMA_ZONES_VALID, &cma->flags);
135 
136 	return true;
137 }
138 
139 static void __init cma_activate_area(struct cma *cma)
140 {
141 	unsigned long pfn, end_pfn, early_pfn[CMA_MAX_RANGES];
142 	int allocrange, r;
143 	struct cma_memrange *cmr;
144 	unsigned long bitmap_count, count;
145 
146 	for (allocrange = 0; allocrange < cma->nranges; allocrange++) {
147 		cmr = &cma->ranges[allocrange];
148 		early_pfn[allocrange] = cmr->early_pfn;
149 		cmr->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma, cmr),
150 					    GFP_KERNEL);
151 		if (!cmr->bitmap)
152 			goto cleanup;
153 	}
154 
155 	if (!cma_validate_zones(cma))
156 		goto cleanup;
157 
158 	for (r = 0; r < cma->nranges; r++) {
159 		cmr = &cma->ranges[r];
160 		if (early_pfn[r] != cmr->base_pfn) {
161 			count = early_pfn[r] - cmr->base_pfn;
162 			bitmap_count = cma_bitmap_pages_to_bits(cma, count);
163 			bitmap_set(cmr->bitmap, 0, bitmap_count);
164 		}
165 
166 		for (pfn = early_pfn[r]; pfn < cmr->base_pfn + cmr->count;
167 		     pfn += pageblock_nr_pages)
168 			init_cma_reserved_pageblock(pfn_to_page(pfn));
169 	}
170 
171 	spin_lock_init(&cma->lock);
172 
173 	mutex_init(&cma->alloc_mutex);
174 
175 #ifdef CONFIG_CMA_DEBUGFS
176 	INIT_HLIST_HEAD(&cma->mem_head);
177 	spin_lock_init(&cma->mem_head_lock);
178 #endif
179 	set_bit(CMA_ACTIVATED, &cma->flags);
180 
181 	return;
182 
183 cleanup:
184 	for (r = 0; r < allocrange; r++)
185 		bitmap_free(cma->ranges[r].bitmap);
186 
187 	/* Expose all pages to the buddy allocator; they are of no use to CMA. */
188 	if (!test_bit(CMA_RESERVE_PAGES_ON_ERROR, &cma->flags)) {
189 		for (r = 0; r < allocrange; r++) {
190 			cmr = &cma->ranges[r];
191 			end_pfn = cmr->base_pfn + cmr->count;
192 			for (pfn = early_pfn[r]; pfn < end_pfn; pfn++)
193 				free_reserved_page(pfn_to_page(pfn));
194 		}
195 	}
196 	totalcma_pages -= cma->count;
197 	cma->available_count = cma->count = 0;
198 	pr_err("CMA area %s could not be activated\n", cma->name);
199 }
200 
201 static int __init cma_init_reserved_areas(void)
202 {
203 	int i;
204 
205 	for (i = 0; i < cma_area_count; i++)
206 		cma_activate_area(&cma_areas[i]);
207 
208 	return 0;
209 }
210 core_initcall(cma_init_reserved_areas);
211 
212 void __init cma_reserve_pages_on_error(struct cma *cma)
213 {
214 	set_bit(CMA_RESERVE_PAGES_ON_ERROR, &cma->flags);
215 }
216 
217 static int __init cma_new_area(const char *name, phys_addr_t size,
218 			       unsigned int order_per_bit,
219 			       struct cma **res_cma)
220 {
221 	struct cma *cma;
222 
223 	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
224 		pr_err("Not enough slots for CMA reserved regions!\n");
225 		return -ENOSPC;
226 	}
227 
228 	/*
229 	 * Each reserved area must be initialised later, when more kernel
230  * subsystems (like the slab allocator) are available.
231 	 */
232 	cma = &cma_areas[cma_area_count];
233 	cma_area_count++;
234 
235 	if (name)
236 		snprintf(cma->name, CMA_MAX_NAME, "%s", name);
237 	else
238 		snprintf(cma->name, CMA_MAX_NAME, "cma%d", cma_area_count);
239 
240 	cma->available_count = cma->count = size >> PAGE_SHIFT;
241 	cma->order_per_bit = order_per_bit;
242 	*res_cma = cma;
243 	totalcma_pages += cma->count;
244 
245 	return 0;
246 }
247 
248 static void __init cma_drop_area(struct cma *cma)
249 {
250 	totalcma_pages -= cma->count;
251 	cma_area_count--;
252 }
253 
254 /**
255  * cma_init_reserved_mem() - create custom contiguous area from reserved memory
256  * @base: Base address of the reserved area
257  * @size: Size of the reserved area (in bytes)
258  * @order_per_bit: Order of pages represented by one bit in the bitmap.
259  * @name: The name of the area. If this parameter is NULL, the name of
260  *        the area will be set to "cmaN", where N is a running counter of
261  *        used areas.
262  * @res_cma: Pointer to store the created cma region.
263  *
264  * This function creates a custom contiguous area from already-reserved memory.
265  */
266 int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
267 				 unsigned int order_per_bit,
268 				 const char *name,
269 				 struct cma **res_cma)
270 {
271 	struct cma *cma;
272 	int ret;
273 
274 	/* Sanity checks */
275 	if (!size || !memblock_is_region_reserved(base, size))
276 		return -EINVAL;
277 
278 	/*
279 	 * CMA uses CMA_MIN_ALIGNMENT_BYTES as alignment requirement which
280 	 * needs pageblock_order to be initialized. Let's enforce it.
281 	 */
282 	if (!pageblock_order) {
283 		pr_err("pageblock_order not yet initialized. Called during early boot?\n");
284 		return -EINVAL;
285 	}
286 
287 	/* ensure minimal alignment required by mm core */
288 	if (!IS_ALIGNED(base | size, CMA_MIN_ALIGNMENT_BYTES))
289 		return -EINVAL;
290 
291 	ret = cma_new_area(name, size, order_per_bit, &cma);
292 	if (ret != 0)
293 		return ret;
294 
295 	cma->ranges[0].base_pfn = PFN_DOWN(base);
296 	cma->ranges[0].early_pfn = PFN_DOWN(base);
297 	cma->ranges[0].count = cma->count;
298 	cma->nranges = 1;
299 	cma->nid = NUMA_NO_NODE;
300 
301 	*res_cma = cma;
302 
303 	return 0;
304 }
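
/*
 * Usage sketch (illustrative only; the base address, size and name below are
 * hypothetical): an early caller, such as reserved-memory glue code, reserves
 * a suitably aligned block with memblock first and then hands it to CMA.
 * Both base and size must be aligned to CMA_MIN_ALIGNMENT_BYTES.
 *
 *	static struct cma *example_cma;
 *
 *	static int __init example_cma_setup(void)
 *	{
 *		phys_addr_t base = 0x40000000;
 *		phys_addr_t size = SZ_64M;
 *		int ret;
 *
 *		if (memblock_reserve(base, size))
 *			return -ENOMEM;
 *
 *		ret = cma_init_reserved_mem(base, size, 0, "example", &example_cma);
 *		if (ret)
 *			memblock_phys_free(base, size);
 *		return ret;
 *	}
 */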
305 
306 /*
307  * Structure used while walking physical memory ranges and finding out
308  * which one(s) to use for a CMA area.
309  */
310 struct cma_init_memrange {
311 	phys_addr_t base;
312 	phys_addr_t size;
313 	struct list_head list;
314 };
315 
316 /*
317  * Work array used during CMA initialization.
318  */
319 static struct cma_init_memrange memranges[CMA_MAX_RANGES] __initdata;
320 
321 static bool __init revsizecmp(struct cma_init_memrange *mlp,
322 			      struct cma_init_memrange *mrp)
323 {
324 	return mlp->size > mrp->size;
325 }
326 
327 static bool __init basecmp(struct cma_init_memrange *mlp,
328 			   struct cma_init_memrange *mrp)
329 {
330 	return mlp->base < mrp->base;
331 }
332 
333 /*
334  * Helper function to create sorted lists.
335  */
336 static void __init list_insert_sorted(
337 	struct list_head *ranges,
338 	struct cma_init_memrange *mrp,
339 	bool (*cmp)(struct cma_init_memrange *lh, struct cma_init_memrange *rh))
340 {
341 	struct list_head *mp;
342 	struct cma_init_memrange *mlp;
343 
344 	if (list_empty(ranges))
345 		list_add(&mrp->list, ranges);
346 	else {
347 		list_for_each(mp, ranges) {
348 			mlp = list_entry(mp, struct cma_init_memrange, list);
349 			if (cmp(mlp, mrp))
350 				break;
351 		}
352 		__list_add(&mrp->list, mlp->list.prev, &mlp->list);
353 	}
354 }
355 
356 static int __init cma_fixed_reserve(phys_addr_t base, phys_addr_t size)
357 {
358 	if (IS_ENABLED(CONFIG_HIGHMEM)) {
359 		phys_addr_t highmem_start = __pa(high_memory - 1) + 1;
360 
361 		/*
362 		 * If allocating at a fixed base, the requested region must not
363 		 * cross the low/high memory boundary.
364 		 */
365 		if (base < highmem_start && base + size > highmem_start) {
366 			pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
367 			       &base, &highmem_start);
368 			return -EINVAL;
369 		}
370 	}
371 
372 	if (memblock_is_region_reserved(base, size) ||
373 	    memblock_reserve(base, size) < 0) {
374 		return -EBUSY;
375 	}
376 
377 	return 0;
378 }
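
/*
 * Worked example of the boundary check above (made-up numbers): on a 32-bit
 * HIGHMEM system with highmem_start at 0x38000000, a fixed request with
 * base = 0x37000000 and size = SZ_32M would end at 0x39000000 and thus
 * straddle the low/high memory boundary, so it is rejected with -EINVAL;
 * the same size at base = 0x30000000 stays entirely in lowmem and proceeds
 * to the memblock reservation.
 */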
379 
380 static phys_addr_t __init cma_alloc_mem(phys_addr_t base, phys_addr_t size,
381 			phys_addr_t align, phys_addr_t limit, int nid)
382 {
383 	phys_addr_t addr = 0;
384 
385 	/*
386 	 * If there is enough memory, try a bottom-up allocation first.
387 	 * It will place the new cma area close to the start of the node
388 	 * and guarantee that the compaction is moving pages out of the
389 	 * cma area and not into it.
390 	 * Avoid using the first 4 GB so as not to interfere with constrained
391 	 * zones like DMA/DMA32.
392 	 */
393 #ifdef CONFIG_PHYS_ADDR_T_64BIT
394 	if (!memblock_bottom_up() && limit >= SZ_4G + size) {
395 		memblock_set_bottom_up(true);
396 		addr = memblock_alloc_range_nid(size, align, SZ_4G, limit,
397 						nid, true);
398 		memblock_set_bottom_up(false);
399 	}
400 #endif
401 
402 	/*
403 	 * On systems with HIGHMEM try allocating from there before consuming
404 	 * memory in lower zones.
405 	 */
406 	if (!addr && IS_ENABLED(CONFIG_HIGHMEM)) {
407 		phys_addr_t highmem = __pa(high_memory - 1) + 1;
408 
409 		/*
410 		 * All pages in the reserved area must come from the same zone.
411 		 * If the requested region crosses the low/high memory boundary,
412 		 * try allocating from high memory first and fall back to low
413 		 * memory in case of failure.
414 		 */
415 		if (base < highmem && limit > highmem) {
416 			addr = memblock_alloc_range_nid(size, align, highmem,
417 							limit, nid, true);
418 			limit = highmem;
419 		}
420 	}
421 
422 	if (!addr)
423 		addr = memblock_alloc_range_nid(size, align, base, limit, nid,
424 						true);
425 
426 	return addr;
427 }
428 
429 static int __init __cma_declare_contiguous_nid(phys_addr_t *basep,
430 			phys_addr_t size, phys_addr_t limit,
431 			phys_addr_t alignment, unsigned int order_per_bit,
432 			bool fixed, const char *name, struct cma **res_cma,
433 			int nid)
434 {
435 	phys_addr_t memblock_end = memblock_end_of_DRAM();
436 	phys_addr_t base = *basep;
437 	int ret;
438 
439 	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
440 		__func__, &size, &base, &limit, &alignment);
441 
442 	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
443 		pr_err("Not enough slots for CMA reserved regions!\n");
444 		return -ENOSPC;
445 	}
446 
447 	if (!size)
448 		return -EINVAL;
449 
450 	if (alignment && !is_power_of_2(alignment))
451 		return -EINVAL;
452 
453 	if (!IS_ENABLED(CONFIG_NUMA))
454 		nid = NUMA_NO_NODE;
455 
456 	/* Sanitise input arguments. */
457 	alignment = max_t(phys_addr_t, alignment, CMA_MIN_ALIGNMENT_BYTES);
458 	if (fixed && base & (alignment - 1)) {
459 		pr_err("Region at %pa must be aligned to %pa bytes\n",
460 			&base, &alignment);
461 		return -EINVAL;
462 	}
463 	base = ALIGN(base, alignment);
464 	size = ALIGN(size, alignment);
465 	limit &= ~(alignment - 1);
466 
467 	if (!base)
468 		fixed = false;
469 
470 	/* size should be aligned with order_per_bit */
471 	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
472 		return -EINVAL;
473 
474 
475 	/*
476 	 * If the limit is unspecified or above the memblock end, its effective
477 	 * value will be the memblock end. Set it explicitly to simplify further
478 	 * checks.
479 	 */
480 	if (limit == 0 || limit > memblock_end)
481 		limit = memblock_end;
482 
483 	if (base + size > limit) {
484 		pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
485 			&size, &base, &limit);
486 		return -EINVAL;
487 	}
488 
489 	/* Reserve memory */
490 	if (fixed) {
491 		ret = cma_fixed_reserve(base, size);
492 		if (ret)
493 			return ret;
494 	} else {
495 		base = cma_alloc_mem(base, size, alignment, limit, nid);
496 		if (!base)
497 			return -ENOMEM;
498 
499 		/*
500 		 * kmemleak scans/reads tracked objects for pointers to other
501 		 * objects but this address isn't mapped and accessible
502 		 */
503 		kmemleak_ignore_phys(base);
504 	}
505 
506 	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
507 	if (ret) {
508 		memblock_phys_free(base, size);
509 		return ret;
510 	}
511 
512 	(*res_cma)->nid = nid;
513 	*basep = base;
514 
515 	return 0;
516 }
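
/*
 * Worked example of the sanitisation above (made-up numbers, assuming 4K
 * pages and pageblock_order == 9, i.e. CMA_MIN_ALIGNMENT_BYTES == 2 MiB):
 * a request with base = 0x12345000, size = 0x500000 and limit = 0x80000000
 * becomes
 *
 *	base  = ALIGN(0x12345000, 0x200000)  = 0x12400000
 *	size  = ALIGN(0x500000, 0x200000)    = 0x600000
 *	limit = 0x80000000 & ~(0x200000 - 1) = 0x80000000
 *
 * i.e. base and size are rounded up to a pageblock boundary while the limit
 * is rounded down.
 */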
517 
518 /*
519  * Create CMA areas with a total size of @total_size. A normal allocation
520  * for one area is tried first. If that fails, the biggest memblock
521  * ranges above 4G are selected, and allocated bottom up.
522  *
523  * The algorithmic complexity here is not great, but this function is
524  * only called during boot, and the lists it operates on have fewer
525  * than CMA_MAX_RANGES elements (default value: 8).
526  */
527 int __init cma_declare_contiguous_multi(phys_addr_t total_size,
528 			phys_addr_t align, unsigned int order_per_bit,
529 			const char *name, struct cma **res_cma, int nid)
530 {
531 	phys_addr_t start = 0, end;
532 	phys_addr_t size, sizesum, sizeleft;
533 	struct cma_init_memrange *mrp, *mlp, *failed;
534 	struct cma_memrange *cmrp;
535 	LIST_HEAD(ranges);
536 	LIST_HEAD(final_ranges);
537 	struct list_head *mp, *next;
538 	int ret, nr = 1;
539 	u64 i;
540 	struct cma *cma;
541 
542 	/*
543 	 * First, try it the normal way, producing just one range.
544 	 */
545 	ret = __cma_declare_contiguous_nid(&start, total_size, 0, align,
546 			order_per_bit, false, name, res_cma, nid);
547 	if (ret != -ENOMEM)
548 		goto out;
549 
550 	/*
551 	 * Couldn't find one range that fits our needs, so try multiple
552 	 * ranges.
553 	 *
554 	 * No need to do the alignment checks here; the call to
555 	 * __cma_declare_contiguous_nid above would have caught
556 	 * any issues. With the checks, we know that:
557 	 *
558 	 * - @align is a power of 2
559 	 * - @align is >= pageblock alignment
560 	 * - @size is aligned to @align and to @order_per_bit
561 	 *
562 	 * So, as long as we create ranges that have a base
563 	 * aligned to @align, and a size that is aligned to
564 	 * both @align and @order_per_bit, things will work out.
565 	 */
566 	nr = 0;
567 	sizesum = 0;
568 	failed = NULL;
569 
570 	ret = cma_new_area(name, total_size, order_per_bit, &cma);
571 	if (ret != 0)
572 		goto out;
573 
574 	align = max_t(phys_addr_t, align, CMA_MIN_ALIGNMENT_BYTES);
575 	/*
576 	 * Create a list of ranges above 4G, largest range first.
577 	 */
578 	for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &start, &end, NULL) {
579 		if (upper_32_bits(start) == 0)
580 			continue;
581 
582 		start = ALIGN(start, align);
583 		if (start >= end)
584 			continue;
585 
586 		end = ALIGN_DOWN(end, align);
587 		if (end <= start)
588 			continue;
589 
590 		size = end - start;
591 		size = ALIGN_DOWN(size, (PAGE_SIZE << order_per_bit));
592 		if (!size)
593 			continue;
594 		sizesum += size;
595 
596 		pr_debug("consider %016llx - %016llx\n", (u64)start, (u64)end);
597 
598 		/*
599 		 * If we have not yet used the maximum number of
600 		 * areas, grab a new one.
601 		 *
602 		 * Otherwise, check whether this range is smaller than
603 		 * the smallest one already recorded; if it is not,
604 		 * re-use the smallest element.
605 		 */
606 		if (nr < CMA_MAX_RANGES)
607 			mrp = &memranges[nr++];
608 		else {
609 			mrp = list_last_entry(&ranges,
610 					      struct cma_init_memrange, list);
611 			if (size < mrp->size)
612 				continue;
613 			list_del(&mrp->list);
614 			sizesum -= mrp->size;
615 			pr_debug("deleted %016llx - %016llx from the list\n",
616 				(u64)mrp->base, (u64)mrp->base + mrp->size);
617 		}
618 		mrp->base = start;
619 		mrp->size = size;
620 
621 		/*
622 		 * Now do a sorted insert.
623 		 */
624 		list_insert_sorted(&ranges, mrp, revsizecmp);
625 		pr_debug("added %016llx - %016llx to the list\n",
626 		    (u64)mrp->base, (u64)mrp->base + size);
627 		pr_debug("total size now %llu\n", (u64)sizesum);
628 	}
629 
630 	/*
631 	 * There is not enough room in the CMA_MAX_RANGES largest
632 	 * ranges, so bail out.
633 	 */
634 	if (sizesum < total_size) {
635 		cma_drop_area(cma);
636 		ret = -ENOMEM;
637 		goto out;
638 	}
639 
640 	/*
641 	 * Found ranges that provide enough combined space.
642 	 * Now, sort them by address, smallest first, because we
643 	 * want to mimic a bottom-up memblock allocation.
644 	 */
645 	sizesum = 0;
646 	list_for_each_safe(mp, next, &ranges) {
647 		mlp = list_entry(mp, struct cma_init_memrange, list);
648 		list_del(mp);
649 		list_insert_sorted(&final_ranges, mlp, basecmp);
650 		sizesum += mlp->size;
651 		if (sizesum >= total_size)
652 			break;
653 	}
654 
655 	/*
656 	 * Walk the final list, and add a CMA range for
657 	 * each range, possibly not using the last one fully.
658 	 */
659 	nr = 0;
660 	sizeleft = total_size;
661 	list_for_each(mp, &final_ranges) {
662 		mlp = list_entry(mp, struct cma_init_memrange, list);
663 		size = min(sizeleft, mlp->size);
664 		if (memblock_reserve(mlp->base, size)) {
665 			/*
666 			 * Unexpected error. Could go on to
667 			 * the next one, but just abort to
668 			 * be safe.
669 			 */
670 			failed = mlp;
671 			break;
672 		}
673 
674 		pr_debug("created region %d: %016llx - %016llx\n",
675 		    nr, (u64)mlp->base, (u64)mlp->base + size);
676 		cmrp = &cma->ranges[nr++];
677 		cmrp->base_pfn = PHYS_PFN(mlp->base);
678 		cmrp->early_pfn = cmrp->base_pfn;
679 		cmrp->count = size >> PAGE_SHIFT;
680 
681 		sizeleft -= size;
682 		if (sizeleft == 0)
683 			break;
684 	}
685 
686 	if (failed) {
687 		list_for_each(mp, &final_ranges) {
688 			mlp = list_entry(mp, struct cma_init_memrange, list);
689 			if (mlp == failed)
690 				break;
691 			memblock_phys_free(mlp->base, mlp->size);
692 		}
693 		cma_drop_area(cma);
694 		ret = -ENOMEM;
695 		goto out;
696 	}
697 
698 	cma->nranges = nr;
699 	cma->nid = nid;
700 	*res_cma = cma;
701 
702 out:
703 	if (ret != 0)
704 		pr_err("Failed to reserve %lu MiB\n",
705 			(unsigned long)total_size / SZ_1M);
706 	else
707 		pr_info("Reserved %lu MiB in %d range%s\n",
708 			(unsigned long)total_size / SZ_1M, nr, str_plural(nr));
709 	return ret;
710 }
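
/*
 * Usage sketch (illustrative only; the size and name are hypothetical):
 * a boot-time caller that needs a large reservation which may not fit in a
 * single physically contiguous block. Passing 0 for @align and
 * @order_per_bit selects the minimum alignment and per-page bitmap
 * granularity.
 *
 *	static struct cma *big_cma;
 *
 *	static void __init big_cma_reserve(void)
 *	{
 *		if (cma_declare_contiguous_multi(SZ_1G, 0, 0, "big_cma",
 *						 &big_cma, NUMA_NO_NODE))
 *			pr_warn("big_cma: reservation failed\n");
 *	}
 */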
711 
712 /**
713  * cma_declare_contiguous_nid() - reserve custom contiguous area
714  * @base: Base address of the reserved area (optional; use 0 for any)
715  * @size: Size of the reserved area (in bytes)
716  * @limit: End address of the reserved memory (optional, 0 for any).
717  * @alignment: Alignment for the CMA area, should be a power of 2 or zero
718  * @order_per_bit: Order of pages represented by one bit on bitmap.
719  * @fixed: hint about where to place the reserved area
720  * @name: The name of the area. See function cma_init_reserved_mem()
721  * @res_cma: Pointer to store the created cma region.
722  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
723  *
724  * This function reserves memory from early allocator. It should be
725  * called by arch specific code once the early allocator (memblock or bootmem)
726  * has been activated and all other subsystems have already allocated/reserved
727  * memory. This function allows creation of custom reserved areas.
728  *
729  * If @fixed is true, reserve contiguous area at exactly @base.  If false,
730  * reserve in range from @base to @limit.
731  */
732 int __init cma_declare_contiguous_nid(phys_addr_t base,
733 			phys_addr_t size, phys_addr_t limit,
734 			phys_addr_t alignment, unsigned int order_per_bit,
735 			bool fixed, const char *name, struct cma **res_cma,
736 			int nid)
737 {
738 	int ret;
739 
740 	ret = __cma_declare_contiguous_nid(&base, size, limit, alignment,
741 			order_per_bit, fixed, name, res_cma, nid);
742 	if (ret != 0)
743 		pr_err("Failed to reserve %lu MiB\n",
744 				(unsigned long)size / SZ_1M);
745 	else
746 		pr_info("Reserved %lu MiB at %pa\n",
747 				(unsigned long)size / SZ_1M, &base);
748 
749 	return ret;
750 }
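
/*
 * Usage sketch (illustrative only; size and name are hypothetical): arch
 * setup code reserving a single 128 MiB area anywhere in memory, with the
 * default alignment and bitmap granularity.
 *
 *	static struct cma *example_cma;
 *
 *	void __init example_cma_reserve(void)
 *	{
 *		int ret;
 *
 *		ret = cma_declare_contiguous_nid(0, SZ_128M, 0, 0, 0, false,
 *						 "example", &example_cma,
 *						 NUMA_NO_NODE);
 *		if (ret)
 *			pr_warn("example: CMA reservation failed (%d)\n", ret);
 *	}
 */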
751 
752 static void cma_debug_show_areas(struct cma *cma)
753 {
754 	unsigned long start, end;
755 	unsigned long nr_part;
756 	unsigned long nbits;
757 	int r;
758 	struct cma_memrange *cmr;
759 
760 	spin_lock_irq(&cma->lock);
761 	pr_info("number of available pages: ");
762 	for (r = 0; r < cma->nranges; r++) {
763 		cmr = &cma->ranges[r];
764 
765 		nbits = cma_bitmap_maxno(cma, cmr);
766 
767 		pr_info("range %d: ", r);
768 		for_each_clear_bitrange(start, end, cmr->bitmap, nbits) {
769 			nr_part = (end - start) << cma->order_per_bit;
770 			pr_cont("%s%lu@%lu", start ? "+" : "", nr_part, start);
771 		}
772 		pr_info("\n");
773 	}
774 	pr_cont("=> %lu free of %lu total pages\n", cma->available_count,
775 			cma->count);
776 	spin_unlock_irq(&cma->lock);
777 }
778 
779 static int cma_range_alloc(struct cma *cma, struct cma_memrange *cmr,
780 				unsigned long count, unsigned int align,
781 				struct page **pagep, gfp_t gfp)
782 {
783 	unsigned long mask, offset;
784 	unsigned long pfn = -1;
785 	unsigned long start = 0;
786 	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
787 	int ret = -EBUSY;
788 	struct page *page = NULL;
789 
790 	mask = cma_bitmap_aligned_mask(cma, align);
791 	offset = cma_bitmap_aligned_offset(cma, cmr, align);
792 	bitmap_maxno = cma_bitmap_maxno(cma, cmr);
793 	bitmap_count = cma_bitmap_pages_to_bits(cma, count);
794 
795 	if (bitmap_count > bitmap_maxno)
796 		goto out;
797 
798 	for (;;) {
799 		spin_lock_irq(&cma->lock);
800 		/*
801 		 * If the request is larger than the available number
802 		 * of pages, stop right away.
803 		 */
804 		if (count > cma->available_count) {
805 			spin_unlock_irq(&cma->lock);
806 			break;
807 		}
808 		bitmap_no = bitmap_find_next_zero_area_off(cmr->bitmap,
809 				bitmap_maxno, start, bitmap_count, mask,
810 				offset);
811 		if (bitmap_no >= bitmap_maxno) {
812 			spin_unlock_irq(&cma->lock);
813 			break;
814 		}
815 		bitmap_set(cmr->bitmap, bitmap_no, bitmap_count);
816 		cma->available_count -= count;
817 		/*
818 		 * It's safe to drop the lock here. We've marked this region for
819 		 * our exclusive use. If the migration fails we will take the
820 		 * lock again and unmark it.
821 		 */
822 		spin_unlock_irq(&cma->lock);
823 
824 		pfn = cmr->base_pfn + (bitmap_no << cma->order_per_bit);
825 		mutex_lock(&cma->alloc_mutex);
826 		ret = alloc_contig_range(pfn, pfn + count, ACR_FLAGS_CMA, gfp);
827 		mutex_unlock(&cma->alloc_mutex);
828 		if (ret == 0) {
829 			page = pfn_to_page(pfn);
830 			break;
831 		}
832 
833 		cma_clear_bitmap(cma, cmr, pfn, count);
834 		if (ret != -EBUSY)
835 			break;
836 
837 		pr_debug("%s(): memory range at pfn 0x%lx %p is busy, retrying\n",
838 			 __func__, pfn, pfn_to_page(pfn));
839 
840 		trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn),
841 					   count, align);
842 		/* try again with a bit different memory target */
843 		start = bitmap_no + mask + 1;
844 	}
845 out:
846 	*pagep = page;
847 	return ret;
848 }
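
/*
 * Worked example of the index arithmetic above (made-up numbers): with
 * base_pfn == 0x80000 and order_per_bit == 0, a successful bitmap search
 * returning bitmap_no == 0x400 maps to pfn == 0x80000 + (0x400 << 0) ==
 * 0x80400. If alloc_contig_range() then fails with -EBUSY, the bitmap bits
 * are cleared again and the next search starts at bitmap_no + mask + 1,
 * i.e. at the next aligned slot past the busy one.
 */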
849 
850 static struct page *__cma_alloc(struct cma *cma, unsigned long count,
851 		       unsigned int align, gfp_t gfp)
852 {
853 	struct page *page = NULL;
854 	int ret = -ENOMEM, r;
855 	unsigned long i;
856 	const char *name = cma ? cma->name : NULL;
857 
858 	if (!cma || !cma->count)
859 		return page;
860 
861 	pr_debug("%s(cma %p, name: %s, count %lu, align %d)\n", __func__,
862 		(void *)cma, cma->name, count, align);
863 
864 	if (!count)
865 		return page;
866 
867 	trace_cma_alloc_start(name, count, align);
868 
869 	for (r = 0; r < cma->nranges; r++) {
870 		page = NULL;
871 
872 		ret = cma_range_alloc(cma, &cma->ranges[r], count, align,
873 				       &page, gfp);
874 		if (ret != -EBUSY || page)
875 			break;
876 	}
877 
878 	/*
879 	 * CMA can allocate multiple page blocks, which results in different
880 	 * blocks being marked with different KASAN tags. Reset the tags to ignore
881 	 * those page blocks.
882 	 */
883 	if (page) {
884 		for (i = 0; i < count; i++)
885 			page_kasan_tag_reset(nth_page(page, i));
886 	}
887 
888 	if (ret && !(gfp & __GFP_NOWARN)) {
889 		pr_err_ratelimited("%s: %s: alloc failed, req-size: %lu pages, ret: %d\n",
890 				   __func__, cma->name, count, ret);
891 		cma_debug_show_areas(cma);
892 	}
893 
894 	pr_debug("%s(): returned %p\n", __func__, page);
895 	trace_cma_alloc_finish(name, page ? page_to_pfn(page) : 0,
896 			       page, count, align, ret);
897 	if (page) {
898 		count_vm_event(CMA_ALLOC_SUCCESS);
899 		cma_sysfs_account_success_pages(cma, count);
900 	} else {
901 		count_vm_event(CMA_ALLOC_FAIL);
902 		cma_sysfs_account_fail_pages(cma, count);
903 	}
904 
905 	return page;
906 }
907 
908 /**
909  * cma_alloc() - allocate pages from contiguous area
910  * @cma:   Contiguous memory region for which the allocation is performed.
911  * @count: Requested number of pages.
912  * @align: Requested alignment of pages (in PAGE_SIZE order).
913  * @no_warn: Avoid printing message about failed allocation
914  *
915  * This function allocates @count pages from the specified contiguous
916  * memory area.
917  */
918 struct page *cma_alloc(struct cma *cma, unsigned long count,
919 		       unsigned int align, bool no_warn)
920 {
921 	return __cma_alloc(cma, count, align, GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
922 }
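
/*
 * Usage sketch (illustrative only; "my_cma" is a hypothetical area set up at
 * boot): allocate 256 pages aligned to order 8 (1 MiB with 4K pages) and
 * release them again when done.
 *
 *	struct page *page;
 *
 *	page = cma_alloc(my_cma, 256, 8, false);
 *	if (page) {
 *		... use the 256 contiguous pages ...
 *		cma_release(my_cma, page, 256);
 *	}
 */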
923 
924 struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp)
925 {
926 	struct page *page;
927 
928 	if (WARN_ON(!order || !(gfp & __GFP_COMP)))
929 		return NULL;
930 
931 	page = __cma_alloc(cma, 1 << order, order, gfp);
932 
933 	return page ? page_folio(page) : NULL;
934 }
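
/*
 * Usage sketch (illustrative only; "my_cma" is hypothetical): callers such
 * as hugetlb allocate a compound folio and must pass __GFP_COMP along with
 * a non-zero order.
 *
 *	struct folio *folio;
 *
 *	folio = cma_alloc_folio(my_cma, 9, GFP_KERNEL | __GFP_COMP);
 *	if (folio) {
 *		... use the order-9 folio ...
 *		cma_free_folio(my_cma, folio);
 *	}
 */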
935 
936 bool cma_pages_valid(struct cma *cma, const struct page *pages,
937 		     unsigned long count)
938 {
939 	unsigned long pfn, end;
940 	int r;
941 	struct cma_memrange *cmr;
942 	bool ret;
943 
944 	if (!cma || !pages || count > cma->count)
945 		return false;
946 
947 	pfn = page_to_pfn(pages);
948 	ret = false;
949 
950 	for (r = 0; r < cma->nranges; r++) {
951 		cmr = &cma->ranges[r];
952 		end = cmr->base_pfn + cmr->count;
953 		if (pfn >= cmr->base_pfn && pfn < end) {
954 			ret = pfn + count <= end;
955 			break;
956 		}
957 	}
958 
959 	if (!ret)
960 		pr_debug("%s(page %p, count %lu)\n",
961 				__func__, (void *)pages, count);
962 
963 	return ret;
964 }
965 
966 /**
967  * cma_release() - release allocated pages
968  * @cma:   Contiguous memory region for which the allocation is performed.
969  * @pages: Allocated pages.
970  * @count: Number of allocated pages.
971  *
972  * This function releases memory allocated by cma_alloc().
973  * It returns false when the provided pages do not belong to the contiguous
974  * area, and true otherwise.
975  */
976 bool cma_release(struct cma *cma, const struct page *pages,
977 		 unsigned long count)
978 {
979 	struct cma_memrange *cmr;
980 	unsigned long pfn, end_pfn;
981 	int r;
982 
983 	pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);
984 
985 	if (!cma_pages_valid(cma, pages, count))
986 		return false;
987 
988 	pfn = page_to_pfn(pages);
989 	end_pfn = pfn + count;
990 
991 	for (r = 0; r < cma->nranges; r++) {
992 		cmr = &cma->ranges[r];
993 		if (pfn >= cmr->base_pfn &&
994 		    pfn < (cmr->base_pfn + cmr->count)) {
995 			VM_BUG_ON(end_pfn > cmr->base_pfn + cmr->count);
996 			break;
997 		}
998 	}
999 
1000 	if (r == cma->nranges)
1001 		return false;
1002 
1003 	free_contig_range(pfn, count);
1004 	cma_clear_bitmap(cma, cmr, pfn, count);
1005 	cma_sysfs_account_release_pages(cma, count);
1006 	trace_cma_release(cma->name, pfn, pages, count);
1007 
1008 	return true;
1009 }
1010 
1011 bool cma_free_folio(struct cma *cma, const struct folio *folio)
1012 {
1013 	if (WARN_ON(!folio_test_large(folio)))
1014 		return false;
1015 
1016 	return cma_release(cma, &folio->page, folio_nr_pages(folio));
1017 }
1018 
1019 int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
1020 {
1021 	int i;
1022 
1023 	for (i = 0; i < cma_area_count; i++) {
1024 		int ret = it(&cma_areas[i], data);
1025 
1026 		if (ret)
1027 			return ret;
1028 	}
1029 
1030 	return 0;
1031 }
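
/*
 * Usage sketch (illustrative only): walk all registered areas and print
 * their name and size; returning a non-zero value from the callback stops
 * the iteration and is passed back to the caller.
 *
 *	static int print_one_cma(struct cma *cma, void *data)
 *	{
 *		pr_info("%s: %lu bytes\n", cma_get_name(cma), cma_get_size(cma));
 *		return 0;
 *	}
 *
 *	cma_for_each_area(print_one_cma, NULL);
 */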
1032 
1033 bool cma_intersects(struct cma *cma, unsigned long start, unsigned long end)
1034 {
1035 	int r;
1036 	struct cma_memrange *cmr;
1037 	unsigned long rstart, rend;
1038 
1039 	for (r = 0; r < cma->nranges; r++) {
1040 		cmr = &cma->ranges[r];
1041 
1042 		rstart = PFN_PHYS(cmr->base_pfn);
1043 		rend = PFN_PHYS(cmr->base_pfn + cmr->count);
1044 		if (end < rstart)
1045 			continue;
1046 		if (start >= rend)
1047 			continue;
1048 		return true;
1049 	}
1050 
1051 	return false;
1052 }
1053 
1054 /*
1055  * Very basic function to reserve memory from a CMA area that has not
1056  * yet been activated. This is expected to be called early, when the
1057  * system is single-threaded, so there is no locking. The alignment
1058  * checking is restrictive - only pageblock-aligned areas
1059  * (CMA_MIN_ALIGNMENT_BYTES) may be reserved through this function.
1060  * This keeps things simple, and is enough for the current use case.
1061  *
1062  * The CMA bitmaps have not yet been allocated, so just start
1063  * reserving from the bottom up, using a PFN to keep track
1064  * of what has been reserved. Unreserving is not possible.
1065  *
1066  * The caller is responsible for initializing the page structures
1067  * in the area properly, since this just points to memblock-allocated
1068  * memory. The caller should subsequently use init_cma_pageblock to
1069  * set the migrate type and CMA stats for the pageblocks that were reserved.
1070  *
1071  * If the CMA area fails to activate later, memory obtained through
1072  * this interface is not handed to the page allocator; that is
1073  * the responsibility of the caller (as with normal memblock-allocated
1074  * memory).
1075  */
1076 void __init *cma_reserve_early(struct cma *cma, unsigned long size)
1077 {
1078 	int r;
1079 	struct cma_memrange *cmr;
1080 	unsigned long available;
1081 	void *ret = NULL;
1082 
1083 	if (!cma || !cma->count)
1084 		return NULL;
1085 	/*
1086 	 * Can only be called early in init.
1087 	 */
1088 	if (test_bit(CMA_ACTIVATED, &cma->flags))
1089 		return NULL;
1090 
1091 	if (!IS_ALIGNED(size, CMA_MIN_ALIGNMENT_BYTES))
1092 		return NULL;
1093 
1094 	if (!IS_ALIGNED(size, (PAGE_SIZE << cma->order_per_bit)))
1095 		return NULL;
1096 
1097 	size >>= PAGE_SHIFT;
1098 
1099 	if (size > cma->available_count)
1100 		return NULL;
1101 
1102 	for (r = 0; r < cma->nranges; r++) {
1103 		cmr = &cma->ranges[r];
1104 		available = cmr->count - (cmr->early_pfn - cmr->base_pfn);
1105 		if (size <= available) {
1106 			ret = phys_to_virt(PFN_PHYS(cmr->early_pfn));
1107 			cmr->early_pfn += size;
1108 			cma->available_count -= size;
1109 			return ret;
1110 		}
1111 	}
1112 
1113 	return ret;
1114 }
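
/*
 * Usage sketch (illustrative only; "my_cma" and the 32 MiB size are
 * hypothetical): an early boot user grabs part of a not-yet-activated area.
 * The size must be a multiple of CMA_MIN_ALIGNMENT_BYTES, and, as described
 * above, the caller is responsible for the page structures and for calling
 * init_cma_pageblock() on the reserved pageblocks later.
 *
 *	void *buf;
 *
 *	buf = cma_reserve_early(my_cma, SZ_32M);
 *	if (!buf)
 *		... fall back to a plain memblock allocation ...
 */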
1115