xref: /linux/mm/cma.c (revision a58f3dcf20ea9e7e968ee8369fd782bbb53dff73)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Contiguous Memory Allocator
4  *
5  * Copyright (c) 2010-2011 by Samsung Electronics.
6  * Copyright IBM Corporation, 2013
7  * Copyright LG Electronics Inc., 2014
8  * Written by:
9  *	Marek Szyprowski <m.szyprowski@samsung.com>
10  *	Michal Nazarewicz <mina86@mina86.com>
11  *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
12  *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
13  */
14 
15 #define pr_fmt(fmt) "cma: " fmt
16 
17 #define CREATE_TRACE_POINTS
18 
19 #include <linux/memblock.h>
20 #include <linux/err.h>
21 #include <linux/list.h>
22 #include <linux/mm.h>
23 #include <linux/sizes.h>
24 #include <linux/slab.h>
25 #include <linux/log2.h>
26 #include <linux/cma.h>
27 #include <linux/highmem.h>
28 #include <linux/io.h>
29 #include <linux/kmemleak.h>
30 #include <trace/events/cma.h>
31 
32 #include "internal.h"
33 #include "cma.h"
34 
35 struct cma cma_areas[MAX_CMA_AREAS];
36 unsigned int cma_area_count;
37 static DEFINE_MUTEX(cma_mutex);
38 
39 static int __init __cma_declare_contiguous_nid(phys_addr_t base,
40 			phys_addr_t size, phys_addr_t limit,
41 			phys_addr_t alignment, unsigned int order_per_bit,
42 			bool fixed, const char *name, struct cma **res_cma,
43 			int nid);
44 
45 phys_addr_t cma_get_base(const struct cma *cma)
46 {
47 	WARN_ON_ONCE(cma->nranges != 1);
48 	return PFN_PHYS(cma->ranges[0].base_pfn);
49 }
50 
51 unsigned long cma_get_size(const struct cma *cma)
52 {
53 	return cma->count << PAGE_SHIFT;
54 }
55 
56 const char *cma_get_name(const struct cma *cma)
57 {
58 	return cma->name;
59 }
60 
61 static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
62 					     unsigned int align_order)
63 {
64 	if (align_order <= cma->order_per_bit)
65 		return 0;
66 	return (1UL << (align_order - cma->order_per_bit)) - 1;
67 }
68 
69 /*
70  * Find the offset of the base PFN from the specified align_order.
71  * The value returned is represented in order_per_bits.
72  */
73 static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
74 					       const struct cma_memrange *cmr,
75 					       unsigned int align_order)
76 {
77 	return (cmr->base_pfn & ((1UL << align_order) - 1))
78 		>> cma->order_per_bit;
79 }
80 
81 static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
82 					      unsigned long pages)
83 {
84 	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
85 }
86 
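/*
 * Worked example of the bitmap granularity above (illustrative numbers,
 * not part of the original source): assume a range with
 * base_pfn == 0x1000 and order_per_bit == 2, i.e. one bitmap bit covers
 * four pages. Then:
 *
 *   cma_bitmap_aligned_mask(cma, 4)        == (1 << (4 - 2)) - 1 == 3,
 *     so an allocation that must be aligned to 16 pages may only start
 *     at a bitmap index that is a multiple of 4;
 *   cma_bitmap_aligned_offset(cma, cmr, 4) == (0x1000 & 15) >> 2 == 0,
 *     the base PFN is already 16-page aligned, so no extra offset;
 *   cma_bitmap_pages_to_bits(cma, 10)      == ALIGN(10, 4) >> 2 == 3,
 *     a 10-page request consumes three bits (12 pages of bookkeeping).
 */
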
87 static void cma_clear_bitmap(struct cma *cma, const struct cma_memrange *cmr,
88 			     unsigned long pfn, unsigned long count)
89 {
90 	unsigned long bitmap_no, bitmap_count;
91 	unsigned long flags;
92 
93 	bitmap_no = (pfn - cmr->base_pfn) >> cma->order_per_bit;
94 	bitmap_count = cma_bitmap_pages_to_bits(cma, count);
95 
96 	spin_lock_irqsave(&cma->lock, flags);
97 	bitmap_clear(cmr->bitmap, bitmap_no, bitmap_count);
98 	cma->available_count += count;
99 	spin_unlock_irqrestore(&cma->lock, flags);
100 }
101 
102 /*
103  * Check if a CMA area contains no ranges that intersect with
104  * multiple zones. Store the result in the flags in case
105  * this gets called more than once.
106  */
107 bool cma_validate_zones(struct cma *cma)
108 {
109 	int r;
110 	unsigned long base_pfn;
111 	struct cma_memrange *cmr;
112 	bool valid_bit_set;
113 
114 	/*
115 	 * If already validated, return result of previous check.
116 	 * Either the valid or invalid bit will be set if this
117 	 * check has already been done. If neither is set, the
118 	 * check has not been performed yet.
119 	 */
120 	valid_bit_set = test_bit(CMA_ZONES_VALID, &cma->flags);
121 	if (valid_bit_set || test_bit(CMA_ZONES_INVALID, &cma->flags))
122 		return valid_bit_set;
123 
124 	for (r = 0; r < cma->nranges; r++) {
125 		cmr = &cma->ranges[r];
126 		base_pfn = cmr->base_pfn;
127 
128 		/*
129 		 * alloc_contig_range() requires the pfn range specified
130 		 * to be in the same zone. Simplify by forcing the entire
131 		 * CMA resv range to be in the same zone.
132 		 */
133 		WARN_ON_ONCE(!pfn_valid(base_pfn));
134 		if (pfn_range_intersects_zones(cma->nid, base_pfn, cmr->count)) {
135 			set_bit(CMA_ZONES_INVALID, &cma->flags);
136 			return false;
137 		}
138 	}
139 
140 	set_bit(CMA_ZONES_VALID, &cma->flags);
141 
142 	return true;
143 }
144 
145 static void __init cma_activate_area(struct cma *cma)
146 {
147 	unsigned long pfn, end_pfn;
148 	int allocrange, r;
149 	struct cma_memrange *cmr;
150 	unsigned long bitmap_count, count;
151 
152 	for (allocrange = 0; allocrange < cma->nranges; allocrange++) {
153 		cmr = &cma->ranges[allocrange];
154 		cmr->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma, cmr),
155 					    GFP_KERNEL);
156 		if (!cmr->bitmap)
157 			goto cleanup;
158 	}
159 
160 	if (!cma_validate_zones(cma))
161 		goto cleanup;
162 
163 	for (r = 0; r < cma->nranges; r++) {
164 		cmr = &cma->ranges[r];
165 		if (cmr->early_pfn != cmr->base_pfn) {
166 			count = cmr->early_pfn - cmr->base_pfn;
167 			bitmap_count = cma_bitmap_pages_to_bits(cma, count);
168 			bitmap_set(cmr->bitmap, 0, bitmap_count);
169 		}
170 
171 		for (pfn = cmr->early_pfn; pfn < cmr->base_pfn + cmr->count;
172 		     pfn += pageblock_nr_pages)
173 			init_cma_reserved_pageblock(pfn_to_page(pfn));
174 	}
175 
176 	spin_lock_init(&cma->lock);
177 
178 #ifdef CONFIG_CMA_DEBUGFS
179 	INIT_HLIST_HEAD(&cma->mem_head);
180 	spin_lock_init(&cma->mem_head_lock);
181 #endif
182 	set_bit(CMA_ACTIVATED, &cma->flags);
183 
184 	return;
185 
186 cleanup:
187 	for (r = 0; r < allocrange; r++)
188 		bitmap_free(cma->ranges[r].bitmap);
189 
190 	/* Expose all pages to the buddy allocator; they are useless for CMA. */
191 	if (!test_bit(CMA_RESERVE_PAGES_ON_ERROR, &cma->flags)) {
192 		for (r = 0; r < allocrange; r++) {
193 			cmr = &cma->ranges[r];
194 			end_pfn = cmr->base_pfn + cmr->count;
195 			for (pfn = cmr->early_pfn; pfn < end_pfn; pfn++)
196 				free_reserved_page(pfn_to_page(pfn));
197 		}
198 	}
199 	totalcma_pages -= cma->count;
200 	cma->available_count = cma->count = 0;
201 	pr_err("CMA area %s could not be activated\n", cma->name);
202 }
203 
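/*
 * Illustrative note on the early_pfn handling above (not part of the
 * original file): if, say, 512 pages of a range were handed out through
 * cma_reserve_early() before activation, then early_pfn == base_pfn + 512.
 * cma_activate_area() marks the corresponding leading bits of the range's
 * bitmap as allocated and only initializes the pageblocks from early_pfn
 * onwards as CMA, so the early reservation is never handed back to the
 * page allocator.
 */
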
204 static int __init cma_init_reserved_areas(void)
205 {
206 	int i;
207 
208 	for (i = 0; i < cma_area_count; i++)
209 		cma_activate_area(&cma_areas[i]);
210 
211 	return 0;
212 }
213 core_initcall(cma_init_reserved_areas);
214 
215 void __init cma_reserve_pages_on_error(struct cma *cma)
216 {
217 	set_bit(CMA_RESERVE_PAGES_ON_ERROR, &cma->flags);
218 }
219 
220 static int __init cma_new_area(const char *name, phys_addr_t size,
221 			       unsigned int order_per_bit,
222 			       struct cma **res_cma)
223 {
224 	struct cma *cma;
225 
226 	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
227 		pr_err("Not enough slots for CMA reserved regions!\n");
228 		return -ENOSPC;
229 	}
230 
231 	/*
232 	 * Each reserved area must be initialised later, when more kernel
233 	 * subsystems (like slab allocator) are available.
234 	 */
235 	cma = &cma_areas[cma_area_count];
236 	cma_area_count++;
237 
238 	if (name)
239 		snprintf(cma->name, CMA_MAX_NAME, "%s", name);
240 	else
241 		snprintf(cma->name, CMA_MAX_NAME, "cma%d", cma_area_count);
242 
243 	cma->available_count = cma->count = size >> PAGE_SHIFT;
244 	cma->order_per_bit = order_per_bit;
245 	*res_cma = cma;
246 	totalcma_pages += cma->count;
247 
248 	return 0;
249 }
250 
251 static void __init cma_drop_area(struct cma *cma)
252 {
253 	totalcma_pages -= cma->count;
254 	cma_area_count--;
255 }
256 
257 /**
258  * cma_init_reserved_mem() - create custom contiguous area from reserved memory
259  * @base: Base address of the reserved area
260  * @size: Size of the reserved area (in bytes).
261  * @order_per_bit: Order of pages represented by one bit on bitmap.
262  * @name: The name of the area. If this parameter is NULL, the name of
263  *        the area will be set to "cmaN", where N is a running counter of
264  *        used areas.
265  * @res_cma: Pointer to store the created cma region.
266  *
267  * This function creates custom contiguous area from already reserved memory.
268  */
269 int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
270 				 unsigned int order_per_bit,
271 				 const char *name,
272 				 struct cma **res_cma)
273 {
274 	struct cma *cma;
275 	int ret;
276 
277 	/* Sanity checks */
278 	if (!size || !memblock_is_region_reserved(base, size))
279 		return -EINVAL;
280 
281 	/*
282 	 * CMA uses CMA_MIN_ALIGNMENT_BYTES as alignment requirement which
283 	 * needs pageblock_order to be initialized. Let's enforce it.
284 	 */
285 	if (!pageblock_order) {
286 		pr_err("pageblock_order not yet initialized. Called during early boot?\n");
287 		return -EINVAL;
288 	}
289 
290 	/* ensure minimal alignment required by mm core */
291 	if (!IS_ALIGNED(base | size, CMA_MIN_ALIGNMENT_BYTES))
292 		return -EINVAL;
293 
294 	ret = cma_new_area(name, size, order_per_bit, &cma);
295 	if (ret != 0)
296 		return ret;
297 
298 	cma->ranges[0].base_pfn = PFN_DOWN(base);
299 	cma->ranges[0].early_pfn = PFN_DOWN(base);
300 	cma->ranges[0].count = cma->count;
301 	cma->nranges = 1;
302 	cma->nid = NUMA_NO_NODE;
303 
304 	*res_cma = cma;
305 
306 	return 0;
307 }
308 
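/*
 * Minimal usage sketch for cma_init_reserved_mem() (illustrative only,
 * not part of the original file): early boot code that already owns a
 * memblock reservation can wrap it in a CMA area. The helper name, the
 * 16M size and the "example_reserved" area name are hypothetical.
 */
static int __init example_setup_reserved_cma(struct cma **res)
{
	phys_addr_t size = SZ_16M;
	phys_addr_t base;

	/* Grab a pageblock-aligned block from memblock first. */
	base = memblock_phys_alloc(size, CMA_MIN_ALIGNMENT_BYTES);
	if (!base)
		return -ENOMEM;

	/* Hand the already-reserved region over to CMA. */
	return cma_init_reserved_mem(base, size, 0, "example_reserved", res);
}
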
309 /*
310  * Structure used while walking physical memory ranges and finding out
311  * which one(s) to use for a CMA area.
312  */
313 struct cma_init_memrange {
314 	phys_addr_t base;
315 	phys_addr_t size;
316 	struct list_head list;
317 };
318 
319 /*
320  * Work array used during CMA initialization.
321  */
322 static struct cma_init_memrange memranges[CMA_MAX_RANGES] __initdata;
323 
324 static bool __init revsizecmp(struct cma_init_memrange *mlp,
325 			      struct cma_init_memrange *mrp)
326 {
327 	return mlp->size > mrp->size;
328 }
329 
330 static bool __init basecmp(struct cma_init_memrange *mlp,
331 			   struct cma_init_memrange *mrp)
332 {
333 	return mlp->base < mrp->base;
334 }
335 
336 /*
337  * Helper function to create sorted lists.
338  */
339 static void __init list_insert_sorted(
340 	struct list_head *ranges,
341 	struct cma_init_memrange *mrp,
342 	bool (*cmp)(struct cma_init_memrange *lh, struct cma_init_memrange *rh))
343 {
344 	struct list_head *mp;
345 	struct cma_init_memrange *mlp;
346 
347 	if (list_empty(ranges))
348 		list_add(&mrp->list, ranges);
349 	else {
350 		list_for_each(mp, ranges) {
351 			mlp = list_entry(mp, struct cma_init_memrange, list);
352 			if (cmp(mlp, mrp))
353 				break;
354 		}
355 		__list_add(&mrp->list, mlp->list.prev, &mlp->list);
356 	}
357 }
358 
359 /*
360  * Create CMA areas with a total size of @total_size. A normal allocation
361  * for one area is tried first. If that fails, the biggest memblock
362  * ranges above 4G are selected, and allocated bottom up.
363  *
364  * The list handling here is not particularly efficient, but this function
365  * will only be called during boot, and the lists operated on have fewer
366  * than CMA_MAX_RANGES elements (default value: 8).
367  */
368 int __init cma_declare_contiguous_multi(phys_addr_t total_size,
369 			phys_addr_t align, unsigned int order_per_bit,
370 			const char *name, struct cma **res_cma, int nid)
371 {
372 	phys_addr_t start, end;
373 	phys_addr_t size, sizesum, sizeleft;
374 	struct cma_init_memrange *mrp, *mlp, *failed;
375 	struct cma_memrange *cmrp;
376 	LIST_HEAD(ranges);
377 	LIST_HEAD(final_ranges);
378 	struct list_head *mp, *next;
379 	int ret, nr = 1;
380 	u64 i;
381 	struct cma *cma;
382 
383 	/*
384 	 * First, try it the normal way, producing just one range.
385 	 */
386 	ret = __cma_declare_contiguous_nid(0, total_size, 0, align,
387 			order_per_bit, false, name, res_cma, nid);
388 	if (ret != -ENOMEM)
389 		goto out;
390 
391 	/*
392 	 * Couldn't find one range that fits our needs, so try multiple
393 	 * ranges.
394 	 *
395 	 * No need to do the alignment checks here, the call to
396 	 * __cma_declare_contiguous_nid() above would have caught
397 	 * any issues. With the checks, we know that:
398 	 *
399 	 * - @align is a power of 2
400 	 * - @align is >= pageblock alignment
401 	 * - @size is aligned to @align and to @order_per_bit
402 	 *
403 	 * So, as long as we create ranges that have a base
404 	 * aligned to @align, and a size that is aligned to
405 	 * both @align and @order_per_bit, things will work out.
406 	 */
407 	nr = 0;
408 	sizesum = 0;
409 	failed = NULL;
410 
411 	ret = cma_new_area(name, total_size, order_per_bit, &cma);
412 	if (ret != 0)
413 		goto out;
414 
415 	align = max_t(phys_addr_t, align, CMA_MIN_ALIGNMENT_BYTES);
416 	/*
417 	 * Create a list of ranges above 4G, largest range first.
418 	 */
419 	for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &start, &end, NULL) {
420 		if (upper_32_bits(start) == 0)
421 			continue;
422 
423 		start = ALIGN(start, align);
424 		if (start >= end)
425 			continue;
426 
427 		end = ALIGN_DOWN(end, align);
428 		if (end <= start)
429 			continue;
430 
431 		size = end - start;
432 		size = ALIGN_DOWN(size, (PAGE_SIZE << order_per_bit));
433 		if (!size)
434 			continue;
435 		sizesum += size;
436 
437 		pr_debug("consider %016llx - %016llx\n", (u64)start, (u64)end);
438 
439 		/*
440 		 * If we haven't yet used the maximum number of
441 		 * areas, grab a new one.
442 		 *
443 		 * If all slots are in use, check whether this range is
444 		 * at least as large as the smallest one already recorded.
445 		 * If it is, re-use that smallest element.
446 		 */
447 		if (nr < CMA_MAX_RANGES)
448 			mrp = &memranges[nr++];
449 		else {
450 			mrp = list_last_entry(&ranges,
451 					      struct cma_init_memrange, list);
452 			if (size < mrp->size)
453 				continue;
454 			list_del(&mrp->list);
455 			sizesum -= mrp->size;
456 			pr_debug("deleted %016llx - %016llx from the list\n",
457 				(u64)mrp->base, (u64)mrp->base + mrp->size);
458 		}
459 		mrp->base = start;
460 		mrp->size = size;
461 
462 		/*
463 		 * Now do a sorted insert.
464 		 */
465 		list_insert_sorted(&ranges, mrp, revsizecmp);
466 		pr_debug("added %016llx - %016llx to the list\n",
467 		    (u64)mrp->base, (u64)mrp->base + size);
468 		pr_debug("total size now %llu\n", (u64)sizesum);
469 	}
470 
471 	/*
472 	 * There is not enough room in the CMA_MAX_RANGES largest
473 	 * ranges, so bail out.
474 	 */
475 	if (sizesum < total_size) {
476 		cma_drop_area(cma);
477 		ret = -ENOMEM;
478 		goto out;
479 	}
480 
481 	/*
482 	 * Found ranges that provide enough combined space.
483 	 * Now, sort them by address, smallest first, because we
484 	 * want to mimic a bottom-up memblock allocation.
485 	 */
486 	sizesum = 0;
487 	list_for_each_safe(mp, next, &ranges) {
488 		mlp = list_entry(mp, struct cma_init_memrange, list);
489 		list_del(mp);
490 		list_insert_sorted(&final_ranges, mlp, basecmp);
491 		sizesum += mlp->size;
492 		if (sizesum >= total_size)
493 			break;
494 	}
495 
496 	/*
497 	 * Walk the final list, and add a CMA range for
498 	 * each range, possibly not using the last one fully.
499 	 */
500 	nr = 0;
501 	sizeleft = total_size;
502 	list_for_each(mp, &final_ranges) {
503 		mlp = list_entry(mp, struct cma_init_memrange, list);
504 		size = min(sizeleft, mlp->size);
505 		if (memblock_reserve(mlp->base, size)) {
506 			/*
507 			 * Unexpected error. Could go on to
508 			 * the next one, but just abort to
509 			 * be safe.
510 			 */
511 			failed = mlp;
512 			break;
513 		}
514 
515 		pr_debug("created region %d: %016llx - %016llx\n",
516 		    nr, (u64)mlp->base, (u64)mlp->base + size);
517 		cmrp = &cma->ranges[nr++];
518 		cmrp->base_pfn = PHYS_PFN(mlp->base);
519 		cmrp->early_pfn = cmrp->base_pfn;
520 		cmrp->count = size >> PAGE_SHIFT;
521 
522 		sizeleft -= size;
523 		if (sizeleft == 0)
524 			break;
525 	}
526 
527 	if (failed) {
528 		list_for_each(mp, &final_ranges) {
529 			mlp = list_entry(mp, struct cma_init_memrange, list);
530 			if (mlp == failed)
531 				break;
532 			memblock_phys_free(mlp->base, mlp->size);
533 		}
534 		cma_drop_area(cma);
535 		ret = -ENOMEM;
536 		goto out;
537 	}
538 
539 	cma->nranges = nr;
540 	cma->nid = nid;
541 	*res_cma = cma;
542 
543 out:
544 	if (ret != 0)
545 		pr_err("Failed to reserve %lu MiB\n",
546 			(unsigned long)total_size / SZ_1M);
547 	else
548 		pr_info("Reserved %lu MiB in %d range%s\n",
549 			(unsigned long)total_size / SZ_1M, nr,
550 			nr > 1 ? "s" : "");
551 	return ret;
552 }
553 
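/*
 * Illustrative sketch (not part of the original file): an early-boot
 * caller on a 64-bit system could ask for 8G of CMA that is allowed to
 * be split across several physical ranges on the given node. The helper
 * name and the "example_multi" area name are hypothetical.
 */
static int __init example_declare_multi(int nid, struct cma **res)
{
	/* Up to CMA_MAX_RANGES physical ranges may back this one area. */
	return cma_declare_contiguous_multi(SZ_8G, CMA_MIN_ALIGNMENT_BYTES,
					    0, "example_multi", res, nid);
}
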
554 /**
555  * cma_declare_contiguous_nid() - reserve custom contiguous area
556  * @base: Base address of the reserved area (optional, use 0 for any)
557  * @size: Size of the reserved area (in bytes).
558  * @limit: End address of the reserved memory (optional, 0 for any).
559  * @alignment: Alignment for the CMA area, should be a power of 2 or zero
560  * @order_per_bit: Order of pages represented by one bit on bitmap.
561  * @fixed: hint about where to place the reserved area
562  * @name: The name of the area. See function cma_init_reserved_mem()
563  * @res_cma: Pointer to store the created cma region.
564  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
565  *
566  * This function reserves memory from the early allocator. It should be
567  * called by arch-specific code once the early allocator (memblock or bootmem)
568  * has been activated and all other subsystems have already allocated/reserved
569  * memory. It allows the creation of custom reserved areas.
570  *
571  * If @fixed is true, reserve contiguous area at exactly @base.  If false,
572  * reserve in range from @base to @limit.
573  */
574 int __init cma_declare_contiguous_nid(phys_addr_t base,
575 			phys_addr_t size, phys_addr_t limit,
576 			phys_addr_t alignment, unsigned int order_per_bit,
577 			bool fixed, const char *name, struct cma **res_cma,
578 			int nid)
579 {
580 	int ret;
581 
582 	ret = __cma_declare_contiguous_nid(base, size, limit, alignment,
583 			order_per_bit, fixed, name, res_cma, nid);
584 	if (ret != 0)
585 		pr_err("Failed to reserve %ld MiB\n",
586 				(unsigned long)size / SZ_1M);
587 	else
588 		pr_info("Reserved %ld MiB at %pa\n",
589 				(unsigned long)size / SZ_1M, &base);
590 
591 	return ret;
592 }
593 
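/*
 * Illustrative sketch (not part of the original file): a plain
 * single-range reservation, similar in spirit to what dma-contiguous
 * setup code does. The helper name, the 64M size and the
 * "example_single" area name are hypothetical.
 */
static int __init example_declare_single(int nid, struct cma **res)
{
	/* 64M anywhere below the memblock end, pageblock aligned. */
	return cma_declare_contiguous_nid(0, SZ_64M, 0, 0, 0, false,
					  "example_single", res, nid);
}
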
594 static int __init __cma_declare_contiguous_nid(phys_addr_t base,
595 			phys_addr_t size, phys_addr_t limit,
596 			phys_addr_t alignment, unsigned int order_per_bit,
597 			bool fixed, const char *name, struct cma **res_cma,
598 			int nid)
599 {
600 	phys_addr_t memblock_end = memblock_end_of_DRAM();
601 	phys_addr_t highmem_start;
602 	int ret;
603 
604 	/*
605 	 * We can't use __pa(high_memory) directly, since high_memory
606 	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
607 	 * complain. Find the boundary by adding one to the last valid
608 	 * address.
609 	 */
610 	highmem_start = __pa(high_memory - 1) + 1;
611 	pr_debug("%s(size %pa, base %pa, limit %pa, alignment %pa)\n",
612 		__func__, &size, &base, &limit, &alignment);
613 
614 	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
615 		pr_err("Not enough slots for CMA reserved regions!\n");
616 		return -ENOSPC;
617 	}
618 
619 	if (!size)
620 		return -EINVAL;
621 
622 	if (alignment && !is_power_of_2(alignment))
623 		return -EINVAL;
624 
625 	if (!IS_ENABLED(CONFIG_NUMA))
626 		nid = NUMA_NO_NODE;
627 
628 	/* Sanitise input arguments. */
629 	alignment = max_t(phys_addr_t, alignment, CMA_MIN_ALIGNMENT_BYTES);
630 	if (fixed && base & (alignment - 1)) {
631 		pr_err("Region at %pa must be aligned to %pa bytes\n",
632 			&base, &alignment);
633 		return -EINVAL;
634 	}
635 	base = ALIGN(base, alignment);
636 	size = ALIGN(size, alignment);
637 	limit &= ~(alignment - 1);
638 
639 	if (!base)
640 		fixed = false;
641 
642 	/* size should be aligned with order_per_bit */
643 	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
644 		return -EINVAL;
645 
646 	/*
647 	 * If allocating at a fixed base the request region must not cross the
648 	 * low/high memory boundary.
649 	 */
650 	if (fixed && base < highmem_start && base + size > highmem_start) {
651 		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
652 			&base, &highmem_start);
653 		return -EINVAL;
654 	}
655 
656 	/*
657 	 * If the limit is unspecified or above the memblock end, its effective
658 	 * value will be the memblock end. Set it explicitly to simplify further
659 	 * checks.
660 	 */
661 	if (limit == 0 || limit > memblock_end)
662 		limit = memblock_end;
663 
664 	if (base + size > limit) {
665 		pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
666 			&size, &base, &limit);
667 		return -EINVAL;
668 	}
669 
670 	/* Reserve memory */
671 	if (fixed) {
672 		if (memblock_is_region_reserved(base, size) ||
673 		    memblock_reserve(base, size) < 0) {
674 			return -EBUSY;
675 		}
676 	} else {
677 		phys_addr_t addr = 0;
678 
679 		/*
680 		 * If there is enough memory, try a bottom-up allocation first.
681 		 * It will place the new cma area close to the start of the node
682 		 * and guarantee that the compaction is moving pages out of the
683 		 * cma area and not into it.
684 		 * Avoid using first 4GB to not interfere with constrained zones
685 		 * like DMA/DMA32.
686 		 */
687 #ifdef CONFIG_PHYS_ADDR_T_64BIT
688 		if (!memblock_bottom_up() && memblock_end >= SZ_4G + size) {
689 			memblock_set_bottom_up(true);
690 			addr = memblock_alloc_range_nid(size, alignment, SZ_4G,
691 							limit, nid, true);
692 			memblock_set_bottom_up(false);
693 		}
694 #endif
695 
696 		/*
697 		 * All pages in the reserved area must come from the same zone.
698 		 * If the requested region crosses the low/high memory boundary,
699 		 * try allocating from high memory first and fall back to low
700 		 * memory in case of failure.
701 		 */
702 		if (!addr && base < highmem_start && limit > highmem_start) {
703 			addr = memblock_alloc_range_nid(size, alignment,
704 					highmem_start, limit, nid, true);
705 			limit = highmem_start;
706 		}
707 
708 		if (!addr) {
709 			addr = memblock_alloc_range_nid(size, alignment, base,
710 					limit, nid, true);
711 			if (!addr)
712 				return -ENOMEM;
713 		}
714 
715 		/*
716 		 * kmemleak scans/reads tracked objects for pointers to other
717 		 * objects but this address isn't mapped and accessible
718 		 */
719 		kmemleak_ignore_phys(addr);
720 		base = addr;
721 	}
722 
723 	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
724 	if (ret)
725 		memblock_phys_free(base, size);
726 	else
727 		(*res_cma)->nid = nid;
728 
729 	return ret;
730 }
731 
732 static void cma_debug_show_areas(struct cma *cma)
733 {
734 	unsigned long next_zero_bit, next_set_bit, nr_zero;
735 	unsigned long start;
736 	unsigned long nr_part;
737 	unsigned long nbits;
738 	int r;
739 	struct cma_memrange *cmr;
740 
741 	spin_lock_irq(&cma->lock);
742 	pr_info("number of available pages: ");
743 	for (r = 0; r < cma->nranges; r++) {
744 		cmr = &cma->ranges[r];
745 
746 		start = 0;
747 		nbits = cma_bitmap_maxno(cma, cmr);
748 
749 		pr_info("range %d: ", r);
750 		for (;;) {
751 			next_zero_bit = find_next_zero_bit(cmr->bitmap,
752 							   nbits, start);
753 			if (next_zero_bit >= nbits)
754 				break;
755 			next_set_bit = find_next_bit(cmr->bitmap, nbits,
756 						     next_zero_bit);
757 			nr_zero = next_set_bit - next_zero_bit;
758 			nr_part = nr_zero << cma->order_per_bit;
759 			pr_cont("%s%lu@%lu", start ? "+" : "", nr_part,
760 				next_zero_bit);
761 			start = next_zero_bit + nr_zero;
762 		}
763 		pr_info("\n");
764 	}
765 	pr_cont("=> %lu free of %lu total pages\n", cma->available_count,
766 			cma->count);
767 	spin_unlock_irq(&cma->lock);
768 }
769 
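/*
 * Illustrative example of the output produced above (hypothetical
 * numbers): for an area with order_per_bit == 0 whose first range has
 * free stretches of 64 pages at bit 0 and 128 pages at bit 512, the log
 * would look roughly like:
 *
 *   cma: number of available pages:
 *   cma: range 0: 64@0+128@512
 *   => 192 free of 1024 total pages
 */
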
770 static int cma_range_alloc(struct cma *cma, struct cma_memrange *cmr,
771 				unsigned long count, unsigned int align,
772 				struct page **pagep, gfp_t gfp)
773 {
774 	unsigned long mask, offset;
775 	unsigned long pfn = -1;
776 	unsigned long start = 0;
777 	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
778 	int ret = -EBUSY;
779 	struct page *page = NULL;
780 
781 	mask = cma_bitmap_aligned_mask(cma, align);
782 	offset = cma_bitmap_aligned_offset(cma, cmr, align);
783 	bitmap_maxno = cma_bitmap_maxno(cma, cmr);
784 	bitmap_count = cma_bitmap_pages_to_bits(cma, count);
785 
786 	if (bitmap_count > bitmap_maxno)
787 		goto out;
788 
789 	for (;;) {
790 		spin_lock_irq(&cma->lock);
791 		/*
792 		 * If the request is larger than the available number
793 		 * of pages, stop right away.
794 		 */
795 		if (count > cma->available_count) {
796 			spin_unlock_irq(&cma->lock);
797 			break;
798 		}
799 		bitmap_no = bitmap_find_next_zero_area_off(cmr->bitmap,
800 				bitmap_maxno, start, bitmap_count, mask,
801 				offset);
802 		if (bitmap_no >= bitmap_maxno) {
803 			spin_unlock_irq(&cma->lock);
804 			break;
805 		}
806 		bitmap_set(cmr->bitmap, bitmap_no, bitmap_count);
807 		cma->available_count -= count;
808 		/*
809 		 * It's safe to drop the lock here. We've marked this region for
810 		 * our exclusive use. If the migration fails we will take the
811 		 * lock again and unmark it.
812 		 */
813 		spin_unlock_irq(&cma->lock);
814 
815 		pfn = cmr->base_pfn + (bitmap_no << cma->order_per_bit);
816 		mutex_lock(&cma_mutex);
817 		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA, gfp);
818 		mutex_unlock(&cma_mutex);
819 		if (ret == 0) {
820 			page = pfn_to_page(pfn);
821 			break;
822 		}
823 
824 		cma_clear_bitmap(cma, cmr, pfn, count);
825 		if (ret != -EBUSY)
826 			break;
827 
828 		pr_debug("%s(): memory range at pfn 0x%lx %p is busy, retrying\n",
829 			 __func__, pfn, pfn_to_page(pfn));
830 
831 		trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn),
832 					   count, align);
833 		/* try again with a bit different memory target */
834 		start = bitmap_no + mask + 1;
835 	}
836 out:
837 	*pagep = page;
838 	return ret;
839 }
840 
841 static struct page *__cma_alloc(struct cma *cma, unsigned long count,
842 		       unsigned int align, gfp_t gfp)
843 {
844 	struct page *page = NULL;
845 	int ret = -ENOMEM, r;
846 	unsigned long i;
847 	const char *name = cma ? cma->name : NULL;
848 
849 	trace_cma_alloc_start(name, count, align);
850 
851 	if (!cma || !cma->count)
852 		return page;
853 
854 	pr_debug("%s(cma %p, name: %s, count %lu, align %d)\n", __func__,
855 		(void *)cma, cma->name, count, align);
856 
857 	if (!count)
858 		return page;
859 
860 	for (r = 0; r < cma->nranges; r++) {
861 		page = NULL;
862 
863 		ret = cma_range_alloc(cma, &cma->ranges[r], count, align,
864 				       &page, gfp);
865 		if (ret != -EBUSY || page)
866 			break;
867 	}
868 
869 	/*
870 	 * CMA can allocate multiple page blocks, which results in different
871 	 * blocks being marked with different tags. Reset the tags to ignore
872 	 * those page blocks.
873 	 */
874 	if (page) {
875 		for (i = 0; i < count; i++)
876 			page_kasan_tag_reset(nth_page(page, i));
877 	}
878 
879 	if (ret && !(gfp & __GFP_NOWARN)) {
880 		pr_err_ratelimited("%s: %s: alloc failed, req-size: %lu pages, ret: %d\n",
881 				   __func__, cma->name, count, ret);
882 		cma_debug_show_areas(cma);
883 	}
884 
885 	pr_debug("%s(): returned %p\n", __func__, page);
886 	trace_cma_alloc_finish(name, page ? page_to_pfn(page) : 0,
887 			       page, count, align, ret);
888 	if (page) {
889 		count_vm_event(CMA_ALLOC_SUCCESS);
890 		cma_sysfs_account_success_pages(cma, count);
891 	} else {
892 		count_vm_event(CMA_ALLOC_FAIL);
893 		cma_sysfs_account_fail_pages(cma, count);
894 	}
895 
896 	return page;
897 }
898 
899 /**
900  * cma_alloc() - allocate pages from contiguous area
901  * @cma:   Contiguous memory region for which the allocation is performed.
902  * @count: Requested number of pages.
903  * @align: Requested alignment of pages (in PAGE_SIZE order).
904  * @no_warn: Avoid printing message about failed allocation
905  *
906  * This function allocates part of the contiguous memory from the
907  * specified contiguous memory area.
908  */
909 struct page *cma_alloc(struct cma *cma, unsigned long count,
910 		       unsigned int align, bool no_warn)
911 {
912 	return __cma_alloc(cma, count, align, GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
913 }
914 
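/*
 * Minimal usage sketch for cma_alloc()/cma_release() (illustrative only,
 * not part of the original file): allocate a physically contiguous 1M
 * buffer from a previously declared area and release it again. The
 * helper name and the example_area argument are hypothetical.
 */
static int example_use_area(struct cma *example_area)
{
	unsigned long nr_pages = SZ_1M >> PAGE_SHIFT;
	struct page *page;

	/* Request nr_pages contiguous pages with no extra alignment. */
	page = cma_alloc(example_area, nr_pages, 0, false);
	if (!page)
		return -ENOMEM;

	/* ... use the buffer, e.g. map it for DMA ... */

	/* The count passed to cma_release() must match the allocation. */
	if (!cma_release(example_area, page, nr_pages))
		return -EINVAL;

	return 0;
}
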
915 struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp)
916 {
917 	struct page *page;
918 
919 	if (WARN_ON(!order || !(gfp & __GFP_COMP)))
920 		return NULL;
921 
922 	page = __cma_alloc(cma, 1 << order, order, gfp);
923 
924 	return page ? page_folio(page) : NULL;
925 }
926 
927 bool cma_pages_valid(struct cma *cma, const struct page *pages,
928 		     unsigned long count)
929 {
930 	unsigned long pfn, end;
931 	int r;
932 	struct cma_memrange *cmr;
933 	bool ret;
934 
935 	if (!cma || !pages || count > cma->count)
936 		return false;
937 
938 	pfn = page_to_pfn(pages);
939 	ret = false;
940 
941 	for (r = 0; r < cma->nranges; r++) {
942 		cmr = &cma->ranges[r];
943 		end = cmr->base_pfn + cmr->count;
944 		if (pfn >= cmr->base_pfn && pfn < end) {
945 			ret = pfn + count <= end;
946 			break;
947 		}
948 	}
949 
950 	if (!ret)
951 		pr_debug("%s(page %p, count %lu)\n",
952 				__func__, (void *)pages, count);
953 
954 	return ret;
955 }
956 
957 /**
958  * cma_release() - release allocated pages
959  * @cma:   Contiguous memory region for which the allocation is performed.
960  * @pages: Allocated pages.
961  * @count: Number of allocated pages.
962  *
963  * This function releases memory allocated by cma_alloc().
964  * It returns false when the provided pages do not belong to the contiguous
965  * area, and true otherwise.
966  */
967 bool cma_release(struct cma *cma, const struct page *pages,
968 		 unsigned long count)
969 {
970 	struct cma_memrange *cmr;
971 	unsigned long pfn, end_pfn;
972 	int r;
973 
974 	pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);
975 
976 	if (!cma_pages_valid(cma, pages, count))
977 		return false;
978 
979 	pfn = page_to_pfn(pages);
980 	end_pfn = pfn + count;
981 
982 	for (r = 0; r < cma->nranges; r++) {
983 		cmr = &cma->ranges[r];
984 		if (pfn >= cmr->base_pfn &&
985 		    pfn < (cmr->base_pfn + cmr->count)) {
986 			VM_BUG_ON(end_pfn > cmr->base_pfn + cmr->count);
987 			break;
988 		}
989 	}
990 
991 	if (r == cma->nranges)
992 		return false;
993 
994 	free_contig_range(pfn, count);
995 	cma_clear_bitmap(cma, cmr, pfn, count);
996 	cma_sysfs_account_release_pages(cma, count);
997 	trace_cma_release(cma->name, pfn, pages, count);
998 
999 	return true;
1000 }
1001 
1002 bool cma_free_folio(struct cma *cma, const struct folio *folio)
1003 {
1004 	if (WARN_ON(!folio_test_large(folio)))
1005 		return false;
1006 
1007 	return cma_release(cma, &folio->page, folio_nr_pages(folio));
1008 }
1009 
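/*
 * Illustrative sketch (not part of the original file): the folio
 * interface pairs cma_alloc_folio() with cma_free_folio(). The helper
 * name, the example_area argument and the order value are hypothetical.
 */
static struct folio *example_grab_folio(struct cma *example_area)
{
	const int order = 9;	/* e.g. a 2M folio with 4K base pages */

	/* __GFP_COMP is mandatory for the folio interface. */
	return cma_alloc_folio(example_area, order, GFP_KERNEL | __GFP_COMP);
}
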
1010 int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
1011 {
1012 	int i;
1013 
1014 	for (i = 0; i < cma_area_count; i++) {
1015 		int ret = it(&cma_areas[i], data);
1016 
1017 		if (ret)
1018 			return ret;
1019 	}
1020 
1021 	return 0;
1022 }
1023 
1024 bool cma_intersects(struct cma *cma, unsigned long start, unsigned long end)
1025 {
1026 	int r;
1027 	struct cma_memrange *cmr;
1028 	unsigned long rstart, rend;
1029 
1030 	for (r = 0; r < cma->nranges; r++) {
1031 		cmr = &cma->ranges[r];
1032 
1033 		rstart = PFN_PHYS(cmr->base_pfn);
1034 		rend = PFN_PHYS(cmr->base_pfn + cmr->count);
1035 		if (end < rstart)
1036 			continue;
1037 		if (start >= rend)
1038 			continue;
1039 		return true;
1040 	}
1041 
1042 	return false;
1043 }
1044 
1045 /*
1046  * Very basic function to reserve memory from a CMA area that has not
1047  * yet been activated. This is expected to be called early, when the
1048  * system is single-threaded, so there is no locking. The alignment
1049  * checking is restrictive - only pageblock-aligned areas
1050  * (CMA_MIN_ALIGNMENT_BYTES) may be reserved through this function.
1051  * This keeps things simple, and is enough for the current use case.
1052  *
1053  * The CMA bitmaps have not yet been allocated, so just start
1054  * reserving from the bottom up, using a PFN to keep track
1055  * of what has been reserved. Unreserving is not possible.
1056  *
1057  * The caller is responsible for initializing the page structures
1058  * in the area properly, since this just points to memblock-allocated
1059  * memory. The caller should subsequently use init_cma_pageblock to
1060  * set the migrate type and CMA stats for the pageblocks that were reserved.
1061  *
1062  * If the CMA area fails to activate later, memory obtained through
1063  * this interface is not handed to the page allocator, this is
1064  * the responsibility of the caller (e.g. like normal memblock-allocated
1065  * memory).
1066  */
1067 void __init *cma_reserve_early(struct cma *cma, unsigned long size)
1068 {
1069 	int r;
1070 	struct cma_memrange *cmr;
1071 	unsigned long available;
1072 	void *ret = NULL;
1073 
1074 	if (!cma || !cma->count)
1075 		return NULL;
1076 	/*
1077 	 * Can only be called early in init.
1078 	 */
1079 	if (test_bit(CMA_ACTIVATED, &cma->flags))
1080 		return NULL;
1081 
1082 	if (!IS_ALIGNED(size, CMA_MIN_ALIGNMENT_BYTES))
1083 		return NULL;
1084 
1085 	if (!IS_ALIGNED(size, (PAGE_SIZE << cma->order_per_bit)))
1086 		return NULL;
1087 
1088 	size >>= PAGE_SHIFT;
1089 
1090 	if (size > cma->available_count)
1091 		return NULL;
1092 
1093 	for (r = 0; r < cma->nranges; r++) {
1094 		cmr = &cma->ranges[r];
1095 		available = cmr->count - (cmr->early_pfn - cmr->base_pfn);
1096 		if (size <= available) {
1097 			ret = phys_to_virt(PFN_PHYS(cmr->early_pfn));
1098 			cmr->early_pfn += size;
1099 			cma->available_count -= size;
1100 			return ret;
1101 		}
1102 	}
1103 
1104 	return ret;
1105 }
1106
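/*
 * Illustrative sketch (not part of the original file): very early boot
 * code can carve memory out of a declared-but-not-yet-activated area.
 * The caller must later initialize the page structures and pageblocks
 * itself, as described above. The helper name and the single-pageblock
 * size are hypothetical.
 */
static void __init *example_early_carve(struct cma *example_area)
{
	/* One pageblock-sized, pageblock-aligned chunk from the area. */
	return cma_reserve_early(example_area, CMA_MIN_ALIGNMENT_BYTES);
}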