xref: /linux/mm/cma.c (revision ad789a85b1633ea84ad8ccf625588d6416877e69)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Contiguous Memory Allocator
4  *
5  * Copyright (c) 2010-2011 by Samsung Electronics.
6  * Copyright IBM Corporation, 2013
7  * Copyright LG Electronics Inc., 2014
8  * Written by:
9  *	Marek Szyprowski <m.szyprowski@samsung.com>
10  *	Michal Nazarewicz <mina86@mina86.com>
11  *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
12  *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
13  */
14 
15 #define pr_fmt(fmt) "cma: " fmt
16 
17 #define CREATE_TRACE_POINTS
18 
19 #include <linux/memblock.h>
20 #include <linux/err.h>
21 #include <linux/list.h>
22 #include <linux/mm.h>
23 #include <linux/sizes.h>
24 #include <linux/slab.h>
25 #include <linux/string.h>
26 #include <linux/string_choices.h>
27 #include <linux/log2.h>
28 #include <linux/cma.h>
29 #include <linux/highmem.h>
30 #include <linux/io.h>
31 #include <linux/kmemleak.h>
32 #include <trace/events/cma.h>
33 
34 #include "internal.h"
35 #include "cma.h"
36 
37 struct cma cma_areas[MAX_CMA_AREAS];
38 unsigned int cma_area_count;
39 
40 phys_addr_t cma_get_base(const struct cma *cma)
41 {
42 	WARN_ON_ONCE(cma->nranges != 1);
43 	return PFN_PHYS(cma->ranges[0].base_pfn);
44 }
45 
46 unsigned long cma_get_size(const struct cma *cma)
47 {
48 	return cma->count << PAGE_SHIFT;
49 }
50 
51 const char *cma_get_name(const struct cma *cma)
52 {
53 	return cma->name;
54 }
55 
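/*
 * Mask, in units of bitmap bits (order_per_bit chunks), used to align a
 * bitmap search to @align_order. Alignments no larger than one bitmap
 * chunk need no extra care, so the mask is 0 in that case.
 */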
56 static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
57 					     unsigned int align_order)
58 {
59 	if (align_order <= cma->order_per_bit)
60 		return 0;
61 	return (1UL << (align_order - cma->order_per_bit)) - 1;
62 }
63 
64 /*
65  * Find the offset of the base PFN from the specified align_order.
66  * The value returned is expressed in units of order_per_bit.
67  */
68 static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
69 					       const struct cma_memrange *cmr,
70 					       unsigned int align_order)
71 {
72 	return (cmr->base_pfn & ((1UL << align_order) - 1))
73 		>> cma->order_per_bit;
74 }
75 
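/* Convert a page count to a bit count, rounding up to whole order_per_bit chunks. */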
76 static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
77 					      unsigned long pages)
78 {
79 	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
80 }
81 
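/*
 * Mark the bitmap chunks covering @count pages starting at @pfn as free
 * again and return the pages to the area's available count.
 */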
82 static void cma_clear_bitmap(struct cma *cma, const struct cma_memrange *cmr,
83 			     unsigned long pfn, unsigned long count)
84 {
85 	unsigned long bitmap_no, bitmap_count;
86 	unsigned long flags;
87 
88 	bitmap_no = (pfn - cmr->base_pfn) >> cma->order_per_bit;
89 	bitmap_count = cma_bitmap_pages_to_bits(cma, count);
90 
91 	spin_lock_irqsave(&cma->lock, flags);
92 	bitmap_clear(cmr->bitmap, bitmap_no, bitmap_count);
93 	cma->available_count += count;
94 	spin_unlock_irqrestore(&cma->lock, flags);
95 }
96 
97 /*
98  * Check if a CMA area contains no ranges that intersect with
99  * multiple zones. Store the result in the flags in case
100  * this gets called more than once.
101  */
102 bool cma_validate_zones(struct cma *cma)
103 {
104 	int r;
105 	unsigned long base_pfn;
106 	struct cma_memrange *cmr;
107 	bool valid_bit_set;
108 
109 	/*
110 	 * If already validated, return result of previous check.
111 	 * Either the valid or invalid bit will be set if this
112 	 * check has already been done. If neither is set, the
113 	 * check has not been performed yet.
114 	 */
115 	valid_bit_set = test_bit(CMA_ZONES_VALID, &cma->flags);
116 	if (valid_bit_set || test_bit(CMA_ZONES_INVALID, &cma->flags))
117 		return valid_bit_set;
118 
119 	for (r = 0; r < cma->nranges; r++) {
120 		cmr = &cma->ranges[r];
121 		base_pfn = cmr->base_pfn;
122 
123 		/*
124 		 * alloc_contig_range() requires the pfn range specified
125 		 * to be in the same zone. Simplify by forcing the entire
126 		 * CMA reserved range to be in the same zone.
127 		 */
128 		WARN_ON_ONCE(!pfn_valid(base_pfn));
129 		if (pfn_range_intersects_zones(cma->nid, base_pfn, cmr->count)) {
130 			set_bit(CMA_ZONES_INVALID, &cma->flags);
131 			return false;
132 		}
133 	}
134 
135 	set_bit(CMA_ZONES_VALID, &cma->flags);
136 
137 	return true;
138 }
139 
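/*
 * Activate a CMA area: allocate the per-range allocation bitmaps, make
 * sure all ranges sit within a single zone, account any pages already
 * handed out by cma_reserve_early() as allocated, and hand the remaining
 * pageblocks to the buddy allocator as MIGRATE_CMA. On failure the area
 * is emptied and (unless CMA_RESERVE_PAGES_ON_ERROR is set) its pages
 * are released as ordinary free pages.
 */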
140 static void __init cma_activate_area(struct cma *cma)
141 {
142 	unsigned long pfn, end_pfn, early_pfn[CMA_MAX_RANGES];
143 	int allocrange, r;
144 	struct cma_memrange *cmr;
145 	unsigned long bitmap_count, count;
146 
147 	for (allocrange = 0; allocrange < cma->nranges; allocrange++) {
148 		cmr = &cma->ranges[allocrange];
149 		early_pfn[allocrange] = cmr->early_pfn;
150 		cmr->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma, cmr),
151 					    GFP_KERNEL);
152 		if (!cmr->bitmap)
153 			goto cleanup;
154 	}
155 
156 	if (!cma_validate_zones(cma))
157 		goto cleanup;
158 
159 	for (r = 0; r < cma->nranges; r++) {
160 		cmr = &cma->ranges[r];
161 		if (early_pfn[r] != cmr->base_pfn) {
162 			count = early_pfn[r] - cmr->base_pfn;
163 			bitmap_count = cma_bitmap_pages_to_bits(cma, count);
164 			bitmap_set(cmr->bitmap, 0, bitmap_count);
165 		}
166 
167 		for (pfn = early_pfn[r]; pfn < cmr->base_pfn + cmr->count;
168 		     pfn += pageblock_nr_pages)
169 			init_cma_reserved_pageblock(pfn_to_page(pfn));
170 	}
171 
172 	spin_lock_init(&cma->lock);
173 
174 	mutex_init(&cma->alloc_mutex);
175 
176 #ifdef CONFIG_CMA_DEBUGFS
177 	INIT_HLIST_HEAD(&cma->mem_head);
178 	spin_lock_init(&cma->mem_head_lock);
179 #endif
180 	set_bit(CMA_ACTIVATED, &cma->flags);
181 
182 	return;
183 
184 cleanup:
185 	for (r = 0; r < allocrange; r++)
186 		bitmap_free(cma->ranges[r].bitmap);
187 
188 	/* Expose all pages to the buddy, they are useless for CMA. */
189 	if (!test_bit(CMA_RESERVE_PAGES_ON_ERROR, &cma->flags)) {
190 		for (r = 0; r < allocrange; r++) {
191 			cmr = &cma->ranges[r];
192 			end_pfn = cmr->base_pfn + cmr->count;
193 			for (pfn = early_pfn[r]; pfn < end_pfn; pfn++)
194 				free_reserved_page(pfn_to_page(pfn));
195 		}
196 	}
197 	totalcma_pages -= cma->count;
198 	cma->available_count = cma->count = 0;
199 	pr_err("CMA area %s could not be activated\n", cma->name);
200 }
201 
202 static int __init cma_init_reserved_areas(void)
203 {
204 	int i;
205 
206 	for (i = 0; i < cma_area_count; i++)
207 		cma_activate_area(&cma_areas[i]);
208 
209 	return 0;
210 }
211 core_initcall(cma_init_reserved_areas);
212 
213 void __init cma_reserve_pages_on_error(struct cma *cma)
214 {
215 	set_bit(CMA_RESERVE_PAGES_ON_ERROR, &cma->flags);
216 }
217 
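/*
 * Grab the next free slot in cma_areas[], give it a name and record its
 * size. The memory ranges themselves are filled in by the callers, and
 * the area is activated later from cma_init_reserved_areas().
 */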
218 static int __init cma_new_area(const char *name, phys_addr_t size,
219 			       unsigned int order_per_bit,
220 			       struct cma **res_cma)
221 {
222 	struct cma *cma;
223 
224 	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
225 		pr_err("Not enough slots for CMA reserved regions!\n");
226 		return -ENOSPC;
227 	}
228 
229 	/*
230 	 * Each reserved area must be initialised later, when more kernel
231 	 * subsystems (like slab allocator) are available.
232 	 */
233 	cma = &cma_areas[cma_area_count];
234 	cma_area_count++;
235 
236 	if (name)
237 		strscpy(cma->name, name);
238 	else
239 		snprintf(cma->name, CMA_MAX_NAME, "cma%d", cma_area_count);
240 
241 	cma->available_count = cma->count = size >> PAGE_SHIFT;
242 	cma->order_per_bit = order_per_bit;
243 	*res_cma = cma;
244 	totalcma_pages += cma->count;
245 
246 	return 0;
247 }
248 
249 static void __init cma_drop_area(struct cma *cma)
250 {
251 	totalcma_pages -= cma->count;
252 	cma_area_count--;
253 }
254 
255 /**
256  * cma_init_reserved_mem() - create custom contiguous area from reserved memory
257  * @base: Base address of the reserved area
258  * @size: Size of the reserved area (in bytes).
259  * @order_per_bit: Order of pages represented by one bit on bitmap.
260  * @name: The name of the area. If this parameter is NULL, the name of
261  *        the area will be set to "cmaN", where N is a running counter of
262  *        used areas.
263  * @res_cma: Pointer to store the created cma region.
264  *
265  * This function creates custom contiguous area from already reserved memory.
266  * This function creates a custom contiguous area from already reserved memory.
267 int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
268 				 unsigned int order_per_bit,
269 				 const char *name,
270 				 struct cma **res_cma)
271 {
272 	struct cma *cma;
273 	int ret;
274 
275 	/* Sanity checks */
276 	if (!size || !memblock_is_region_reserved(base, size))
277 		return -EINVAL;
278 
279 	/*
280 	 * CMA uses CMA_MIN_ALIGNMENT_BYTES as alignment requirement which
281 	 * needs pageblock_order to be initialized. Let's enforce it.
282 	 */
283 	if (!pageblock_order) {
284 		pr_err("pageblock_order not yet initialized. Called during early boot?\n");
285 		return -EINVAL;
286 	}
287 
288 	/* ensure minimal alignment required by mm core */
289 	if (!IS_ALIGNED(base | size, CMA_MIN_ALIGNMENT_BYTES))
290 		return -EINVAL;
291 
292 	ret = cma_new_area(name, size, order_per_bit, &cma);
293 	if (ret != 0)
294 		return ret;
295 
296 	cma->ranges[0].base_pfn = PFN_DOWN(base);
297 	cma->ranges[0].early_pfn = PFN_DOWN(base);
298 	cma->ranges[0].count = cma->count;
299 	cma->nranges = 1;
300 	cma->nid = NUMA_NO_NODE;
301 
302 	*res_cma = cma;
303 
304 	return 0;
305 }
306 
307 /*
308  * Structure used while walking physical memory ranges and finding out
309  * which one(s) to use for a CMA area.
310  */
311 struct cma_init_memrange {
312 	phys_addr_t base;
313 	phys_addr_t size;
314 	struct list_head list;
315 };
316 
317 /*
318  * Work array used during CMA initialization.
319  */
320 static struct cma_init_memrange memranges[CMA_MAX_RANGES] __initdata;
321 
322 static bool __init revsizecmp(struct cma_init_memrange *mlp,
323 			      struct cma_init_memrange *mrp)
324 {
325 	return mlp->size > mrp->size;
326 }
327 
328 static bool __init basecmp(struct cma_init_memrange *mlp,
329 			   struct cma_init_memrange *mrp)
330 {
331 	return mlp->base < mrp->base;
332 }
333 
334 /*
335  * Helper function to create sorted lists.
336  */
337 static void __init list_insert_sorted(
338 	struct list_head *ranges,
339 	struct cma_init_memrange *mrp,
340 	bool (*cmp)(struct cma_init_memrange *lh, struct cma_init_memrange *rh))
341 {
342 	struct list_head *mp;
343 	struct cma_init_memrange *mlp;
344 
345 	if (list_empty(ranges))
346 		list_add(&mrp->list, ranges);
347 	else {
348 		list_for_each(mp, ranges) {
349 			mlp = list_entry(mp, struct cma_init_memrange, list);
350 			if (cmp(mlp, mrp))
351 				break;
352 		}
353 		__list_add(&mrp->list, mlp->list.prev, &mlp->list);
354 	}
355 }
356 
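/*
 * Reserve a fixed, caller-specified physical range for a CMA area:
 * refuse regions that straddle the low/high memory boundary on HIGHMEM
 * configurations, that are already reserved, or that memblock cannot
 * reserve.
 */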
357 static int __init cma_fixed_reserve(phys_addr_t base, phys_addr_t size)
358 {
359 	if (IS_ENABLED(CONFIG_HIGHMEM)) {
360 		phys_addr_t highmem_start = __pa(high_memory - 1) + 1;
361 
362 		/*
363 		 * If allocating at a fixed base, the requested region must not
364 		 * cross the low/high memory boundary.
365 		 */
366 		if (base < highmem_start && base + size > highmem_start) {
367 			pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
368 			       &base, &highmem_start);
369 			return -EINVAL;
370 		}
371 	}
372 
373 	if (memblock_is_region_reserved(base, size) ||
374 	    memblock_reserve(base, size) < 0) {
375 		return -EBUSY;
376 	}
377 
378 	return 0;
379 }
380 
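/*
 * Allocate the physical memory for a CMA area from memblock, preferring
 * placements that interfere least with other users: bottom-up above
 * 4 GiB where possible, then high memory on HIGHMEM systems, and
 * finally anywhere in [base, limit].
 */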
381 static phys_addr_t __init cma_alloc_mem(phys_addr_t base, phys_addr_t size,
382 			phys_addr_t align, phys_addr_t limit, int nid)
383 {
384 	phys_addr_t addr = 0;
385 
386 	/*
387 	 * If there is enough memory, try a bottom-up allocation first.
388 	 * It will place the new cma area close to the start of the node
389 	 * and guarantee that compaction moves pages out of the
390 	 * cma area and not into it.
391 	 * Avoid using first 4GB to not interfere with constrained zones
392 	 * like DMA/DMA32.
393 	 */
394 #ifdef CONFIG_PHYS_ADDR_T_64BIT
395 	if (!memblock_bottom_up() && limit >= SZ_4G + size) {
396 		memblock_set_bottom_up(true);
397 		addr = memblock_alloc_range_nid(size, align, SZ_4G, limit,
398 						nid, true);
399 		memblock_set_bottom_up(false);
400 	}
401 #endif
402 
403 	/*
404 	 * On systems with HIGHMEM try allocating from there before consuming
405 	 * memory in lower zones.
406 	 */
407 	if (!addr && IS_ENABLED(CONFIG_HIGHMEM)) {
408 		phys_addr_t highmem = __pa(high_memory - 1) + 1;
409 
410 		/*
411 		 * All pages in the reserved area must come from the same zone.
412 		 * If the requested region crosses the low/high memory boundary,
413 		 * try allocating from high memory first and fall back to low
414 		 * memory in case of failure.
415 		 */
416 		if (base < highmem && limit > highmem) {
417 			addr = memblock_alloc_range_nid(size, align, highmem,
418 							limit, nid, true);
419 			limit = highmem;
420 		}
421 	}
422 
423 	if (!addr)
424 		addr = memblock_alloc_range_nid(size, align, base, limit, nid,
425 						true);
426 
427 	return addr;
428 }
429 
430 static int __init __cma_declare_contiguous_nid(phys_addr_t *basep,
431 			phys_addr_t size, phys_addr_t limit,
432 			phys_addr_t alignment, unsigned int order_per_bit,
433 			bool fixed, const char *name, struct cma **res_cma,
434 			int nid)
435 {
436 	phys_addr_t memblock_end = memblock_end_of_DRAM();
437 	phys_addr_t base = *basep;
438 	int ret;
439 
440 	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
441 		__func__, &size, &base, &limit, &alignment);
442 
443 	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
444 		pr_err("Not enough slots for CMA reserved regions!\n");
445 		return -ENOSPC;
446 	}
447 
448 	if (!size)
449 		return -EINVAL;
450 
451 	if (alignment && !is_power_of_2(alignment))
452 		return -EINVAL;
453 
454 	if (!IS_ENABLED(CONFIG_NUMA))
455 		nid = NUMA_NO_NODE;
456 
457 	/* Sanitise input arguments. */
458 	alignment = max_t(phys_addr_t, alignment, CMA_MIN_ALIGNMENT_BYTES);
459 	if (fixed && base & (alignment - 1)) {
460 		pr_err("Region at %pa must be aligned to %pa bytes\n",
461 			&base, &alignment);
462 		return -EINVAL;
463 	}
464 	base = ALIGN(base, alignment);
465 	size = ALIGN(size, alignment);
466 	limit &= ~(alignment - 1);
467 
468 	if (!base)
469 		fixed = false;
470 
471 	/* size should be aligned with order_per_bit */
472 	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
473 		return -EINVAL;
474 
476 	/*
477 	 * If the limit is unspecified or above the memblock end, its effective
478 	 * value will be the memblock end. Set it explicitly to simplify further
479 	 * checks.
480 	 */
481 	if (limit == 0 || limit > memblock_end)
482 		limit = memblock_end;
483 
484 	if (base + size > limit) {
485 		pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
486 			&size, &base, &limit);
487 		return -EINVAL;
488 	}
489 
490 	/* Reserve memory */
491 	if (fixed) {
492 		ret = cma_fixed_reserve(base, size);
493 		if (ret)
494 			return ret;
495 	} else {
496 		base = cma_alloc_mem(base, size, alignment, limit, nid);
497 		if (!base)
498 			return -ENOMEM;
499 
500 		/*
501 		 * kmemleak scans/reads tracked objects for pointers to other
502 		 * objects but this address isn't mapped and accessible
503 		 */
504 		kmemleak_ignore_phys(base);
505 	}
506 
507 	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
508 	if (ret) {
509 		memblock_phys_free(base, size);
510 		return ret;
511 	}
512 
513 	(*res_cma)->nid = nid;
514 	*basep = base;
515 
516 	return 0;
517 }
518 
519 /*
520  * Create CMA areas with a total size of @total_size. A normal allocation
521  * for one area is tried first. If that fails, the biggest memblock
522  * ranges above 4G are selected, and allocated bottom up.
523  *
524  * The complexity here is not great, but this function will only be
525  * called during boot, and the lists operated on have fewer than
526  * CMA_MAX_RANGES elements (default value: 8).
527  */
528 int __init cma_declare_contiguous_multi(phys_addr_t total_size,
529 			phys_addr_t align, unsigned int order_per_bit,
530 			const char *name, struct cma **res_cma, int nid)
531 {
532 	phys_addr_t start = 0, end;
533 	phys_addr_t size, sizesum, sizeleft;
534 	struct cma_init_memrange *mrp, *mlp, *failed;
535 	struct cma_memrange *cmrp;
536 	LIST_HEAD(ranges);
537 	LIST_HEAD(final_ranges);
538 	struct list_head *mp, *next;
539 	int ret, nr = 1;
540 	u64 i;
541 	struct cma *cma;
542 
543 	/*
544 	 * First, try it the normal way, producing just one range.
545 	 */
546 	ret = __cma_declare_contiguous_nid(&start, total_size, 0, align,
547 			order_per_bit, false, name, res_cma, nid);
548 	if (ret != -ENOMEM)
549 		goto out;
550 
551 	/*
552 	 * Couldn't find one range that fits our needs, so try multiple
553 	 * ranges.
554 	 *
555 	 * No need to do the alignment checks here, the call to
556 	 * __cma_declare_contiguous_nid() above would have caught
557 	 * any issues. With the checks, we know that:
558 	 *
559 	 * - @align is a power of 2
560 	 * - @align is >= pageblock alignment
561 	 * - @size is aligned to @align and to @order_per_bit
562 	 *
563 	 * So, as long as we create ranges that have a base
564 	 * aligned to @align, and a size that is aligned to
565 	 * both @align and @order_per_bit, things will work out.
566 	 */
567 	nr = 0;
568 	sizesum = 0;
569 	failed = NULL;
570 
571 	ret = cma_new_area(name, total_size, order_per_bit, &cma);
572 	if (ret != 0)
573 		goto out;
574 
575 	align = max_t(phys_addr_t, align, CMA_MIN_ALIGNMENT_BYTES);
576 	/*
577 	 * Create a list of ranges above 4G, largest range first.
578 	 */
579 	for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &start, &end, NULL) {
580 		if (upper_32_bits(start) == 0)
581 			continue;
582 
583 		start = ALIGN(start, align);
584 		if (start >= end)
585 			continue;
586 
587 		end = ALIGN_DOWN(end, align);
588 		if (end <= start)
589 			continue;
590 
591 		size = end - start;
592 		size = ALIGN_DOWN(size, (PAGE_SIZE << order_per_bit));
593 		if (!size)
594 			continue;
595 		sizesum += size;
596 
597 		pr_debug("consider %016llx - %016llx\n", (u64)start, (u64)end);
598 
599 		/*
600 		 * If we have not yet used the maximum number of
601 		 * areas, grab a new one.
602 		 *
603 		 * If all slots are in use, check whether this range is
604 		 * at least as large as the smallest one recorded so far.
605 		 * If it is, drop the smallest element and re-use its slot.
606 		 */
607 		if (nr < CMA_MAX_RANGES)
608 			mrp = &memranges[nr++];
609 		else {
610 			mrp = list_last_entry(&ranges,
611 					      struct cma_init_memrange, list);
612 			if (size < mrp->size)
613 				continue;
614 			list_del(&mrp->list);
615 			sizesum -= mrp->size;
616 			pr_debug("deleted %016llx - %016llx from the list\n",
617 				(u64)mrp->base, (u64)mrp->base + mrp->size);
618 		}
619 		mrp->base = start;
620 		mrp->size = size;
621 
622 		/*
623 		 * Now do a sorted insert.
624 		 */
625 		list_insert_sorted(&ranges, mrp, revsizecmp);
626 		pr_debug("added %016llx - %016llx to the list\n",
627 		    (u64)mrp->base, (u64)mrp->base + size);
628 		pr_debug("total size now %llu\n", (u64)sizesum);
629 	}
630 
631 	/*
632 	 * There is not enough room in the CMA_MAX_RANGES largest
633 	 * ranges, so bail out.
634 	 */
635 	if (sizesum < total_size) {
636 		cma_drop_area(cma);
637 		ret = -ENOMEM;
638 		goto out;
639 	}
640 
641 	/*
642 	 * Found ranges that provide enough combined space.
643 	 * Now sort them by address, smallest first, because we
644 	 * want to mimic a bottom-up memblock allocation.
645 	 */
646 	sizesum = 0;
647 	list_for_each_safe(mp, next, &ranges) {
648 		mlp = list_entry(mp, struct cma_init_memrange, list);
649 		list_del(mp);
650 		list_insert_sorted(&final_ranges, mlp, basecmp);
651 		sizesum += mlp->size;
652 		if (sizesum >= total_size)
653 			break;
654 	}
655 
656 	/*
657 	 * Walk the final list, and add a CMA range for
658 	 * each range, possibly not using the last one fully.
659 	 */
660 	nr = 0;
661 	sizeleft = total_size;
662 	list_for_each(mp, &final_ranges) {
663 		mlp = list_entry(mp, struct cma_init_memrange, list);
664 		size = min(sizeleft, mlp->size);
665 		if (memblock_reserve(mlp->base, size)) {
666 			/*
667 			 * Unexpected error. Could go on to
668 			 * the next one, but just abort to
669 			 * be safe.
670 			 */
671 			failed = mlp;
672 			break;
673 		}
674 
675 		pr_debug("created region %d: %016llx - %016llx\n",
676 		    nr, (u64)mlp->base, (u64)mlp->base + size);
677 		cmrp = &cma->ranges[nr++];
678 		cmrp->base_pfn = PHYS_PFN(mlp->base);
679 		cmrp->early_pfn = cmrp->base_pfn;
680 		cmrp->count = size >> PAGE_SHIFT;
681 
682 		sizeleft -= size;
683 		if (sizeleft == 0)
684 			break;
685 	}
686 
687 	if (failed) {
688 		list_for_each(mp, &final_ranges) {
689 			mlp = list_entry(mp, struct cma_init_memrange, list);
690 			if (mlp == failed)
691 				break;
692 			memblock_phys_free(mlp->base, mlp->size);
693 		}
694 		cma_drop_area(cma);
695 		ret = -ENOMEM;
696 		goto out;
697 	}
698 
699 	cma->nranges = nr;
700 	cma->nid = nid;
701 	*res_cma = cma;
702 
703 out:
704 	if (ret != 0)
705 		pr_err("Failed to reserve %lu MiB\n",
706 			(unsigned long)total_size / SZ_1M);
707 	else
708 		pr_info("Reserved %lu MiB in %d range%s\n",
709 			(unsigned long)total_size / SZ_1M, nr, str_plural(nr));
710 	return ret;
711 }
712 
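/*
 * Illustrative example (not taken from any particular caller): early arch
 * setup code could reserve a 64 MiB area anywhere in memory with
 *
 *	struct cma *cma;
 *	int err = cma_declare_contiguous_nid(0, SZ_64M, 0, 0, 0, false,
 *					     "example", &cma, NUMA_NO_NODE);
 *
 * and later hand out pages from it via cma_alloc()/cma_release().
 */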
713 /**
714  * cma_declare_contiguous_nid() - reserve custom contiguous area
715  * @base: Base address of the reserved area (optional, use 0 for any)
716  * @size: Size of the reserved area (in bytes)
717  * @limit: End address of the reserved memory (optional, 0 for any).
718  * @alignment: Alignment for the CMA area, should be power of 2 or zero
719  * @order_per_bit: Order of pages represented by one bit on bitmap.
720  * @fixed: hint about where to place the reserved area
721  * @name: The name of the area. See function cma_init_reserved_mem()
722  * @res_cma: Pointer to store the created cma region.
723  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
724  *
725  * This function reserves memory from the early allocator. It should be
726  * called by arch specific code once the early allocator (memblock or bootmem)
727  * has been activated and all other subsystems have already allocated/reserved
728  * memory. This function allows the creation of custom reserved areas.
729  *
730  * If @fixed is true, reserve contiguous area at exactly @base.  If false,
731  * reserve in range from @base to @limit.
732  */
733 int __init cma_declare_contiguous_nid(phys_addr_t base,
734 			phys_addr_t size, phys_addr_t limit,
735 			phys_addr_t alignment, unsigned int order_per_bit,
736 			bool fixed, const char *name, struct cma **res_cma,
737 			int nid)
738 {
739 	int ret;
740 
741 	ret = __cma_declare_contiguous_nid(&base, size, limit, alignment,
742 			order_per_bit, fixed, name, res_cma, nid);
743 	if (ret != 0)
744 		pr_err("Failed to reserve %ld MiB\n",
745 				(unsigned long)size / SZ_1M);
746 	else
747 		pr_info("Reserved %ld MiB at %pa\n",
748 				(unsigned long)size / SZ_1M, &base);
749 
750 	return ret;
751 }
752 
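/* Dump the free extents of every range, in pages, after a failed allocation. */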
753 static void cma_debug_show_areas(struct cma *cma)
754 {
755 	unsigned long start, end;
756 	unsigned long nr_part;
757 	unsigned long nbits;
758 	int r;
759 	struct cma_memrange *cmr;
760 
761 	spin_lock_irq(&cma->lock);
762 	pr_info("number of available pages: ");
763 	for (r = 0; r < cma->nranges; r++) {
764 		cmr = &cma->ranges[r];
765 
766 		nbits = cma_bitmap_maxno(cma, cmr);
767 
768 		pr_info("range %d: ", r);
769 		for_each_clear_bitrange(start, end, cmr->bitmap, nbits) {
770 			nr_part = (end - start) << cma->order_per_bit;
771 			pr_cont("%s%lu@%lu", start ? "+" : "", nr_part, start);
772 		}
773 		pr_info("\n");
774 	}
775 	pr_cont("=> %lu free of %lu total pages\n", cma->available_count,
776 			cma->count);
777 	spin_unlock_irq(&cma->lock);
778 }
779 
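/*
 * Try to allocate @count pages from a single memory range: find a free,
 * suitably aligned stretch in the range's bitmap, claim it, and then call
 * alloc_contig_frozen_range() to actually grab the pages. On -EBUSY the
 * bitmap is released and the search continues further into the range.
 */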
780 static int cma_range_alloc(struct cma *cma, struct cma_memrange *cmr,
781 				unsigned long count, unsigned int align,
782 				struct page **pagep, gfp_t gfp)
783 {
784 	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
785 	unsigned long start, pfn, mask, offset;
786 	int ret = -EBUSY;
787 	struct page *page = NULL;
788 
789 	mask = cma_bitmap_aligned_mask(cma, align);
790 	offset = cma_bitmap_aligned_offset(cma, cmr, align);
791 	bitmap_maxno = cma_bitmap_maxno(cma, cmr);
792 	bitmap_count = cma_bitmap_pages_to_bits(cma, count);
793 
794 	if (bitmap_count > bitmap_maxno)
795 		goto out;
796 
797 	for (start = 0; ; start = bitmap_no + mask + 1) {
798 		spin_lock_irq(&cma->lock);
799 		/*
800 		 * If the request is larger than the available number
801 		 * of pages, stop right away.
802 		 */
803 		if (count > cma->available_count) {
804 			spin_unlock_irq(&cma->lock);
805 			break;
806 		}
807 		bitmap_no = bitmap_find_next_zero_area_off(cmr->bitmap,
808 				bitmap_maxno, start, bitmap_count, mask,
809 				offset);
810 		if (bitmap_no >= bitmap_maxno) {
811 			spin_unlock_irq(&cma->lock);
812 			break;
813 		}
814 
815 		pfn = cmr->base_pfn + (bitmap_no << cma->order_per_bit);
816 		page = pfn_to_page(pfn);
817 
818 		/*
819 		 * Do not hand out page ranges that are not contiguous, so
820 		 * callers can just iterate the pages without having to worry
821 		 * about these corner cases.
822 		 */
823 		if (!page_range_contiguous(page, count)) {
824 			spin_unlock_irq(&cma->lock);
825 			pr_warn_ratelimited("%s: %s: skipping incompatible area [0x%lx-0x%lx]\n",
826 					    __func__, cma->name, pfn, pfn + count - 1);
827 			continue;
828 		}
829 
830 		bitmap_set(cmr->bitmap, bitmap_no, bitmap_count);
831 		cma->available_count -= count;
832 		/*
833 		 * It's safe to drop the lock here. We've marked this region for
834 		 * our exclusive use. If the migration fails we will take the
835 		 * lock again and unmark it.
836 		 */
837 		spin_unlock_irq(&cma->lock);
838 
839 		mutex_lock(&cma->alloc_mutex);
840 		ret = alloc_contig_frozen_range(pfn, pfn + count, ACR_FLAGS_CMA, gfp);
841 		mutex_unlock(&cma->alloc_mutex);
842 		if (!ret)
843 			break;
844 
845 		cma_clear_bitmap(cma, cmr, pfn, count);
846 		if (ret != -EBUSY)
847 			break;
848 
849 		pr_debug("%s(): memory range at pfn 0x%lx %p is busy, retrying\n",
850 			 __func__, pfn, page);
851 
852 		trace_cma_alloc_busy_retry(cma->name, pfn, page, count, align);
853 	}
854 out:
855 	if (!ret)
856 		*pagep = page;
857 	return ret;
858 }
859 
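/*
 * Common allocation path: walk the area's ranges until one of them can
 * satisfy the request. The returned pages are "frozen", i.e. their
 * refcounts are not initialized; cma_alloc() sets them up afterwards.
 */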
860 static struct page *__cma_alloc_frozen(struct cma *cma,
861 		unsigned long count, unsigned int align, gfp_t gfp)
862 {
863 	struct page *page = NULL;
864 	int ret = -ENOMEM, r;
865 	unsigned long i;
866 	const char *name = cma ? cma->name : NULL;
867 
868 	if (!cma || !cma->count)
869 		return page;
870 
871 	pr_debug("%s(cma %p, name: %s, count %lu, align %d)\n", __func__,
872 		(void *)cma, cma->name, count, align);
873 
874 	if (!count)
875 		return page;
876 
877 	trace_cma_alloc_start(name, count, cma->available_count, cma->count, align);
878 
879 	for (r = 0; r < cma->nranges; r++) {
880 		page = NULL;
881 
882 		ret = cma_range_alloc(cma, &cma->ranges[r], count, align,
883 				       &page, gfp);
884 		if (ret != -EBUSY || page)
885 			break;
886 	}
887 
888 	/*
889 	 * CMA can allocate multiple page blocks, which results in different
890 	 * blocks being marked with different tags. Reset the tags to ignore
891 	 * those page blocks.
892 	 */
893 	if (page) {
894 		for (i = 0; i < count; i++)
895 			page_kasan_tag_reset(page + i);
896 	}
897 
898 	if (ret && !(gfp & __GFP_NOWARN)) {
899 		pr_err_ratelimited("%s: %s: alloc failed, req-size: %lu pages, ret: %d\n",
900 				   __func__, cma->name, count, ret);
901 		cma_debug_show_areas(cma);
902 	}
903 
904 	pr_debug("%s(): returned %p\n", __func__, page);
905 	trace_cma_alloc_finish(name, page ? page_to_pfn(page) : 0,
906 			       page, count, align, ret);
907 	if (page) {
908 		count_vm_event(CMA_ALLOC_SUCCESS);
909 		cma_sysfs_account_success_pages(cma, count);
910 	} else {
911 		count_vm_event(CMA_ALLOC_FAIL);
912 		cma_sysfs_account_fail_pages(cma, count);
913 	}
914 
915 	return page;
916 }
917 
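/*
 * Allocate @count frozen (refcount-uninitialized) pages at the given
 * alignment; @no_warn suppresses the failure message.
 */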
918 struct page *cma_alloc_frozen(struct cma *cma, unsigned long count,
919 		unsigned int align, bool no_warn)
920 {
921 	gfp_t gfp = GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0);
922 
923 	return __cma_alloc_frozen(cma, count, align, gfp);
924 }
925 
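/*
 * Allocate a naturally aligned block of 1 << @order frozen pages as a
 * compound allocation (__GFP_COMP), without warning on failure.
 */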
926 struct page *cma_alloc_frozen_compound(struct cma *cma, unsigned int order)
927 {
928 	gfp_t gfp = GFP_KERNEL | __GFP_COMP | __GFP_NOWARN;
929 
930 	return __cma_alloc_frozen(cma, 1 << order, order, gfp);
931 }
932 
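/*
 * Illustrative only: a driver needing 16 refcounted pages from an area
 * might do
 *
 *	struct page *page = cma_alloc(cma, 16, 0, false);
 *
 *	if (page) {
 *		...
 *		cma_release(cma, page, 16);
 *	}
 */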
933 /**
934  * cma_alloc() - allocate pages from contiguous area
935  * @cma:   Contiguous memory region for which the allocation is performed.
936  * @count: Requested number of pages.
937  * @align: Requested alignment of pages (in PAGE_SIZE order).
938  * @no_warn: Avoid printing message about failed allocation
939  *
940  * This function allocates a contiguous block of pages from the specified
941  * contiguous memory area.
942  */
943 struct page *cma_alloc(struct cma *cma, unsigned long count,
944 		       unsigned int align, bool no_warn)
945 {
946 	struct page *page;
947 
948 	page = cma_alloc_frozen(cma, count, align, no_warn);
949 	if (page)
950 		set_pages_refcounted(page, count);
951 
952 	return page;
953 }
954 
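/*
 * Find the memory range that fully contains the @count pages starting at
 * @pages. Returns NULL (warning once if the block straddles a range
 * boundary) when no single range covers it.
 */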
955 static struct cma_memrange *find_cma_memrange(struct cma *cma,
956 		const struct page *pages, unsigned long count)
957 {
958 	struct cma_memrange *cmr = NULL;
959 	unsigned long pfn, end_pfn;
960 	int r;
961 
962 	pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);
963 
964 	if (!cma || !pages || count > cma->count)
965 		return NULL;
966 
967 	pfn = page_to_pfn(pages);
968 
969 	for (r = 0; r < cma->nranges; r++) {
970 		cmr = &cma->ranges[r];
971 		end_pfn = cmr->base_pfn + cmr->count;
972 		if (pfn >= cmr->base_pfn && pfn < end_pfn) {
973 			if (pfn + count <= end_pfn)
974 				break;
975 
976 			VM_WARN_ON_ONCE(1);
977 		}
978 	}
979 
980 	if (r == cma->nranges) {
981 		pr_debug("%s(page %p, count %lu, no cma range matches the page range)\n",
982 			 __func__, (void *)pages, count);
983 		return NULL;
984 	}
985 
986 	return cmr;
987 }
988 
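/*
 * Return a frozen page block to the page allocator and mark the
 * corresponding bitmap chunks free again.
 */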
989 static void __cma_release_frozen(struct cma *cma, struct cma_memrange *cmr,
990 		const struct page *pages, unsigned long count)
991 {
992 	unsigned long pfn = page_to_pfn(pages);
993 
994 	pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);
995 
996 	free_contig_frozen_range(pfn, count);
997 	cma_clear_bitmap(cma, cmr, pfn, count);
998 	cma_sysfs_account_release_pages(cma, count);
999 	trace_cma_release(cma->name, pfn, pages, count);
1000 }
1001 
1002 /**
1003  * cma_release() - release allocated pages
1004  * @cma:   Contiguous memory region the pages were allocated from.
1005  * @pages: Allocated pages.
1006  * @count: Number of allocated pages.
1007  *
1008  * This function releases memory allocated by cma_alloc().
1009  * It returns false when the provided pages do not belong to the contiguous
1010  * area, and true otherwise.
1011  */
1012 bool cma_release(struct cma *cma, const struct page *pages,
1013 		 unsigned long count)
1014 {
1015 	struct cma_memrange *cmr;
1016 	unsigned long i, pfn;
1017 
1018 	cmr = find_cma_memrange(cma, pages, count);
1019 	if (!cmr)
1020 		return false;
1021 
1022 	pfn = page_to_pfn(pages);
1023 	for (i = 0; i < count; i++, pfn++)
1024 		VM_WARN_ON(!put_page_testzero(pfn_to_page(pfn)));
1025 
1026 	__cma_release_frozen(cma, cmr, pages, count);
1027 
1028 	return true;
1029 }
1030 
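/*
 * Like cma_release(), but for frozen allocations made with
 * cma_alloc_frozen() or cma_alloc_frozen_compound(), whose page
 * refcounts were never initialized.
 */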
1031 bool cma_release_frozen(struct cma *cma, const struct page *pages,
1032 		unsigned long count)
1033 {
1034 	struct cma_memrange *cmr;
1035 
1036 	cmr = find_cma_memrange(cma, pages, count);
1037 	if (!cmr)
1038 		return false;
1039 
1040 	__cma_release_frozen(cma, cmr, pages, count);
1041 
1042 	return true;
1043 }
1044 
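/* Invoke @it for every registered CMA area; stop at the first nonzero return value. */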
1045 int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
1046 {
1047 	int i;
1048 
1049 	for (i = 0; i < cma_area_count; i++) {
1050 		int ret = it(&cma_areas[i], data);
1051 
1052 		if (ret)
1053 			return ret;
1054 	}
1055 
1056 	return 0;
1057 }
1058 
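/* Return true if the physical address range @start..@end overlaps any range of @cma. */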
1059 bool cma_intersects(struct cma *cma, unsigned long start, unsigned long end)
1060 {
1061 	int r;
1062 	struct cma_memrange *cmr;
1063 	unsigned long rstart, rend;
1064 
1065 	for (r = 0; r < cma->nranges; r++) {
1066 		cmr = &cma->ranges[r];
1067 
1068 		rstart = PFN_PHYS(cmr->base_pfn);
1069 		rend = PFN_PHYS(cmr->base_pfn + cmr->count);
1070 		if (end < rstart)
1071 			continue;
1072 		if (start >= rend)
1073 			continue;
1074 		return true;
1075 	}
1076 
1077 	return false;
1078 }
1079 
1080 /*
1081  * Very basic function to reserve memory from a CMA area that has not
1082  * yet been activated. This is expected to be called early, when the
1083  * system is single-threaded, so there is no locking. The alignment
1084  * checking is restrictive - only pageblock-aligned areas
1085  * (CMA_MIN_ALIGNMENT_BYTES) may be reserved through this function.
1086  * This keeps things simple, and is enough for the current use case.
1087  *
1088  * The CMA bitmaps have not yet been allocated, so just start
1089  * reserving from the bottom up, using a PFN to keep track
1090  * of what has been reserved. Unreserving is not possible.
1091  *
1092  * The caller is responsible for initializing the page structures
1093  * in the area properly, since this just points to memblock-allocated
1094  * memory. The caller should subsequently use init_cma_pageblock to
1095  * set the migrate type and CMA stats for the pageblocks that were reserved.
1096  *
1097  * If the CMA area fails to activate later, memory obtained through
1098  * this interface is not handed to the page allocator, this is
1099  * the responsibility of the caller (e.g. like normal memblock-allocated
1100  * memory).
1101  */
1102 void __init *cma_reserve_early(struct cma *cma, unsigned long size)
1103 {
1104 	int r;
1105 	struct cma_memrange *cmr;
1106 	unsigned long available;
1107 	void *ret = NULL;
1108 
1109 	if (!cma || !cma->count)
1110 		return NULL;
1111 	/*
1112 	 * Can only be called early in init.
1113 	 */
1114 	if (test_bit(CMA_ACTIVATED, &cma->flags))
1115 		return NULL;
1116 
1117 	if (!IS_ALIGNED(size, CMA_MIN_ALIGNMENT_BYTES))
1118 		return NULL;
1119 
1120 	if (!IS_ALIGNED(size, (PAGE_SIZE << cma->order_per_bit)))
1121 		return NULL;
1122 
1123 	size >>= PAGE_SHIFT;
1124 
1125 	if (size > cma->available_count)
1126 		return NULL;
1127 
1128 	for (r = 0; r < cma->nranges; r++) {
1129 		cmr = &cma->ranges[r];
1130 		available = cmr->count - (cmr->early_pfn - cmr->base_pfn);
1131 		if (size <= available) {
1132 			ret = phys_to_virt(PFN_PHYS(cmr->early_pfn));
1133 			cmr->early_pfn += size;
1134 			cma->available_count -= size;
1135 			return ret;
1136 		}
1137 	}
1138 
1139 	return ret;
1140 }
1141