// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 */

#define pr_fmt(fmt) "cma: " fmt

#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/string_choices.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <trace/events/cma.h>

#include "internal.h"
#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned int cma_area_count;

phys_addr_t cma_get_base(const struct cma *cma)
{
	WARN_ON_ONCE(cma->nranges != 1);
	return PFN_PHYS(cma->ranges[0].base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name;
}
EXPORT_SYMBOL_GPL(cma_get_name);

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}
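
/*
 * Worked example of the mask above (illustrative, not from the original
 * source): with cma->order_per_bit == 0 and align_order == 2, the mask
 * is (1UL << 2) - 1 == 3, i.e. an allocation must start at a bitmap
 * index whose low two bits are clear, which corresponds to a
 * 4-page-aligned offset within the range.
 */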

/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is represented in order_per_bits.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       const struct cma_memrange *cmr,
					       unsigned int align_order)
{
	return (cmr->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
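
/*
 * Example of the granularity conversion above (illustrative): with
 * cma->order_per_bit == 1, each bitmap bit covers two pages, so a
 * request for 5 pages is rounded up to ALIGN(5, 2) == 6 pages and
 * therefore consumes 6 >> 1 == 3 bits in the bitmap.
 */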

static void cma_clear_bitmap(struct cma *cma, const struct cma_memrange *cmr,
			     unsigned long pfn, unsigned long count)
{
	unsigned long bitmap_no, bitmap_count;
	unsigned long flags;

	bitmap_no = (pfn - cmr->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	spin_lock_irqsave(&cma->lock, flags);
	bitmap_clear(cmr->bitmap, bitmap_no, bitmap_count);
	cma->available_count += count;
	spin_unlock_irqrestore(&cma->lock, flags);
}

/*
 * Check if a CMA area contains no ranges that intersect with
 * multiple zones. Store the result in the flags in case
 * this gets called more than once.
 */
bool cma_validate_zones(struct cma *cma)
{
	int r;
	unsigned long base_pfn;
	struct cma_memrange *cmr;
	bool valid_bit_set;

	/*
	 * If already validated, return result of previous check.
	 * Either the valid or invalid bit will be set if this
	 * check has already been done. If neither is set, the
	 * check has not been performed yet.
	 */
	valid_bit_set = test_bit(CMA_ZONES_VALID, &cma->flags);
	if (valid_bit_set || test_bit(CMA_ZONES_INVALID, &cma->flags))
		return valid_bit_set;

	for (r = 0; r < cma->nranges; r++) {
		cmr = &cma->ranges[r];
		base_pfn = cmr->base_pfn;

		/*
		 * alloc_contig_range() requires the pfn range specified
		 * to be in the same zone. Simplify by forcing the entire
		 * CMA resv range to be in the same zone.
		 */
		WARN_ON_ONCE(!pfn_valid(base_pfn));
		if (pfn_range_intersects_zones(cma->nid, base_pfn, cmr->count)) {
			set_bit(CMA_ZONES_INVALID, &cma->flags);
			return false;
		}
	}

	set_bit(CMA_ZONES_VALID, &cma->flags);

	return true;
}

static void __init cma_activate_area(struct cma *cma)
{
	unsigned long pfn, end_pfn, early_pfn[CMA_MAX_RANGES];
	int allocrange, r;
	struct cma_memrange *cmr;
	unsigned long bitmap_count, count;

	for (allocrange = 0; allocrange < cma->nranges; allocrange++) {
		cmr = &cma->ranges[allocrange];
		early_pfn[allocrange] = cmr->early_pfn;
		cmr->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma, cmr),
					    GFP_KERNEL);
		if (!cmr->bitmap)
			goto cleanup;
	}

	if (!cma_validate_zones(cma))
		goto cleanup;

	for (r = 0; r < cma->nranges; r++) {
		cmr = &cma->ranges[r];
		if (early_pfn[r] != cmr->base_pfn) {
			count = early_pfn[r] - cmr->base_pfn;
			bitmap_count = cma_bitmap_pages_to_bits(cma, count);
			bitmap_set(cmr->bitmap, 0, bitmap_count);
		}

		for (pfn = early_pfn[r]; pfn < cmr->base_pfn + cmr->count;
		     pfn += pageblock_nr_pages)
			init_cma_reserved_pageblock(pfn_to_page(pfn));
	}

	spin_lock_init(&cma->lock);

	mutex_init(&cma->alloc_mutex);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif
	set_bit(CMA_ACTIVATED, &cma->flags);

	return;

cleanup:
	for (r = 0; r < allocrange; r++)
		bitmap_free(cma->ranges[r].bitmap);

	/* Expose all pages to the buddy, they are useless for CMA. */
	if (!test_bit(CMA_RESERVE_PAGES_ON_ERROR, &cma->flags)) {
		for (r = 0; r < allocrange; r++) {
			cmr = &cma->ranges[r];
			end_pfn = cmr->base_pfn + cmr->count;
			for (pfn = early_pfn[r]; pfn < end_pfn; pfn++)
				free_reserved_page(pfn_to_page(pfn));
		}
	}
	totalcma_pages -= cma->count;
	cma->available_count = cma->count = 0;
	pr_err("CMA area %s could not be activated\n", cma->name);
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++)
		cma_activate_area(&cma_areas[i]);

	return 0;
}
core_initcall(cma_init_reserved_areas);

void __init cma_reserve_pages_on_error(struct cma *cma)
{
	set_bit(CMA_RESERVE_PAGES_ON_ERROR, &cma->flags);
}

static int __init cma_new_area(const char *name, phys_addr_t size,
			       unsigned int order_per_bit,
			       struct cma **res_cma)
{
	struct cma *cma;

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	cma_area_count++;

	if (name)
		strscpy(cma->name, name);
	else
		snprintf(cma->name, CMA_MAX_NAME, "cma%d\n", cma_area_count);

	cma->available_count = cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	totalcma_pages += cma->count;

	return 0;
}

static void __init cma_drop_area(struct cma *cma)
{
	totalcma_pages -= cma->count;
	cma_area_count--;
}

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes),
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;
	int ret;

	/* Sanity checks */
	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/*
	 * CMA uses CMA_MIN_ALIGNMENT_BYTES as alignment requirement which
	 * needs pageblock_order to be initialized. Let's enforce it.
	 */
	if (!pageblock_order) {
		pr_err("pageblock_order not yet initialized. Called during early boot?\n");
		return -EINVAL;
	}

	/* ensure minimal alignment required by mm core */
	if (!IS_ALIGNED(base | size, CMA_MIN_ALIGNMENT_BYTES))
		return -EINVAL;

	ret = cma_new_area(name, size, order_per_bit, &cma);
	if (ret != 0)
		return ret;

	cma->ranges[0].base_pfn = PFN_DOWN(base);
	cma->ranges[0].early_pfn = PFN_DOWN(base);
	cma->ranges[0].count = cma->count;
	cma->nranges = 1;
	cma->nid = NUMA_NO_NODE;

	*res_cma = cma;

	return 0;
}
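
/*
 * Usage sketch (hypothetical caller, not taken from this file): wrap a
 * region that was reserved via memblock earlier in boot. "my_base" and
 * "my_size" are placeholders; both must be aligned to
 * CMA_MIN_ALIGNMENT_BYTES.
 *
 *	struct cma *cma;
 *	int err;
 *
 *	err = cma_init_reserved_mem(my_base, my_size, 0, "mydrv", &cma);
 *	if (err)
 *		pr_warn("mydrv: CMA setup failed: %d\n", err);
 */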

/*
 * Structure used while walking physical memory ranges and finding out
 * which one(s) to use for a CMA area.
 */
struct cma_init_memrange {
	phys_addr_t base;
	phys_addr_t size;
	struct list_head list;
};

/*
 * Work array used during CMA initialization.
 */
static struct cma_init_memrange memranges[CMA_MAX_RANGES] __initdata;

static bool __init revsizecmp(struct cma_init_memrange *mlp,
			      struct cma_init_memrange *mrp)
{
	return mlp->size > mrp->size;
}

static bool __init basecmp(struct cma_init_memrange *mlp,
			   struct cma_init_memrange *mrp)
{
	return mlp->base < mrp->base;
}

/*
 * Helper function to create sorted lists.
 */
static void __init list_insert_sorted(
	struct list_head *ranges,
	struct cma_init_memrange *mrp,
	bool (*cmp)(struct cma_init_memrange *lh, struct cma_init_memrange *rh))
{
	struct list_head *mp;
	struct cma_init_memrange *mlp;

	if (list_empty(ranges))
		list_add(&mrp->list, ranges);
	else {
		list_for_each(mp, ranges) {
			mlp = list_entry(mp, struct cma_init_memrange, list);
			if (cmp(mlp, mrp))
				break;
		}
		__list_add(&mrp->list, mlp->list.prev, &mlp->list);
	}
}

static int __init cma_fixed_reserve(phys_addr_t base, phys_addr_t size)
{
	if (IS_ENABLED(CONFIG_HIGHMEM)) {
		phys_addr_t highmem_start = __pa(high_memory - 1) + 1;

		/*
		 * If allocating at a fixed base, the requested region must
		 * not cross the low/high memory boundary.
		 */
		if (base < highmem_start && base + size > highmem_start) {
			pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			       &base, &highmem_start);
			return -EINVAL;
		}
	}

	if (memblock_is_region_reserved(base, size) ||
	    memblock_reserve(base, size) < 0) {
		return -EBUSY;
	}

	return 0;
}

static phys_addr_t __init cma_alloc_mem(phys_addr_t base, phys_addr_t size,
			phys_addr_t align, phys_addr_t limit, int nid)
{
	phys_addr_t addr = 0;

	/*
	 * If there is enough memory, try a bottom-up allocation first.
	 * It will place the new cma area close to the start of the node
	 * and guarantee that the compaction is moving pages out of the
	 * cma area and not into it.
	 * Avoid using first 4GB to not interfere with constrained zones
	 * like DMA/DMA32.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (!memblock_bottom_up() && limit >= SZ_4G + size) {
		memblock_set_bottom_up(true);
		addr = memblock_alloc_range_nid(size, align, SZ_4G, limit,
						nid, true);
		memblock_set_bottom_up(false);
	}
#endif

	/*
	 * On systems with HIGHMEM try allocating from there before consuming
	 * memory in lower zones.
	 */
	if (!addr && IS_ENABLED(CONFIG_HIGHMEM)) {
		phys_addr_t highmem = __pa(high_memory - 1) + 1;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem && limit > highmem) {
			addr = memblock_alloc_range_nid(size, align, highmem,
							limit, nid, true);
			limit = highmem;
		}
	}

	if (!addr)
		addr = memblock_alloc_range_nid(size, align, base, limit, nid,
						true);

	return addr;
}

static int __init __cma_declare_contiguous_nid(phys_addr_t *basep,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma,
			int nid)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t base = *basep;
	int ret;

	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		 __func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	if (!IS_ENABLED(CONFIG_NUMA))
		nid = NUMA_NO_NODE;

	/* Sanitise input arguments. */
	alignment = max_t(phys_addr_t, alignment, CMA_MIN_ALIGNMENT_BYTES);
	if (fixed && base & (alignment - 1)) {
		pr_err("Region at %pa must be aligned to %pa bytes\n",
		       &base, &alignment);
		return -EINVAL;
	}
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	if (base + size > limit) {
		pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
		       &size, &base, &limit);
		return -EINVAL;
	}

	/* Reserve memory */
	if (fixed) {
		ret = cma_fixed_reserve(base, size);
		if (ret)
			return ret;
	} else {
		base = cma_alloc_mem(base, size, alignment, limit, nid);
		if (!base)
			return -ENOMEM;

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects but this address isn't mapped and accessible
		 */
		kmemleak_ignore_phys(base);
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret) {
		memblock_phys_free(base, size);
		return ret;
	}

	(*res_cma)->nid = nid;
	*basep = base;

	return 0;
}

/*
 * Create CMA areas with a total size of @total_size. A normal allocation
 * for one area is tried first. If that fails, the biggest memblock
 * ranges above 4G are selected, and allocated bottom up.
 *
 * The complexity here is not great, but this function will only be
 * called during boot, and the lists operated on have fewer than
 * CMA_MAX_RANGES elements (default value: 8).
 */
int __init cma_declare_contiguous_multi(phys_addr_t total_size,
			phys_addr_t align, unsigned int order_per_bit,
			const char *name, struct cma **res_cma, int nid)
{
	phys_addr_t start = 0, end;
	phys_addr_t size, sizesum, sizeleft;
	struct cma_init_memrange *mrp, *mlp, *failed;
	struct cma_memrange *cmrp;
	LIST_HEAD(ranges);
	LIST_HEAD(final_ranges);
	struct list_head *mp, *next;
	int ret, nr = 1;
	u64 i;
	struct cma *cma;

	/*
	 * First, try it the normal way, producing just one range.
	 */
	ret = __cma_declare_contiguous_nid(&start, total_size, 0, align,
			order_per_bit, false, name, res_cma, nid);
	if (ret != -ENOMEM)
		goto out;

	/*
	 * Couldn't find one range that fits our needs, so try multiple
	 * ranges.
	 *
	 * No need to do the alignment checks here, the call to
	 * __cma_declare_contiguous_nid above would have caught
	 * any issues. With the checks, we know that:
	 *
	 * - @align is a power of 2
	 * - @align is >= pageblock alignment
	 * - @size is aligned to @align and to @order_per_bit
	 *
	 * So, as long as we create ranges that have a base
	 * aligned to @align, and a size that is aligned to
	 * both @align and @order_per_bit, things will work out.
	 */
	nr = 0;
	sizesum = 0;
	failed = NULL;

	ret = cma_new_area(name, total_size, order_per_bit, &cma);
	if (ret != 0)
		goto out;

	align = max_t(phys_addr_t, align, CMA_MIN_ALIGNMENT_BYTES);
	/*
	 * Create a list of ranges above 4G, largest range first.
	 */
	for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &start, &end, NULL) {
		if (upper_32_bits(start) == 0)
			continue;

		start = ALIGN(start, align);
		if (start >= end)
			continue;

		end = ALIGN_DOWN(end, align);
		if (end <= start)
			continue;

		size = end - start;
		size = ALIGN_DOWN(size, (PAGE_SIZE << order_per_bit));
		if (!size)
			continue;
		sizesum += size;

		pr_debug("consider %016llx - %016llx\n", (u64)start, (u64)end);

		/*
		 * If we have not yet used the maximum number of
		 * areas, grab a new one.
		 *
		 * If we can't add any more, see if this range is not
		 * smaller than the smallest one already recorded. If
		 * not, re-use the smallest element.
		 */
		if (nr < CMA_MAX_RANGES)
			mrp = &memranges[nr++];
		else {
			mrp = list_last_entry(&ranges,
					      struct cma_init_memrange, list);
			if (size < mrp->size)
				continue;
			list_del(&mrp->list);
			sizesum -= mrp->size;
			pr_debug("deleted %016llx - %016llx from the list\n",
				 (u64)mrp->base, (u64)mrp->base + mrp->size);
		}
		mrp->base = start;
		mrp->size = size;

		/*
		 * Now do a sorted insert.
		 */
		list_insert_sorted(&ranges, mrp, revsizecmp);
		pr_debug("added %016llx - %016llx to the list\n",
			 (u64)mrp->base, (u64)mrp->base + size);
		pr_debug("total size now %llu\n", (u64)sizesum);
	}

	/*
	 * There is not enough room in the CMA_MAX_RANGES largest
	 * ranges, so bail out.
	 */
	if (sizesum < total_size) {
		cma_drop_area(cma);
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Found ranges that provide enough combined space.
	 * Now, sort them by address, smallest first, because we
	 * want to mimic a bottom-up memblock allocation.
	 */
	sizesum = 0;
	list_for_each_safe(mp, next, &ranges) {
		mlp = list_entry(mp, struct cma_init_memrange, list);
		list_del(mp);
		list_insert_sorted(&final_ranges, mlp, basecmp);
		sizesum += mlp->size;
		if (sizesum >= total_size)
			break;
	}

	/*
	 * Walk the final list, and add a CMA range for
	 * each range, possibly not using the last one fully.
	 */
	nr = 0;
	sizeleft = total_size;
	list_for_each(mp, &final_ranges) {
		mlp = list_entry(mp, struct cma_init_memrange, list);
		size = min(sizeleft, mlp->size);
		if (memblock_reserve(mlp->base, size)) {
			/*
			 * Unexpected error. Could go on to
			 * the next one, but just abort to
			 * be safe.
			 */
			failed = mlp;
			break;
		}

		pr_debug("created region %d: %016llx - %016llx\n",
			 nr, (u64)mlp->base, (u64)mlp->base + size);
		cmrp = &cma->ranges[nr++];
		cmrp->base_pfn = PHYS_PFN(mlp->base);
		cmrp->early_pfn = cmrp->base_pfn;
		cmrp->count = size >> PAGE_SHIFT;

		sizeleft -= size;
		if (sizeleft == 0)
			break;
	}

	if (failed) {
		list_for_each(mp, &final_ranges) {
			mlp = list_entry(mp, struct cma_init_memrange, list);
			if (mlp == failed)
				break;
			memblock_phys_free(mlp->base, mlp->size);
		}
		cma_drop_area(cma);
		ret = -ENOMEM;
		goto out;
	}

	cma->nranges = nr;
	cma->nid = nid;
	*res_cma = cma;

out:
	if (ret != 0)
		pr_err("Failed to reserve %lu MiB\n",
		       (unsigned long)total_size / SZ_1M);
	else
		pr_info("Reserved %lu MiB in %d range%s\n",
			(unsigned long)total_size / SZ_1M, nr, str_plural(nr));
	return ret;
}
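
/*
 * Invocation sketch (sizes are illustrative only): ask for 16 GiB of
 * CMA that may be split across up to CMA_MAX_RANGES memblock ranges
 * on any node:
 *
 *	struct cma *cma;
 *	int err;
 *
 *	err = cma_declare_contiguous_multi(SZ_16G, CMA_MIN_ALIGNMENT_BYTES,
 *					   0, "bigcma", &cma, NUMA_NO_NODE);
 */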

/**
 * cma_declare_contiguous_nid() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any)
 * @size: Size of the reserved area (in bytes)
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem()
 * @res_cma: Pointer to store the created cma region.
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * This function reserves memory from early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous_nid(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma,
			int nid)
{
	int ret;

	ret = __cma_declare_contiguous_nid(&base, size, limit, alignment,
			order_per_bit, fixed, name, res_cma, nid);
	if (ret != 0)
		pr_err("Failed to reserve %ld MiB\n",
		       (unsigned long)size / SZ_1M);
	else
		pr_info("Reserved %ld MiB at %pa\n",
			(unsigned long)size / SZ_1M, &base);

	return ret;
}
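
/*
 * Minimal usage sketch (parameters are illustrative): reserve 64 MiB
 * anywhere below 4 GiB; the area itself is activated later by
 * cma_init_reserved_areas():
 *
 *	struct cma *cma;
 *	int err;
 *
 *	err = cma_declare_contiguous_nid(0, SZ_64M, SZ_4G, 0, 0, false,
 *					 "demo", &cma, NUMA_NO_NODE);
 */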

static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long start, end;
	unsigned long nr_part;
	unsigned long nbits;
	int r;
	struct cma_memrange *cmr;

	spin_lock_irq(&cma->lock);
	pr_info("number of available pages: ");
	for (r = 0; r < cma->nranges; r++) {
		cmr = &cma->ranges[r];

		nbits = cma_bitmap_maxno(cma, cmr);

		pr_info("range %d: ", r);
		for_each_clear_bitrange(start, end, cmr->bitmap, nbits) {
			nr_part = (end - start) << cma->order_per_bit;
			pr_cont("%s%lu@%lu", start ? "+" : "", nr_part, start);
		}
		pr_info("\n");
	}
	pr_cont("=> %lu free of %lu total pages\n", cma->available_count,
		cma->count);
	spin_unlock_irq(&cma->lock);
}

static int cma_range_alloc(struct cma *cma, struct cma_memrange *cmr,
			   unsigned long count, unsigned int align,
			   struct page **pagep, gfp_t gfp)
{
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	unsigned long start, pfn, mask, offset;
	int ret = -EBUSY;
	struct page *page = NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, cmr, align);
	bitmap_maxno = cma_bitmap_maxno(cma, cmr);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		goto out;

	for (start = 0; ; start = bitmap_no + mask + 1) {
		spin_lock_irq(&cma->lock);
		/*
		 * If the request is larger than the available number
		 * of pages, stop right away.
		 */
		if (count > cma->available_count) {
			spin_unlock_irq(&cma->lock);
			break;
		}
		bitmap_no = bitmap_find_next_zero_area_off(cmr->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			spin_unlock_irq(&cma->lock);
			break;
		}

		pfn = cmr->base_pfn + (bitmap_no << cma->order_per_bit);
		page = pfn_to_page(pfn);

		/*
		 * Do not hand out page ranges that are not contiguous, so
		 * callers can just iterate the pages without having to worry
		 * about these corner cases.
		 */
		if (!page_range_contiguous(page, count)) {
			spin_unlock_irq(&cma->lock);
			pr_warn_ratelimited("%s: %s: skipping incompatible area [0x%lx-0x%lx]\n",
					    __func__, cma->name, pfn, pfn + count - 1);
			continue;
		}

		bitmap_set(cmr->bitmap, bitmap_no, bitmap_count);
		cma->available_count -= count;
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		spin_unlock_irq(&cma->lock);

		mutex_lock(&cma->alloc_mutex);
		ret = alloc_contig_frozen_range(pfn, pfn + count, ACR_FLAGS_CMA, gfp);
		mutex_unlock(&cma->alloc_mutex);
		if (!ret)
			break;

		cma_clear_bitmap(cma, cmr, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at pfn 0x%lx %p is busy, retrying\n",
			 __func__, pfn, page);

		trace_cma_alloc_busy_retry(cma->name, pfn, page, count, align);
	}
out:
	if (!ret)
		*pagep = page;
	return ret;
}

static struct page *__cma_alloc_frozen(struct cma *cma,
		unsigned long count, unsigned int align, gfp_t gfp)
{
	struct page *page = NULL;
	int ret = -ENOMEM, r;
	unsigned long i;
	const char *name = cma ? cma->name : NULL;

	if (!cma || !cma->count)
		return page;

	pr_debug("%s(cma %p, name: %s, count %lu, align %d)\n", __func__,
		 (void *)cma, cma->name, count, align);

	if (!count)
		return page;

	trace_cma_alloc_start(name, count, cma->available_count, cma->count, align);

	for (r = 0; r < cma->nranges; r++) {
		page = NULL;

		ret = cma_range_alloc(cma, &cma->ranges[r], count, align,
				      &page, gfp);
		if (ret != -EBUSY || page)
			break;
	}

	/*
	 * CMA can allocate multiple page blocks, which results in different
	 * blocks being marked with different tags. Reset the tags to ignore
	 * those page blocks.
	 */
	if (page) {
		for (i = 0; i < count; i++)
			page_kasan_tag_reset(page + i);
	}

	if (ret && !(gfp & __GFP_NOWARN)) {
		pr_err_ratelimited("%s: %s: alloc failed, req-size: %lu pages, ret: %d\n",
				   __func__, cma->name, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	trace_cma_alloc_finish(name, page ? page_to_pfn(page) : 0,
			       page, count, align, ret);
	if (page) {
		count_vm_event(CMA_ALLOC_SUCCESS);
		cma_sysfs_account_success_pages(cma, count);
	} else {
		count_vm_event(CMA_ALLOC_FAIL);
		cma_sysfs_account_fail_pages(cma, count);
	}

	return page;
}

struct page *cma_alloc_frozen(struct cma *cma, unsigned long count,
			      unsigned int align, bool no_warn)
{
	gfp_t gfp = GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0);

	return __cma_alloc_frozen(cma, count, align, gfp);
}

struct page *cma_alloc_frozen_compound(struct cma *cma, unsigned int order)
{
	gfp_t gfp = GFP_KERNEL | __GFP_COMP | __GFP_NOWARN;

	return __cma_alloc_frozen(cma, 1 << order, order, gfp);
}

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma: Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @no_warn: Avoid printing message about failed allocation
 *
 * This function allocates part of contiguous memory on specific
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, unsigned long count,
		       unsigned int align, bool no_warn)
{
	struct page *page;

	page = cma_alloc_frozen(cma, count, align, no_warn);
	if (page)
		set_pages_refcounted(page, count);

	return page;
}
EXPORT_SYMBOL_GPL(cma_alloc);
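
/*
 * Typical allocate/release pairing (sketch, error handling elided;
 * "nr_pages" is a placeholder):
 *
 *	struct page *page = cma_alloc(cma, nr_pages, 0, false);
 *
 *	if (page) {
 *		...use the physically contiguous pages...
 *		cma_release(cma, page, nr_pages);
 *	}
 */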

static struct cma_memrange *find_cma_memrange(struct cma *cma,
		const struct page *pages, unsigned long count)
{
	struct cma_memrange *cmr = NULL;
	unsigned long pfn, end_pfn;
	int r;

	pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);

	if (!cma || !pages || count > cma->count)
		return NULL;

	pfn = page_to_pfn(pages);

	for (r = 0; r < cma->nranges; r++) {
		cmr = &cma->ranges[r];
		end_pfn = cmr->base_pfn + cmr->count;
		if (pfn >= cmr->base_pfn && pfn < end_pfn) {
			if (pfn + count <= end_pfn)
				break;

			VM_WARN_ON_ONCE(1);
		}
	}

	if (r == cma->nranges) {
		pr_debug("%s(page %p, count %lu, no cma range matches the page range)\n",
			 __func__, (void *)pages, count);
		return NULL;
	}

	return cmr;
}

static void __cma_release_frozen(struct cma *cma, struct cma_memrange *cmr,
		const struct page *pages, unsigned long count)
{
	unsigned long pfn = page_to_pfn(pages);

	pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);

	free_contig_frozen_range(pfn, count);
	cma_clear_bitmap(cma, cmr, pfn, count);
	cma_sysfs_account_release_pages(cma, count);
	trace_cma_release(cma->name, pfn, pages, count);
}

/**
 * cma_release() - release allocated pages
 * @cma: Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when provided pages do not belong to contiguous area and
 * true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages,
		 unsigned long count)
{
	struct cma_memrange *cmr;
	unsigned long ret = 0;
	unsigned long i, pfn;

	cmr = find_cma_memrange(cma, pages, count);
	if (!cmr)
		return false;

	pfn = page_to_pfn(pages);
	for (i = 0; i < count; i++, pfn++)
		ret += !put_page_testzero(pfn_to_page(pfn));

	WARN(ret, "%lu pages are still in use!\n", ret);

	__cma_release_frozen(cma, cmr, pages, count);

	return true;
}
EXPORT_SYMBOL_GPL(cma_release);

bool cma_release_frozen(struct cma *cma, const struct page *pages,
			unsigned long count)
{
	struct cma_memrange *cmr;

	cmr = find_cma_memrange(cma, pages, count);
	if (!cmr)
		return false;

	__cma_release_frozen(cma, cmr, pages, count);

	return true;
}

int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}

bool cma_intersects(struct cma *cma, unsigned long start, unsigned long end)
{
	int r;
	struct cma_memrange *cmr;
	unsigned long rstart, rend;

	for (r = 0; r < cma->nranges; r++) {
		cmr = &cma->ranges[r];

		rstart = PFN_PHYS(cmr->base_pfn);
		rend = PFN_PHYS(cmr->base_pfn + cmr->count);
		if (end < rstart)
			continue;
		if (start >= rend)
			continue;
		return true;
	}

	return false;
}

/*
 * Very basic function to reserve memory from a CMA area that has not
 * yet been activated. This is expected to be called early, when the
 * system is single-threaded, so there is no locking. The alignment
 * checking is restrictive - only pageblock-aligned areas
 * (CMA_MIN_ALIGNMENT_BYTES) may be reserved through this function.
 * This keeps things simple, and is enough for the current use case.
 *
 * The CMA bitmaps have not yet been allocated, so just start
 * reserving from the bottom up, using a PFN to keep track
 * of what has been reserved. Unreserving is not possible.
 *
 * The caller is responsible for initializing the page structures
 * in the area properly, since this just points to memblock-allocated
 * memory. The caller should subsequently use init_cma_pageblock() to
 * set the migrate type and CMA stats for the pageblocks that were
 * reserved.
 *
 * If the CMA area fails to activate later, memory obtained through
 * this interface is not handed to the page allocator; this is
 * the responsibility of the caller (e.g. like normal memblock-allocated
 * memory).
 */
void __init *cma_reserve_early(struct cma *cma, unsigned long size)
{
	int r;
	struct cma_memrange *cmr;
	unsigned long available;
	void *ret = NULL;

	if (!cma || !cma->count)
		return NULL;
	/*
	 * Can only be called early in init.
	 */
	if (test_bit(CMA_ACTIVATED, &cma->flags))
		return NULL;

	if (!IS_ALIGNED(size, CMA_MIN_ALIGNMENT_BYTES))
		return NULL;

	if (!IS_ALIGNED(size, (PAGE_SIZE << cma->order_per_bit)))
		return NULL;

	size >>= PAGE_SHIFT;

	if (size > cma->available_count)
		return NULL;

	for (r = 0; r < cma->nranges; r++) {
		cmr = &cma->ranges[r];
		available = cmr->count - (cmr->early_pfn - cmr->base_pfn);
		if (size <= available) {
			ret = phys_to_virt(PFN_PHYS(cmr->early_pfn));
			cmr->early_pfn += size;
			cma->available_count -= size;
			return ret;
		}
	}

	return ret;
}
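
/*
 * Illustrative early-boot use (hypothetical caller): carve one
 * pageblock out of a not-yet-activated area. The caller must later
 * initialize the page structures and run init_cma_pageblock() on the
 * returned region, as described above.
 *
 *	void *buf = cma_reserve_early(cma, CMA_MIN_ALIGNMENT_BYTES);
 *	if (!buf)
 *		...fall back to a regular memblock allocation...
 */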