// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 */

#define pr_fmt(fmt) "cma: " fmt

#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <trace/events/cma.h>

#include "internal.h"
#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name;
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is expressed in units of bitmap bits, i.e. in
 * multiples of 2^order_per_bit pages.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}
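
/*
 * Worked example (not part of the original source; values are
 * illustrative). With order_per_bit = 1 (each bitmap bit covers 2 pages),
 * base_pfn = 0x12344 and a request with align_order = 4 (16 pages):
 *
 *	cma_bitmap_aligned_mask()   = (1 << (4 - 1)) - 1      = 0x7
 *	cma_bitmap_aligned_offset() = (0x12344 & 0xf) >> 1    = 2
 *
 * The bitmap search below then only returns bit indices b for which
 * b + offset is a multiple of mask + 1 = 8; e.g. b = 6 maps to
 * pfn = 0x12344 + (6 << 1) = 0x12350, which is 16-page aligned. The
 * offset compensates for a base_pfn that is not itself aligned to the
 * requested order.
 */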

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned long count)
{
	unsigned long bitmap_no, bitmap_count;
	unsigned long flags;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	spin_lock_irqsave(&cma->lock, flags);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	spin_unlock_irqrestore(&cma->lock, flags);
}

static void __init cma_activate_area(struct cma *cma)
{
	unsigned long base_pfn = cma->base_pfn, pfn;
	struct zone *zone;

	cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
	if (!cma->bitmap)
		goto out_error;

	/*
	 * alloc_contig_range() requires the pfn range specified to be in the
	 * same zone. Simplify by forcing the entire CMA reserved range to be
	 * in the same zone.
	 */
	WARN_ON_ONCE(!pfn_valid(base_pfn));
	zone = page_zone(pfn_to_page(base_pfn));
	for (pfn = base_pfn + 1; pfn < base_pfn + cma->count; pfn++) {
		WARN_ON_ONCE(!pfn_valid(pfn));
		if (page_zone(pfn_to_page(pfn)) != zone)
			goto not_in_zone;
	}

	for (pfn = base_pfn; pfn < base_pfn + cma->count;
	     pfn += pageblock_nr_pages)
		init_cma_reserved_pageblock(pfn_to_page(pfn));

	spin_lock_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return;

not_in_zone:
	bitmap_free(cma->bitmap);
out_error:
	/* Expose all pages to the buddy, they are useless for CMA. */
	if (!cma->reserve_pages_on_error) {
		for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn++)
			free_reserved_page(pfn_to_page(pfn));
	}
	totalcma_pages -= cma->count;
	cma->count = 0;
	pr_err("CMA area %s could not be activated\n", cma->name);
	return;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++)
		cma_activate_area(&cma_areas[i]);

	return 0;
}
core_initcall(cma_init_reserved_areas);

void __init cma_reserve_pages_on_error(struct cma *cma)
{
	cma->reserve_pages_on_error = true;
}

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area.
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates custom contiguous area from already reserved memory.
 *
 * Return: 0 on success, a negative error code on failure.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	if (!IS_ALIGNED(base | size, CMA_MIN_ALIGNMENT_BYTES))
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];

	if (name)
		snprintf(cma->name, CMA_MAX_NAME, "%s", name);
	else
		snprintf(cma->name, CMA_MAX_NAME, "cma%d", cma_area_count);

	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += cma->count;

	return 0;
}
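
/*
 * Usage sketch (illustrative; "my_cma" and the sizes are hypothetical).
 * Early arch code that has already carved out a region via memblock can
 * hand it to CMA like this:
 *
 *	static struct cma *my_cma;
 *
 *	phys_addr_t rbase = memblock_phys_alloc(SZ_32M, CMA_MIN_ALIGNMENT_BYTES);
 *	if (!rbase ||
 *	    cma_init_reserved_mem(rbase, SZ_32M, 0, "mydev", &my_cma))
 *		pr_warn("mydev: CMA registration failed\n");
 *
 * The area only becomes usable for cma_alloc() after
 * cma_init_reserved_areas() has run at core_initcall time.
 */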

/**
 * cma_declare_contiguous_nid() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, must be a power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem().
 * @res_cma: Pointer to store the created cma region.
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
 *
 * Return: 0 on success, a negative error code on failure.
 */
int __init cma_declare_contiguous_nid(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma,
			int nid)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	if (!IS_ENABLED(CONFIG_NUMA))
		nid = NUMA_NO_NODE;

	/* Sanitise input arguments. */
	alignment = max_t(phys_addr_t, alignment, CMA_MIN_ALIGNMENT_BYTES);
	if (fixed && base & (alignment - 1)) {
		ret = -EINVAL;
		pr_err("Region at %pa must be aligned to %pa bytes\n",
			&base, &alignment);
		goto err;
	}
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the request region must not cross the
	 * low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	if (base + size > limit) {
		ret = -EINVAL;
		pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
			&size, &base, &limit);
		goto err;
	}

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * If there is enough memory, try a bottom-up allocation first.
		 * It will place the new cma area close to the start of the node
		 * and guarantee that the compaction is moving pages out of the
		 * cma area and not into it.
		 * Avoid using first 4GB to not interfere with constrained zones
		 * like DMA/DMA32.
		 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
		if (!memblock_bottom_up() && memblock_end >= SZ_4G + size) {
			memblock_set_bottom_up(true);
			addr = memblock_alloc_range_nid(size, alignment, SZ_4G,
							limit, nid, true);
			memblock_set_bottom_up(false);
		}
#endif

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (!addr && base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range_nid(size, alignment,
					highmem_start, limit, nid, true);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range_nid(size, alignment, base,
					limit, nid, true);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects but this address isn't mapped and accessible
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto free_mem;

	pr_info("Reserved %ld MiB at %pa on node %d\n", (unsigned long)size / SZ_1M,
		&base, nid);
	return 0;

free_mem:
	memblock_phys_free(base, size);
err:
	pr_err("Failed to reserve %ld MiB on node %d\n", (unsigned long)size / SZ_1M,
	       nid);
	return ret;
}
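
/*
 * Usage sketch (illustrative; "cam_cma" and the size are hypothetical).
 * A platform might reserve a 64 MiB area anywhere on node 0 during early
 * boot, once memblock is up:
 *
 *	static struct cma *cam_cma;
 *
 *	if (cma_declare_contiguous_nid(0, SZ_64M, 0, 0, 0, false,
 *				       "camera", &cam_cma, 0))
 *		pr_warn("camera: no CMA area reserved\n");
 *
 * Passing base = 0 with fixed = false lets memblock pick the placement;
 * alignment = 0 falls back to CMA_MIN_ALIGNMENT_BYTES.
 */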

static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit, nr_zero;
	unsigned long start = 0;
	unsigned long nr_part, nr_total = 0;
	unsigned long nbits = cma_bitmap_maxno(cma);

	spin_lock_irq(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
		if (next_zero_bit >= nbits)
			break;
		next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		nr_part = nr_zero << cma->order_per_bit;
		pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
			next_zero_bit);
		nr_total += nr_part;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
	spin_unlock_irq(&cma->lock);
}

static struct page *__cma_alloc(struct cma *cma, unsigned long count,
				unsigned int align, gfp_t gfp)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	unsigned long i;
	struct page *page = NULL;
	int ret = -ENOMEM;
	const char *name = cma ? cma->name : NULL;

	trace_cma_alloc_start(name, count, align);

	if (!cma || !cma->count || !cma->bitmap)
		return page;

	pr_debug("%s(cma %p, name: %s, count %lu, align %d)\n", __func__,
		 (void *)cma, cma->name, count, align);

	if (!count)
		return page;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		return page;

	for (;;) {
		spin_lock_irq(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			spin_unlock_irq(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		spin_unlock_irq(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA, gfp);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at pfn 0x%lx %p is busy, retrying\n",
			 __func__, pfn, pfn_to_page(pfn));

		trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn),
					   count, align);
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	/*
	 * CMA can allocate multiple page blocks, which results in different
	 * blocks being marked with different tags. Reset the tags to ignore
	 * those page blocks.
	 */
	if (page) {
		for (i = 0; i < count; i++)
			page_kasan_tag_reset(nth_page(page, i));
	}

	if (ret && !(gfp & __GFP_NOWARN)) {
		pr_err_ratelimited("%s: %s: alloc failed, req-size: %lu pages, ret: %d\n",
				   __func__, cma->name, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	trace_cma_alloc_finish(name, pfn, page, count, align, ret);
	if (page) {
		count_vm_event(CMA_ALLOC_SUCCESS);
		cma_sysfs_account_success_pages(cma, count);
	} else {
		count_vm_event(CMA_ALLOC_FAIL);
		cma_sysfs_account_fail_pages(cma, count);
	}

	return page;
}

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma: Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @no_warn: Avoid printing message about failed allocation.
 *
 * This function allocates part of the contiguous memory from the specified
 * contiguous memory area.
 *
 * Return: Pointer to the first page of the allocated range, or NULL on
 * failure.
 */
struct page *cma_alloc(struct cma *cma, unsigned long count,
		       unsigned int align, bool no_warn)
{
	return __cma_alloc(cma, count, align, GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
}
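
/*
 * Usage sketch (illustrative; "my_cma" is assumed to have been set up by
 * one of the reservation helpers above). Allocate 16 pages, naturally
 * aligned to 4 pages, and hand them back when done:
 *
 *	struct page *page = cma_alloc(my_cma, 16, 2, false);
 *
 *	if (page) {
 *		...
 *		cma_release(my_cma, page, 16);
 *	}
 *
 * @align is an order, so 2 here means a 4-page (2^2) alignment. The count
 * passed to cma_release() must match the count used at allocation time.
 */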

struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp)
{
	struct page *page;

	if (WARN_ON(!order || !(gfp & __GFP_COMP)))
		return NULL;

	page = __cma_alloc(cma, 1 << order, order, gfp);

	return page ? page_folio(page) : NULL;
}
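
/*
 * Usage sketch (illustrative). The folio interface requires a compound
 * allocation, so callers must pass __GFP_COMP and a non-zero order, e.g.
 *
 *	struct folio *folio = cma_alloc_folio(my_cma, 4,
 *					      GFP_KERNEL | __GFP_COMP);
 *
 * which allocates a naturally aligned 16-page folio; free it with
 * cma_free_folio() below.
 */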

bool cma_pages_valid(struct cma *cma, const struct page *pages,
		     unsigned long count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count) {
		pr_debug("%s(page %p, count %lu)\n", __func__,
			 (void *)pages, count);
		return false;
	}

	return true;
}

/**
 * cma_release() - release allocated pages
 * @cma: Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages,
		 unsigned long count)
{
	unsigned long pfn;

	if (!cma_pages_valid(cma, pages, count))
		return false;

	pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);

	pfn = page_to_pfn(pages);

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	cma_sysfs_account_release_pages(cma, count);
	trace_cma_release(cma->name, pfn, pages, count);

	return true;
}

bool cma_free_folio(struct cma *cma, const struct folio *folio)
{
	if (WARN_ON(!folio_test_large(folio)))
		return false;

	return cma_release(cma, &folio->page, folio_nr_pages(folio));
}

int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}
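
/*
 * Usage sketch (illustrative; "print_one_area" is hypothetical). The
 * iterator stops at, and propagates, the first non-zero return value:
 *
 *	static int print_one_area(struct cma *cma, void *data)
 *	{
 *		phys_addr_t base = cma_get_base(cma);
 *
 *		pr_info("%s: base %pa, size %lu bytes\n",
 *			cma_get_name(cma), &base, cma_get_size(cma));
 *		return 0;
 *	}
 *
 *	cma_for_each_area(print_one_area, NULL);
 */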