// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
# define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <trace/events/cma.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name;
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is represented in order_per_bits.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
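
/*
 * Worked example for the bitmap granularity helpers above (illustrative only,
 * not tied to any particular configuration): with order_per_bit = 2, each
 * bitmap bit covers 1 << 2 = 4 pages. For an allocation requesting
 * align_order = 4, cma_bitmap_aligned_mask() returns (1 << (4 - 2)) - 1 = 3,
 * so the bitmap search is aligned to every 4th bit, i.e. 16-page boundaries.
 * If the area's base_pfn were 0x12345, cma_bitmap_aligned_offset(cma, 4)
 * would be (0x12345 & 0xf) >> 2 = 1, and cma_bitmap_pages_to_bits(cma, 5)
 * would round 5 pages up to 8 and return 2 bits.
 */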

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

static void __init cma_activate_area(struct cma *cma)
{
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
	if (!cma->bitmap)
		goto out_error;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range() requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA reserved range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto not_in_zone;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return;

not_in_zone:
	bitmap_free(cma->bitmap);
out_error:
	cma->count = 0;
	pr_err("CMA area %s could not be activated\n", cma->name);
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++)
		cma_activate_area(&cma_areas[i]);

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];

	if (name)
		snprintf(cma->name, CMA_MAX_NAME, "%s", name);
	else
		snprintf(cma->name, CMA_MAX_NAME, "cma%d", cma_area_count);

	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}
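
/*
 * Illustrative sketch of a cma_init_reserved_mem() caller (hypothetical code,
 * not part of this file): a boot-time user that has already reserved a
 * suitably aligned block via memblock could register it as a CMA area like
 * this. The identifiers rmem_base, rmem_size and my_cma are made up for the
 * example.
 *
 *	static struct cma *my_cma;
 *
 *	static int __init my_region_setup(phys_addr_t rmem_base,
 *					  phys_addr_t rmem_size)
 *	{
 *		int err;
 *
 *		err = cma_init_reserved_mem(rmem_base, rmem_size, 0,
 *					    "my-region", &my_cma);
 *		if (err)
 *			pr_err("my-region: CMA registration failed: %d\n", err);
 *		return err;
 *	}
 */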

/**
 * cma_declare_contiguous_nid() - reserve custom contiguous area
 * @base: Base address of the reserved area, optional, use 0 for any
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem()
 * @res_cma: Pointer to store the created cma region.
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * This function reserves memory from early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous_nid(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma,
			int nid)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		 __func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable migratetype pages by the page allocator's buddy algorithm;
	 * in that case you couldn't get contiguous memory, which is not what
	 * we want.
	 */
	alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
	if (fixed && base & (alignment - 1)) {
		ret = -EINVAL;
		pr_err("Region at %pa must be aligned to %pa bytes\n",
		       &base, &alignment);
		goto err;
	}
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the request region must not cross the
	 * low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
		       &base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	if (base + size > limit) {
		ret = -EINVAL;
		pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
		       &size, &base, &limit);
		goto err;
	}

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range_nid(size, alignment,
					highmem_start, limit, nid, true);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range_nid(size, alignment, base,
					limit, nid, true);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects, but this address isn't mapped and accessible.
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto free_mem;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

free_mem:
	memblock_free(base, size);
err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
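
/*
 * Illustrative sketch of a cma_declare_contiguous_nid() caller (hypothetical
 * arch setup code, not part of this file): carve out a 64 MiB area from
 * anywhere in memory, on any node, with the default alignment. dma_cma is a
 * made-up pointer for the example.
 *
 *	static struct cma *dma_cma;
 *
 *	void __init my_arch_reserve_cma(void)
 *	{
 *		int err;
 *
 *		err = cma_declare_contiguous_nid(0, SZ_64M, 0, 0, 0, false,
 *						 "my-dma", &dma_cma,
 *						 NUMA_NO_NODE);
 *		if (err)
 *			pr_warn("my-dma: CMA reservation failed: %d\n", err);
 *	}
 */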

#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit, nr_zero;
	unsigned long start = 0;
	unsigned long nr_part, nr_total = 0;
	unsigned long nbits = cma_bitmap_maxno(cma);

	mutex_lock(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
		if (next_zero_bit >= nbits)
			break;
		next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		nr_part = nr_zero << cma->order_per_bit;
		pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
			next_zero_bit);
		nr_total += nr_part;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
	mutex_unlock(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma: Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @no_warn: Avoid printing message about failed allocation
 *
 * This function allocates part of contiguous memory from the specified
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
		       bool no_warn)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	size_t i;
	struct page *page = NULL;
	int ret = -ENOMEM;

	if (!cma || !cma->count || !cma->bitmap)
		return NULL;

	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		return NULL;

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
				GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));

		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc(pfn, page, count, align);

	/*
	 * CMA can allocate multiple page blocks, which results in different
	 * blocks being marked with different tags. Reset the tags to ignore
	 * those page blocks.
	 */
	if (page) {
		for (i = 0; i < count; i++)
			page_kasan_tag_reset(page + i);
	}

	if (ret && !no_warn) {
		pr_err("%s: alloc failed, req-size: %zu pages, ret: %d\n",
		       __func__, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}
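
/*
 * Illustrative sketch of cma_alloc()/cma_release() usage (hypothetical driver
 * code, not part of this file): allocate and later free a 1 MiB, 1 MiB-aligned
 * buffer from an area set up at boot. my_cma is a made-up pointer for the
 * example.
 *
 *	size_t nr_pages = SZ_1M >> PAGE_SHIFT;
 *	struct page *page;
 *
 *	page = cma_alloc(my_cma, nr_pages, get_order(SZ_1M), false);
 *	if (!page)
 *		return -ENOMEM;
 *
 *	... use the pages ...
 *
 *	cma_release(my_cma, page, nr_pages);
 */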

/**
 * cma_release() - release allocated pages
 * @cma: Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p, count %u)\n", __func__, (void *)pages, count);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(pfn, pages, count);

	return true;
}

int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}
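
/*
 * Illustrative sketch of a cma_for_each_area() callback (hypothetical code,
 * not part of this file): walk every registered area and log its size. The
 * iteration stops early if the callback returns a non-zero value.
 *
 *	static int my_show_area(struct cma *cma, void *data)
 *	{
 *		pr_info("%s: %lu pages\n", cma_get_name(cma),
 *			cma_get_size(cma) >> PAGE_SHIFT);
 *		return 0;
 *	}
 *
 *	...
 *	cma_for_each_area(my_show_area, NULL);
 */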