/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
# define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <trace/events/cma.h>

#include "cma.h"
#include "internal.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name ? cma->name : "(undefined)";
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is represented in order_per_bits.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}
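
/*
 * Illustrative sketch (not built): how the helpers above map pages onto
 * bitmap bits. All numbers below are hypothetical, chosen only to make
 * the arithmetic concrete for an area with order_per_bit == 2 (one bit
 * covers four pages) and base_pfn == 0x1000.
 */
#if 0
static void cma_bitmap_example(const struct cma *cma)
{
	/* Assume: cma->base_pfn == 0x1000, cma->order_per_bit == 2. */

	/* 7 pages round up to 2 bits: ALIGN(7, 4) >> 2 == 2. */
	unsigned long bits = cma_bitmap_pages_to_bits(cma, 7);

	/*
	 * For an allocation aligned to order 4 (16 pages), a candidate
	 * bit position must be a multiple of 16 >> 2 == 4 bits, hence
	 * a mask of 0b11.
	 */
	unsigned long mask = cma_bitmap_aligned_mask(cma, 4);

	/*
	 * base_pfn 0x1000 is already 16-page aligned, so bit 0 sits at
	 * offset 0 relative to that alignment.
	 */
	unsigned long offset = cma_bitmap_aligned_offset(cma, 4);

	pr_debug("bits=%lu mask=%lx offset=%lu\n", bits, mask, offset);
}
#endif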

static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		return -ENOMEM;

	do {
		unsigned j;

		base_pfn = pfn;
		if (!pfn_valid(base_pfn))
			goto err;

		zone = page_zone(pfn_to_page(base_pfn));
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			if (!pfn_valid(pfn))
				goto err;

			/*
			 * In init_cma_reserved_pageblock(), present_pages
			 * is adjusted with the assumption that all pages in
			 * the pageblock come from a single zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto err;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return 0;

err:
	pr_err("CMA area %s could not be activated\n", cma->name);
	kfree(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}

static int __init cma_init_reserved_areas(void)
{
	int i;
	struct zone *zone;
	pg_data_t *pgdat;

	if (!cma_area_count)
		return 0;

	for_each_online_pgdat(pgdat) {
		unsigned long start_pfn = ULONG_MAX, end_pfn = 0;

		zone = &pgdat->node_zones[ZONE_MOVABLE];

		/*
		 * If the zone is already populated, we cannot adjust the
		 * zone range: it now covers the maximum node span and we
		 * no longer know the original zone range.
		 */
		if (populated_zone(zone))
			continue;

		for (i = 0; i < cma_area_count; i++) {
			if (pfn_to_nid(cma_areas[i].base_pfn) !=
				pgdat->node_id)
				continue;

			start_pfn = min(start_pfn, cma_areas[i].base_pfn);
			end_pfn = max(end_pfn, cma_areas[i].base_pfn +
						cma_areas[i].count);
		}

		if (!end_pfn)
			continue;

		zone->zone_start_pfn = start_pfn;
		zone->spanned_pages = end_pfn - start_pfn;
	}

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	/*
	 * Reserved pages for ZONE_MOVABLE are now activated, which changes
	 * ZONE_MOVABLE's managed page counter and the other zones' present
	 * counters. We need to re-calculate the various pieces of zone
	 * information that depend on this initialization.
	 */
	build_all_zonelists(NULL);
	for_each_populated_zone(zone) {
		if (zone_idx(zone) == ZONE_MOVABLE) {
			zone_pcp_reset(zone);
			setup_zone_pageset(zone);
		} else
			zone_pcp_update(zone);

		set_zone_contiguous(zone);
	}

	/*
	 * We need to re-initialize the per-zone watermarks by calling
	 * init_per_zone_wmark_min(), but we do not call it here because
	 * it is registered on core_initcall and will be called later
	 * than us anyway.
	 */

	return 0;
}
pure_initcall(cma_init_reserved_areas);
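
/*
 * Illustrative sketch (not built): the ZONE_MOVABLE span adjustment
 * performed by cma_init_reserved_areas() above, with hypothetical
 * numbers. Suppose node 0 has an empty ZONE_MOVABLE and two CMA areas
 * were reserved on that node at PFNs [0x80000, 0x84000) and
 * [0x90000, 0x98000). The zone is stretched to cover both areas.
 */
#if 0
static void cma_zone_span_example(struct zone *movable_zone)
{
	unsigned long start_pfn = min(0x80000UL, 0x90000UL);	/* 0x80000 */
	unsigned long end_pfn   = max(0x84000UL, 0x98000UL);	/* 0x98000 */

	movable_zone->zone_start_pfn = start_pfn;		/* 0x80000 */
	movable_zone->spanned_pages  = end_pfn - start_pfn;	/* 0x18000 pages */
}
#endif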

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	if (name) {
		cma->name = name;
	} else {
		cma->name = kasprintf(GFP_KERNEL, "cma%d", cma_area_count);
		if (!cma->name)
			return -ENOMEM;
	}
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}
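
/*
 * Illustrative sketch (not built): registering an already reserved
 * memblock region with cma_init_reserved_mem(). The base/size values,
 * the "example" name and the example_cma/example_register_reserved
 * identifiers are hypothetical; real callers are typically the
 * reserved-memory handling code for the "shared-dma-pool" binding.
 */
#if 0
static struct cma *example_cma;

static int __init example_register_reserved(phys_addr_t base, phys_addr_t size)
{
	/* The region must already have been reserved in memblock. */
	return cma_init_reserved_mem(base, size, 0, "example", &example_cma);
}
#endif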

/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem()
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable-migratetype pages by the page allocator's buddy
	 * algorithm. In that case, a contiguous allocation could fail,
	 * which is not what we want.
	 */
	alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
			  max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the request region must not cross the
	 * low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range(size, alignment,
						    highmem_start, limit,
						    MEMBLOCK_NONE);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range(size, alignment, base,
						    limit,
						    MEMBLOCK_NONE);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects but this address isn't mapped and accessible
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto err;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
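
/*
 * Illustrative sketch (not built): how arch early-boot code might reserve
 * a CMA area with cma_declare_contiguous(). The 64 MiB size and the
 * example_boot_cma/example_reserve_cma identifiers are hypothetical.
 */
#if 0
static struct cma *example_boot_cma;

static void __init example_reserve_cma(void)
{
	int ret;

	/* Reserve 64 MiB anywhere in DRAM (base = 0, limit = 0, no fixed placement). */
	ret = cma_declare_contiguous(0, SZ_64M, 0, 0, 0, false,
				     "example", &example_boot_cma);
	if (ret)
		pr_warn("example CMA reservation failed: %d\n", ret);
}
#endif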

#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit;
	unsigned long start = 0;
	unsigned int nr_zero, nr_total = 0;

	mutex_lock(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, cma->count, start);
		if (next_zero_bit >= cma->count)
			break;
		next_set_bit = find_next_bit(cma->bitmap, cma->count, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		pr_cont("%s%u@%lu", nr_total ? "+" : "", nr_zero, next_zero_bit);
		nr_total += nr_zero;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %u free of %lu total pages\n", nr_total, cma->count);
	mutex_unlock(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma: Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @gfp_mask: GFP mask to use during compaction
 *
 * This function allocates part of contiguous memory from the specified
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
		       gfp_t gfp_mask)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	struct page *page = NULL;
	int ret = -ENOMEM;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		return NULL;

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
					 gfp_mask);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc(pfn, page, count, align);

	if (ret && !(gfp_mask & __GFP_NOWARN)) {
		pr_err("%s: alloc failed, req-size: %zu pages, ret: %d\n",
			__func__, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}

/**
 * cma_release() - release allocated pages
 * @cma: Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(pfn, pages, count);

	return true;
}

int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}
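
/*
 * Illustrative sketch (not built): allocating and releasing a buffer from
 * a CMA area with cma_alloc()/cma_release(). The example_cma argument is
 * assumed to have been set up earlier (e.g. by cma_declare_contiguous());
 * the example_get_buffer/example_put_buffer names are hypothetical.
 */
#if 0
static struct page *example_get_buffer(struct cma *example_cma, size_t nr_pages)
{
	/* Request nr_pages contiguous pages with order-0 (page) alignment. */
	return cma_alloc(example_cma, nr_pages, 0, GFP_KERNEL);
}

static void example_put_buffer(struct cma *example_cma, struct page *page,
			       unsigned int nr_pages)
{
	if (!cma_release(example_cma, page, nr_pages))
		pr_warn("pages did not belong to the example CMA area\n");
}
#endif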