/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm-generic/sections.h>
#include <linux/io.h>

#include "internal.h"

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_REGIONS,

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	.physmem.regions	= memblock_physmem_init_regions,
	.physmem.cnt		= 1,	/* empty dummy entry */
	.physmem.max		= INIT_PHYSMEM_REGIONS,
#endif

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

int memblock_debug __initdata_memblock;
#ifdef CONFIG_MOVABLE_NODE
bool movable_node_enabled __initdata_memblock = false;
#endif
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

/* inline so we don't get a warning when pr_debug is compiled out */
static __init_memblock const char *
memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
}

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ?
		i : -1;
}

/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Utility called from memblock_find_in_range_node(); finds a free area
 * bottom-up.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Utility called from memblock_find_in_range_node(); finds a free area
 * top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * When the allocation direction is bottom-up, @start should be greater
 * than the end of the kernel image.  Otherwise, it will be trimmed.  The
 * reason is that we want the bottom-up allocation to be close to the kernel
 * image, so it is highly likely that the allocated memory and the kernel
 * will reside in the same node.
 *
 * If the bottom-up allocation fails, allocation falls back to top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
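 * Note that the range is only located here; callers such as
 * memblock_alloc_range_nid() are responsible for reserving it.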
 */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid)
{
	phys_addr_t ret;
	phys_addr_t kernel_end;

	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);
	kernel_end = __pa_symbol(_end);

	/*
	 * try bottom-up allocation only when bottom-up mode
	 * is set and @end is above the kernel image.
	 */
	if (memblock_bottom_up() && end > kernel_end) {
		phys_addr_t bottom_up_start;

		/* make sure we will allocate above the kernel */
		bottom_up_start = max(start, kernel_end);

		/* ok, try bottom-up allocation first */
		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
						      size, align, nid);
		if (ret)
			return ret;

		/*
		 * we always limit bottom-up allocation above the kernel,
		 * but top-down allocation doesn't have the limit, so
		 * retrying top-down allocation may succeed when bottom-up
		 * allocation failed.
		 *
		 * bottom-up allocation is expected to fail very rarely,
		 * so we use WARN_ONCE() here to see the stack trace if
		 * a failure happens.
		 */
		WARN_ONCE(1, "memblock: bottom-up allocation failed, "
			     "memory hotunplug may be affected\n");
	}

	return __memblock_find_range_top_down(start, end, size, align, nid);
}

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	return memblock_find_in_range_node(size, align, start, end,
					   NUMA_NO_NODE);
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK

phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
					phys_addr_t *addr)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	*addr = __pa(memblock.reserved.regions);

	return PAGE_ALIGN(sizeof(struct memblock_region) *
			  memblock.reserved.max);
}

phys_addr_t __init_memblock get_allocated_memblock_memory_regions_info(
					phys_addr_t *addr)
{
	if (memblock.memory.regions == memblock_memory_init_regions)
		return 0;

	*addr = __pa(memblock.memory.regions);

	return PAGE_ALIGN(sizeof(struct memblock_region) *
			  memblock.memory.max);
}

#endif

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * RETURNS:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new array aligned to PAGE_SIZE,
	 * so that we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() is true and we
	 * use it, or we use MEMBLOCK for allocations.
	 * That means that this is unsafe to use when bootmem is currently
	 * active (unless bootmem itself is implemented on top of MEMBLOCK,
	 * which isn't the case yet).
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab
	 * is active for memory hotplug operations.
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}

	memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]",
			memblock_type_name(type), type->max * 2, (u64)addr,
			(u64)addr + new_size - 1);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	/*
	 * Reserve the new array if that comes from the memblock.  Otherwise,
	 * we needn't do it.
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}

/**
 * memblock_insert_region - insert new memblock region
 * @type:	memblock type to insert into
 * @idx:	index for the insertion point
 * @base:	base address of the new region
 * @size:	size of the new region
 * @nid:	node id of the new region
 * @flags:	flags of the new region
 *
 * Insert new memblock region [@base,@base+@size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid, unsigned long flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base,@base+@size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, unsigned long flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i, nr_new;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
			nr_new++;
			if (insert)
				memblock_insert_region(type, i++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, i, base, end - base,
					       nid, flags);
	}

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
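	 * The array may need to be doubled more than once to make room for
	 * all the new regions, hence the loop around memblock_double_array().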
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}

int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				      int nid)
{
	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_range(&memblock.memory, base, size,
				  MAX_NUMNODES, 0);
}

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base,@base+@size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, i, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
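			 * Passing i-- below makes the loop revisit the
			 * inserted bottom half, which is then recorded as a
			 * fully contained region on the next pass.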
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, i--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = i;
			*end_rgn = i + 1;
		}
	}

	return 0;
}

int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return memblock_remove_range(&memblock.memory, base, size);
}

int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("   memblock_free: [%#016llx-%#016llx] %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     (void *)_RET_IP_);

	return memblock_remove_range(&memblock.reserved, base, size);
}

static int __init_memblock memblock_reserve_region(phys_addr_t base,
						   phys_addr_t size,
						   int nid,
						   unsigned long flags)
{
	struct memblock_type *_rgn = &memblock.reserved;

	memblock_dbg("memblock_reserve: [%#016llx-%#016llx] flags %#02lx %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     flags, (void *)_RET_IP_);

	return memblock_add_range(_rgn, base, size, nid, flags);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	return memblock_reserve_region(base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * This function isolates region [@base, @base + @size), and marks it with
 * flag MEMBLOCK_HOTPLUG.
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_flags(&type->regions[i], MEMBLOCK_HOTPLUG);

	memblock_merge_regions(type);
	return 0;
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * This function isolates region [@base, @base + @size), and clears flag
 * MEMBLOCK_HOTPLUG for the isolated regions.
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_clear_region_flags(&type->regions[i],
					    MEMBLOCK_HOTPLUG);

	memblock_merge_regions(type);
	return 0;
}

/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32 bits
 * of *@idx contain the index into type_a and the upper 32 bits index the
 * areas before each region in type_b.  For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32 bits index the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_mem_range(u64 *idx, int nid,
				      struct memblock_type *type_a,
				      struct memblock_type *type_b,
				      phys_addr_t *out_start,
				      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	    "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int	    m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ? r->base : ULLONG_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
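 * Hotpluggable memory regions are also skipped when movable_node is enabled.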
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	    "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		idx_b = type_b->cnt;
	}

	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ? r->base : ULLONG_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}
	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_range().
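 * Reports each memblock.memory region as a [start, end) pfn range together
 * with its node id.  Regions that do not cover a full page are skipped, and
 * passing MAX_NUMNODES as @nid walks all nodes.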
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r->nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r->nid;
}

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base,@base+@size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type);
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid)
{
	phys_addr_t found;

	if (!align)
		align = SMP_CACHE_BYTES;

	found = memblock_find_in_range_node(size, align, start, end, nid);
	if (found && !memblock_reserve(found, size))
		return found;

	return 0;
}

phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end)
{
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE);
}

static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t max_addr,
					int nid)
{
	return memblock_alloc_range_nid(size, align, 0, max_addr, nid);
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	return memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE);
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

/**
 * memblock_virt_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * The @min_addr limit is dropped if it can not be satisfied and the allocation
 * will fall back to memory below @min_addr.  Also, allocation may fall back
 * to any node in the system if the specified node can not
 * hold the requested memory.
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE.
 *
 * The memory block is aligned on SMP_CACHE_BYTES if @align == 0.
 *
 * The phys address of the allocated boot memory block is converted to virtual
 * and the allocated memory is reset to 0.
 *
 * In addition, the function sets min_count to 0 using kmemleak_alloc() for
 * the allocated boot memory block, so that it is never reported as a leak.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
static void * __init memblock_virt_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	phys_addr_t alloc;
	void *ptr;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	    "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	/*
	 * Detect any accidental use of these APIs after slab is ready, as at
	 * this moment memblock may be deinitialized already and its
	 * internal data may be destroyed (after execution of free_all_bootmem)
	 */
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, nid);

	if (!align)
		align = SMP_CACHE_BYTES;

	if (max_addr > memblock.current_limit)
		max_addr = memblock.current_limit;

again:
	alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
					    nid);
	if (alloc)
		goto done;

	if (nid != NUMA_NO_NODE) {
		alloc = memblock_find_in_range_node(size, align, min_addr,
						    max_addr, NUMA_NO_NODE);
		if (alloc)
			goto done;
	}

	if (min_addr) {
		min_addr = 0;
		goto again;
	} else {
		goto error;
	}

done:
	memblock_reserve(alloc, size);
	ptr = phys_to_virt(alloc);
	memset(ptr, 0, size);

	/*
	 * The min_count is set to 0 so that bootmem allocated blocks
	 * are never reported as leaks. This is because many of these blocks
	 * are only referred via the physical address which is not
	 * looked up by kmemleak.
	 */
	kmemleak_alloc(ptr, size, 0, 0);

	return ptr;

error:
	return NULL;
}

/**
 * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public version of _memblock_virt_alloc_try_nid_nopanic() which provides
 * additional debug information (including caller info), if enabled.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid_nopanic(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);
	return memblock_virt_alloc_internal(size, align, min_addr,
					    max_addr, nid);
}

/**
 * memblock_virt_alloc_try_nid - allocate boot memory block with panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public panicking version of _memblock_virt_alloc_try_nid_nopanic()
 * which provides debug information (including caller info), if enabled,
 * and panics if the request can not be satisfied.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);
	ptr = memblock_virt_alloc_internal(size, align,
					   min_addr, max_addr, nid);
	if (ptr)
		return ptr;

	panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx\n",
	      __func__, (u64)size, (u64)align, nid, (u64)min_addr,
	      (u64)max_addr);
	return NULL;
}

/**
 * __memblock_free_early - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_virt_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
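 * Use __memblock_free_late() instead when the pages should be returned to
 * the buddy allocator.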
 */
void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part(__va(base), size);
	memblock_remove_range(&memblock.reserved, base, size);
}

/**
 * __memblock_free_late - free bootmem block pages directly to buddy allocator
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are released directly
 * to the buddy allocator, no bootmem metadata is updated because it is gone.
 */
void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	u64 cursor, end;

	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part(__va(base), size);
	cursor = PFN_UP(base);
	end = PFN_DOWN(base + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), 0);
		totalram_pages++;
	}
}

/*
 * Remaining API functions
 */

phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
{
	unsigned long pages = 0;
	struct memblock_region *r;
	unsigned long start_pfn, end_pfn;

	for_each_memblock(memory, r) {
		start_pfn = memblock_region_memory_base_pfn(r);
		end_pfn = memblock_region_memory_end_pfn(r);
		start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
		end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
		pages += end_pfn - start_pfn;
	}

	return PFN_PHYS(pages);
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;
	struct memblock_region *r;

	if (!limit)
		return;

	/* find out max address */
	for_each_memblock(memory, r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	/* truncate both memory and reserved regions */
	memblock_remove_range(&memblock.memory, max_addr,
			      (phys_addr_t)ULLONG_MAX);
	memblock_remove_range(&memblock.reserved, max_addr,
			      (phys_addr_t)ULLONG_MAX);
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int __init_memblock
memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, PFN_PHYS(pfn));

	if (mid == -1)
		return -1;

	*start_pfn = PFN_DOWN(type->regions[mid].base);
	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

	return type->regions[mid].nid;
}
#endif

/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) is a subset of a memory block.
 *
 * RETURNS:
 * 0 if false, non-zero if true
 */
int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= end;
}

/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) intersects a reserved memory block.
 *
 * RETURNS:
 * 0 if false, non-zero if true
 */
int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	memblock_cap_size(base, &size);
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}

void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_region *r;

	for_each_memblock(memory, r) {
		orig_start = r->base;
		orig_end = r->base + r->size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			r->base = start;
			r->size = end - start;
		} else {
			memblock_remove_region(&memblock.memory,
					       r - memblock.memory.regions);
			r--;
		}
	}
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
	return memblock.current_limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
{
	unsigned long long base, size;
	unsigned long flags;
	int i;

	pr_info(" %s.cnt  = 0x%lx\n", name, type->cnt);

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
		flags = rgn->flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s flags: %#lx\n",
			name, i, base, base + size - 1, size, nid_buf, flags);
	}
}

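/* Dump the overall configuration and each region array to the kernel log */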
void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %#llx reserved size = %#llx\n",
		(unsigned long long)memblock.memory.total_size,
		(unsigned long long)memblock.reserved.total_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);

#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		seq_printf(m, "%4d: ", i);
		if (sizeof(phys_addr_t) == 4)
			seq_printf(m, "0x%08lx..0x%08lx\n",
				   (unsigned long)reg->base,
				   (unsigned long)(reg->base + reg->size - 1));
		else
			seq_printf(m, "0x%016llx..0x%016llx\n",
				   (unsigned long long)reg->base,
				   (unsigned long long)(reg->base + reg->size - 1));
	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);
	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	debugfs_create_file("physmem", S_IRUGO, root, &memblock.physmem, &memblock_debug_fops);
#endif

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */