/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm-generic/sections.h>
#include <linux/io.h>

#include "internal.h"

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_REGIONS,

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	.physmem.regions	= memblock_physmem_init_regions,
	.physmem.cnt		= 1,	/* empty dummy entry */
	.physmem.max		= INIT_PHYSMEM_REGIONS,
#endif

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

int memblock_debug __initdata_memblock;
#ifdef CONFIG_MOVABLE_NODE
bool movable_node_enabled __initdata_memblock = false;
#endif
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

ulong __init_memblock choose_memblock_flags(void)
{
	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* inline so we don't get a warning when pr_debug is compiled out */
static __init_memblock const char *
memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
}

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ?
		i : -1;
}

/*
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				ulong flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       ulong flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}

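/*
 * Illustrative example, not part of the original file: given a free range
 * [0x1000, 0x9000) with size = 0x3000 and align = 0x1000, the bottom-up
 * helper above returns round_up(0x1000, 0x1000) = 0x1000, while the
 * top-down helper returns round_down(0x9000 - 0x3000, 0x1000) = 0x6000,
 * i.e. the highest aligned base that still fits the request.
 */
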
/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * When allocation direction is bottom-up, the @start should be greater
 * than the end of the kernel image.  Otherwise, it will be trimmed.  The
 * reason is that we want the bottom-up allocation just near the kernel
 * image so it is highly likely that the allocated memory and the kernel
 * will reside in the same node.
 *
 * If bottom-up allocation fails, we retry the allocation top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid, ulong flags)
{
	phys_addr_t kernel_end, ret;

	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);
	kernel_end = __pa_symbol(_end);

	/*
	 * try bottom-up allocation only when bottom-up mode
	 * is set and @end is above the kernel image.
	 */
	if (memblock_bottom_up() && end > kernel_end) {
		phys_addr_t bottom_up_start;

		/* make sure we will allocate above the kernel */
		bottom_up_start = max(start, kernel_end);

		/* ok, try bottom-up allocation first */
		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
						      size, align, nid, flags);
		if (ret)
			return ret;

		/*
		 * we always limit bottom-up allocation above the kernel,
		 * but top-down allocation doesn't have the limit, so
		 * retrying top-down allocation may succeed when bottom-up
		 * allocation failed.
		 *
		 * bottom-up allocation is expected to fail very rarely,
		 * so we use WARN_ONCE() here to see the stack trace if
		 * a failure happens.
		 */
		WARN_ONCE(1, "memblock: bottom-up allocation failed, memory hotunplug may be affected\n");
	}

	return __memblock_find_range_top_down(start, end, size, align, nid,
					      flags);
}

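/*
 * Minimal usage sketch (illustrative, not part of the original file): an
 * early-boot caller that wants a node-local, page-aligned scratch buffer
 * could pair the finder with memblock_reserve() roughly like this, where
 * setup_scratch() stands in for a hypothetical consumer:
 *
 *	phys_addr_t pa;
 *
 *	pa = memblock_find_in_range_node(SZ_1M, PAGE_SIZE, 0,
 *					 MEMBLOCK_ALLOC_ACCESSIBLE,
 *					 nid, MEMBLOCK_NONE);
 *	if (pa && !memblock_reserve(pa, SZ_1M))
 *		setup_scratch(nid, pa);
 *
 * memblock_alloc_range_nid() further down wraps the same find + reserve
 * steps.
 */
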
/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	ulong flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					  NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK

phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
					phys_addr_t *addr)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	*addr = __pa(memblock.reserved.regions);

	return PAGE_ALIGN(sizeof(struct memblock_region) *
			  memblock.reserved.max);
}

phys_addr_t __init_memblock get_allocated_memblock_memory_regions_info(
					phys_addr_t *addr)
{
	if (memblock.memory.regions == memblock_memory_init_regions)
		return 0;

	*addr = __pa(memblock.memory.regions);

	return PAGE_ALIGN(sizeof(struct memblock_region) *
			  memblock.memory.max);
}

#endif

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array.  If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * RETURNS:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();
	int *in_slab;

	/*
	 * We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation.
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new array aligned to PAGE_SIZE,
	 * so that we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/*
	 * Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() is true and we
	 * use kmalloc(), or we use MEMBLOCK for allocations.  That means this
	 * is unsafe to use when bootmem is currently active (unless bootmem
	 * itself is implemented on top of MEMBLOCK, which isn't the case yet).
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab
	 * is active for memory hotplug operations.
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}

	memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]",
		     memblock_type_name(type), type->max * 2, (u64)addr,
		     (u64)addr + new_size - 1);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	/*
	 * Reserve the new array if it comes from the memblock.  Otherwise,
	 * we needn't do it.
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}

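/*
 * Illustrative note, not part of the original file: if INIT_MEMBLOCK_REGIONS
 * is 128, the first doubling grows the array to 256 entries.  old_size and
 * new_size are rounded up to PAGE_ALIGN()ed old_alloc_size/new_alloc_size so
 * that a memblock-backed old array can later be handed back to
 * memblock_free() in whole pages.
 */
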
/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}

/**
 * memblock_insert_region - insert new memblock region
 * @type: memblock type to insert into
 * @idx: index for the insertion point
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: node id of the new region
 * @flags: flags of the new region
 *
 * Insert new memblock region [@base, @base + @size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid, unsigned long flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, unsigned long flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i, nr_new;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
			nr_new++;
			if (insert)
				memblock_insert_region(type, i++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, i, base, end - base,
					       nid, flags);
	}

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}

int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				      int nid)
{
	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

static int __init_memblock memblock_add_region(phys_addr_t base,
						phys_addr_t size,
						int nid,
						unsigned long flags)
{
	struct memblock_type *_rgn = &memblock.memory;

	memblock_dbg("memblock_add: [%#016llx-%#016llx] flags %#02lx %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     flags, (void *)_RET_IP_);

	return memblock_add_range(_rgn, base, size, nid, flags);
}

int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base, @base + @size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, i, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, i--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = i;
			*end_rgn = i + 1;
		}
	}

	return 0;
}

int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return memblock_remove_range(&memblock.memory, base, size);
}


int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("   memblock_free: [%#016llx-%#016llx] %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     (void *)_RET_IP_);

	kmemleak_free_part(__va(base), size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

static int __init_memblock memblock_reserve_region(phys_addr_t base,
						   phys_addr_t size,
						   int nid,
						   unsigned long flags)
{
	struct memblock_type *type = &memblock.reserved;

	memblock_dbg("memblock_reserve: [%#016llx-%#016llx] flags %#02lx %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     flags, (void *)_RET_IP_);

	return memblock_add_range(type, base, size, nid, flags);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	return memblock_reserve_region(base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_setclr_flag - set or clear a flag on the specified memory region
 * @base: base address of the region
 * @size: size of the region
 * @set: set (1) or clear (0) the flag
 * @flag: the flag to set or clear
 *
 * This function isolates region [@base, @base + @size), and sets/clears flag.
 *
 * Return 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
				phys_addr_t size, int set, int flag)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		if (set)
			memblock_set_region_flags(&type->regions[i], flag);
		else
			memblock_clear_region_flags(&type->regions[i], flag);

	memblock_merge_regions(type);
	return 0;
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

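/*
 * Usage sketch (illustrative, not part of the original file): architecture
 * code that learns from firmware that a memory range is hotpluggable would
 * typically mark it before the zone layout is decided, e.g.:
 *
 *	if (hotpluggable && movable_node_is_enabled())
 *		memblock_mark_hotplug(start, end - start);
 *
 * so that the iterators below skip the range for early allocations and it
 * can stay free for ZONE_MOVABLE.
 */
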
/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
	system_has_some_mirror = true;

	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}


/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.  For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags,
				      struct memblock_type *type_a,
				      struct memblock_type *type_b,
				      phys_addr_t *out_start,
				      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	    "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		/* if we want mirror memory skip non-mirror memory regions */
		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : ULLONG_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid, ulong flags,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		idx_b = type_b->cnt;
	}

	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		/* if we want mirror memory skip non-mirror memory regions */
		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : ULLONG_MAX;
			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */

			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}
	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r->nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r->nid;
}

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base, @base + @size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type);
	return 0;
}
#endif	/* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

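/*
 * Usage sketch (illustrative, not part of the original file): NUMA init
 * code typically records the node of each firmware-described range right
 * after adding it, e.g. for a range [start, end) on node nid:
 *
 *	memblock_add(start, end - start);
 *	memblock_set_node(start, end - start, &memblock.memory, nid);
 *
 * so that the node-aware iterators and allocators below can honour @nid.
 */
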
static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid, ulong flags)
{
	phys_addr_t found;

	if (!align)
		align = SMP_CACHE_BYTES;

	found = memblock_find_in_range_node(size, align, start, end, nid,
					    flags);
	if (found && !memblock_reserve(found, size)) {
		/*
		 * The min_count is set to 0 so that memblock allocations are
		 * never reported as leaks.
		 */
		kmemleak_alloc(__va(found), size, 0, 0);
		return found;
	}
	return 0;
}

phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					ulong flags)
{
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
					flags);
}

static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t max_addr,
					int nid, ulong flags)
{
	return memblock_alloc_range_nid(size, align, 0, max_addr, nid, flags);
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	ulong flags = choose_memblock_flags();
	phys_addr_t ret;

again:
	ret = memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE,
				      nid, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}
	return ret;
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE,
				       MEMBLOCK_NONE);
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

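/*
 * Usage sketch (illustrative, not part of the original file): early
 * per-node setup code usually goes through the physical allocators above,
 * e.g.:
 *
 *	phys_addr_t pa = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE, nid);
 *
 * which prefers @nid but falls back to any node; since the fallback is
 * memblock_alloc_base(), the call panics rather than returning 0 when the
 * request cannot be satisfied at all.  memblock_alloc() is the
 * node-agnostic equivalent.
 */
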
/**
 * memblock_virt_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * The @min_addr limit is dropped if it can not be satisfied and the allocation
 * will fall back to memory below @min_addr.  Also, allocation may fall back
 * to any node in the system if the specified node can not
 * hold the requested memory.
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE.
 *
 * The memory block is aligned on SMP_CACHE_BYTES if @align == 0.
 *
 * The phys address of allocated boot memory block is converted to virtual and
 * allocated memory is reset to 0.
 *
 * In addition, function sets the min_count to 0 using kmemleak_alloc for
 * allocated boot memory block, so that it is never reported as leaks.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
static void * __init memblock_virt_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	phys_addr_t alloc;
	void *ptr;
	ulong flags = choose_memblock_flags();

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	/*
	 * Detect any accidental use of these APIs after slab is ready, as at
	 * this moment memblock may be deinitialized already and its
	 * internal data may be destroyed (after execution of free_all_bootmem)
	 */
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, nid);

	if (!align)
		align = SMP_CACHE_BYTES;

	if (max_addr > memblock.current_limit)
		max_addr = memblock.current_limit;

again:
	alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
					    nid, flags);
	if (alloc)
		goto done;

	if (nid != NUMA_NO_NODE) {
		alloc = memblock_find_in_range_node(size, align, min_addr,
						    max_addr, NUMA_NO_NODE,
						    flags);
		if (alloc)
			goto done;
	}

	if (min_addr) {
		min_addr = 0;
		goto again;
	}

	if (flags & MEMBLOCK_MIRROR) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		goto again;
	}

	return NULL;
done:
	memblock_reserve(alloc, size);
	ptr = phys_to_virt(alloc);
	memset(ptr, 0, size);

	/*
	 * The min_count is set to 0 so that bootmem allocated blocks
	 * are never reported as leaks.  This is because many of these blocks
	 * are only referred via the physical address which is not
	 * looked up by kmemleak.
	 */
	kmemleak_alloc(ptr, size, 0, 0);

	return ptr;
}

/**
 * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	  is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	  allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public wrapper around memblock_virt_alloc_internal() which provides
 * additional debug information (including caller info), if enabled.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid_nopanic(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);
	return memblock_virt_alloc_internal(size, align, min_addr,
					    max_addr, nid);
}

/**
 * memblock_virt_alloc_try_nid - allocate boot memory block with panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	  is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	  allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public panicking version of memblock_virt_alloc_try_nid_nopanic()
 * which provides debug information (including caller info), if enabled,
 * and panics if the request can not be satisfied.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);
	ptr = memblock_virt_alloc_internal(size, align,
					   min_addr, max_addr, nid);
	if (ptr)
		return ptr;

	panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx\n",
	      __func__, (u64)size, (u64)align, nid, (u64)min_addr,
	      (u64)max_addr);
	return NULL;
}

/**
 * __memblock_free_early - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_virt_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part(__va(base), size);
	memblock_remove_range(&memblock.reserved, base, size);
}

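/*
 * Usage sketch (illustrative, not part of the original file): a boot-time
 * table that may later turn out to be unnecessary can be allocated and
 * returned to memblock with the virtual-address API, e.g.:
 *
 *	struct foo *tbl;	(hypothetical type)
 *
 *	tbl = memblock_virt_alloc(sizeof(*tbl) * nr, 0);
 *	...
 *	memblock_free_early(__pa(tbl), sizeof(*tbl) * nr);
 *
 * memblock_virt_alloc() and memblock_free_early() are the wrappers declared
 * in <linux/bootmem.h> around memblock_virt_alloc_try_nid() and
 * __memblock_free_early() above.
 */
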
/*
 * __memblock_free_late - free bootmem block pages directly to buddy allocator
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are released directly
 * to the buddy allocator, no bootmem metadata is updated because it is gone.
 */
void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	u64 cursor, end;

	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part(__va(base), size);
	cursor = PFN_UP(base);
	end = PFN_DOWN(base + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), 0);
		totalram_pages++;
	}
}

/*
 * Remaining API functions
 */

phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
{
	unsigned long pages = 0;
	struct memblock_region *r;
	unsigned long start_pfn, end_pfn;

	for_each_memblock(memory, r) {
		start_pfn = memblock_region_memory_base_pfn(r);
		end_pfn = memblock_region_memory_end_pfn(r);
		start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
		end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
		pages += end_pfn - start_pfn;
	}

	return PFN_PHYS(pages);
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;
	struct memblock_region *r;

	if (!limit)
		return;

	/* find out max address */
	for_each_memblock(memory, r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	/* truncate both memory and reserved regions */
	memblock_remove_range(&memblock.memory, max_addr,
			      (phys_addr_t)ULLONG_MAX);
	memblock_remove_range(&memblock.reserved, max_addr,
			      (phys_addr_t)ULLONG_MAX);
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, PFN_PHYS(pfn));

	if (mid == -1)
		return -1;

	*start_pfn = PFN_DOWN(type->regions[mid].base);
	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

	return type->regions[mid].nid;
}
#endif

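/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * validating a firmware-provided physical range before using it could
 * combine the two predicates below, e.g.:
 *
 *	if (!memblock_is_region_memory(base, size) ||
 *	    memblock_is_region_reserved(base, size))
 *		return -EINVAL;
 *
 * i.e. the range must be fully covered by a memory region and must not
 * intersect anything already reserved.
 */
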
/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) is a subset of a memory block.
 *
 * RETURNS:
 * 0 if false, non-zero if true
 */
int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= end;
}

/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) intersects a reserved memory block.
 *
 * RETURNS:
 * 0 if false, non-zero if true
 */
int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	memblock_cap_size(base, &size);
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}

void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_region *r;

	for_each_memblock(memory, r) {
		orig_start = r->base;
		orig_end = r->base + r->size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			r->base = start;
			r->size = end - start;
		} else {
			memblock_remove_region(&memblock.memory,
					       r - memblock.memory.regions);
			r--;
		}
	}
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
	return memblock.current_limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
{
	unsigned long long base, size;
	unsigned long flags;
	int i;

	pr_info(" %s.cnt = 0x%lx\n", name, type->cnt);

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
		flags = rgn->flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s flags: %#lx\n",
			name, i, base, base + size - 1, size, nid_buf, flags);
	}
}

void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %#llx reserved size = %#llx\n",
		(unsigned long long)memblock.memory.total_size,
		(unsigned long long)memblock.reserved.total_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);

#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)

static int
memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		seq_printf(m, "%4d: ", i);
		if (sizeof(phys_addr_t) == 4)
			seq_printf(m, "0x%08lx..0x%08lx\n",
				   (unsigned long)reg->base,
				   (unsigned long)(reg->base + reg->size - 1));
		else
			seq_printf(m, "0x%016llx..0x%016llx\n",
				   (unsigned long long)reg->base,
				   (unsigned long long)(reg->base + reg->size - 1));

	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);

	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	debugfs_create_file("physmem", S_IRUGO, root, &memblock.physmem, &memblock_debug_fops);
#endif

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */
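
/*
 * Usage note (illustrative, not part of the original file): with
 * CONFIG_DEBUG_FS enabled and debugfs mounted, the files created above can
 * be read directly, e.g. "cat /sys/kernel/debug/memblock/memory", to list
 * the current regions in the "start..end" format produced by
 * memblock_debug_show().
 */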