/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.     June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;

struct memblock memblock __initdata_memblock = {
        .memory.regions         = memblock_memory_init_regions,
        .memory.cnt             = 1,    /* empty dummy entry */
        .memory.max             = INIT_MEMBLOCK_REGIONS,

        .reserved.regions       = memblock_reserved_init_regions,
        .reserved.cnt           = 1,    /* empty dummy entry */
        .reserved.max           = INIT_MEMBLOCK_REGIONS,

        .current_limit          = MEMBLOCK_ALLOC_ANYWHERE,
};

int memblock_debug __initdata_memblock;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock;
static int memblock_reserved_in_slab __initdata_memblock;

/* inline so we don't get a warning when pr_debug is compiled out */
static inline const char *memblock_type_name(struct memblock_type *type)
{
        if (type == &memblock.memory)
                return "memory";
        else if (type == &memblock.reserved)
                return "reserved";
        else
                return "unknown";
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
        return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
}

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
                                       phys_addr_t base2, phys_addr_t size2)
{
        return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
                                        phys_addr_t base, phys_addr_t size)
{
        unsigned long i;

        for (i = 0; i < type->cnt; i++) {
                phys_addr_t rgnbase = type->regions[i].base;
                phys_addr_t rgnsize = type->regions[i].size;

                if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
                        break;
        }

        return (i < type->cnt) ? i : -1;
}
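
/*
 * Worked example for memblock_cap_size() above (hypothetical values,
 * assuming a 64-bit phys_addr_t): for base = 0xfffffffffffff000 and
 * *size = 0x2000, ULLONG_MAX - base is 0xfff, so *size is clamped to
 * 0xfff and base + *size becomes ULLONG_MAX instead of wrapping past
 * zero.  Callers such as memblock_add_region() below rely on this to
 * compute a safe exclusive end address.
 */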
/**
 * memblock_find_in_range_node - find free area in given range and node
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %MAX_NUMNODES for any node
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * RETURNS:
 * Found address on success, %0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
                                        phys_addr_t end, phys_addr_t size,
                                        phys_addr_t align, int nid)
{
        phys_addr_t this_start, this_end, cand;
        u64 i;

        /* pump up @end */
        if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
                end = memblock.current_limit;

        /* avoid allocating the first page */
        start = max_t(phys_addr_t, start, PAGE_SIZE);
        end = max(start, end);

        for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) {
                this_start = clamp(this_start, start, end);
                this_end = clamp(this_end, start, end);

                if (this_end < size)
                        continue;

                cand = round_down(this_end - size, align);
                if (cand >= this_start)
                        return cand;
        }
        return 0;
}

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * RETURNS:
 * Found address on success, %0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
                                        phys_addr_t end, phys_addr_t size,
                                        phys_addr_t align)
{
        return memblock_find_in_range_node(start, end, size, align,
                                           MAX_NUMNODES);
}

/*
 * Free memblock.reserved.regions
 */
int __init_memblock memblock_free_reserved_regions(void)
{
        if (memblock.reserved.regions == memblock_reserved_init_regions)
                return 0;

        return memblock_free(__pa(memblock.reserved.regions),
                 sizeof(struct memblock_region) * memblock.reserved.max);
}

/*
 * Reserve memblock.reserved.regions
 */
int __init_memblock memblock_reserve_reserved_regions(void)
{
        if (memblock.reserved.regions == memblock_reserved_init_regions)
                return 0;

        return memblock_reserve(__pa(memblock.reserved.regions),
                 sizeof(struct memblock_region) * memblock.reserved.max);
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
        type->total_size -= type->regions[r].size;
        memmove(&type->regions[r], &type->regions[r + 1],
                (type->cnt - (r + 1)) * sizeof(type->regions[r]));
        type->cnt--;

        /* Special case for empty arrays */
        if (type->cnt == 0) {
                WARN_ON(type->total_size != 0);
                type->cnt = 1;
                type->regions[0].base = 0;
                type->regions[0].size = 0;
                memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
        }
}
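
/*
 * Illustrative use of the finder above (hypothetical caller; the size is
 * an assumption): locate and claim a 16KB block, page-aligned, anywhere
 * below the current limit.  memblock_find_in_range() only locates space;
 * nothing is claimed until memblock_reserve() succeeds:
 *
 *      phys_addr_t addr;
 *
 *      addr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE,
 *                                    0x4000, PAGE_SIZE);
 *      if (addr)
 *              BUG_ON(memblock_reserve(addr, 0x4000));
 *
 * memblock_alloc_base_nid() further down wraps exactly this
 * find-then-reserve sequence.
 */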
/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * RETURNS:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
                                                phys_addr_t new_area_start,
                                                phys_addr_t new_area_size)
{
        struct memblock_region *new_array, *old_array;
        phys_addr_t old_size, new_size, addr;
        int use_slab = slab_is_available();
        int *in_slab;

        /*
         * We don't allow resizing until we know about the reserved regions
         * of memory that aren't suitable for allocation.
         */
        if (!memblock_can_resize)
                return -1;

        /* Calculate new doubled size */
        old_size = type->max * sizeof(struct memblock_region);
        new_size = old_size << 1;

        /* Retrieve the slab flag */
        if (type == &memblock.memory)
                in_slab = &memblock_memory_in_slab;
        else
                in_slab = &memblock_reserved_in_slab;

        /*
         * Try to find some space for it.
         *
         * WARNING: we assume that either slab_is_available() and we use
         * kmalloc(), or we fall back to MEMBLOCK for the allocation.  That
         * means this is unsafe to use while bootmem is active (unless
         * bootmem itself is implemented on top of MEMBLOCK, which isn't
         * the case yet).
         *
         * This should not be an issue for now though, as we currently only
         * call into MEMBLOCK while it's still active, or much later when
         * slab is active for memory hotplug operations.
         */
        if (use_slab) {
                new_array = kmalloc(new_size, GFP_KERNEL);
                addr = new_array ? __pa(new_array) : 0;
        } else {
                /* only exclude range when trying to double reserved.regions */
                if (type != &memblock.reserved)
                        new_area_start = new_area_size = 0;

                addr = memblock_find_in_range(new_area_start + new_area_size,
                                                memblock.current_limit,
                                                new_size, sizeof(phys_addr_t));
                if (!addr && new_area_size)
                        addr = memblock_find_in_range(0,
                                min(new_area_start, memblock.current_limit),
                                new_size, sizeof(phys_addr_t));

                new_array = addr ? __va(addr) : NULL;
        }
        if (!addr) {
                pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
                       memblock_type_name(type), type->max, type->max * 2);
                return -1;
        }

        memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]\n",
                     memblock_type_name(type), type->max * 2,
                     (u64)addr, (u64)addr + new_size - 1);

        /*
         * Found space, we now need to move the array over before we add the
         * reserved region since it may be our reserved array itself that is
         * full.
         */
        memcpy(new_array, type->regions, old_size);
        memset(new_array + type->max, 0, old_size);
        old_array = type->regions;
        type->regions = new_array;
        type->max <<= 1;

        /* Free the old array.  We needn't free it if it is the static one */
        if (*in_slab)
                kfree(old_array);
        else if (old_array != memblock_memory_init_regions &&
                 old_array != memblock_reserved_init_regions)
                memblock_free(__pa(old_array), old_size);

        /*
         * Reserve the new array if it comes from memblock.  Otherwise, we
         * needn't do it.
         */
        if (!use_slab)
                BUG_ON(memblock_reserve(addr, new_size));

        /* Update slab flag */
        *in_slab = use_slab;

        return 0;
}
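
/*
 * Worked sketch of the doubling above (INIT_MEMBLOCK_REGIONS is assumed
 * to be 128 here): old_size = 128 * sizeof(struct memblock_region),
 * new_size = old_size << 1, giving room for 256 entries.  Before slab is
 * up, the new array is carved out of memblock itself, which is why the
 * caller's pending range has to be passed in and avoided - the array
 * being doubled may be memblock.reserved itself.
 */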
/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
        int i = 0;

        /* cnt never goes below 1 */
        while (i < type->cnt - 1) {
                struct memblock_region *this = &type->regions[i];
                struct memblock_region *next = &type->regions[i + 1];

                if (this->base + this->size != next->base ||
                    memblock_get_region_node(this) !=
                    memblock_get_region_node(next)) {
                        BUG_ON(this->base + this->size > next->base);
                        i++;
                        continue;
                }

                this->size += next->size;
                memmove(next, next + 1, (type->cnt - (i + 1)) * sizeof(*next));
                type->cnt--;
        }
}

/**
 * memblock_insert_region - insert new memblock region
 * @type: memblock type to insert into
 * @idx: index for the insertion point
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: node id of the new region
 *
 * Insert new memblock region [@base,@base+@size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
                                                   int idx, phys_addr_t base,
                                                   phys_addr_t size, int nid)
{
        struct memblock_region *rgn = &type->regions[idx];

        BUG_ON(type->cnt >= type->max);
        memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
        rgn->base = base;
        rgn->size = size;
        memblock_set_region_node(rgn, nid);
        type->cnt++;
        type->total_size += size;
}
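
/*
 * Example of the add/merge semantics implemented by memblock_add_region()
 * below (hypothetical values): starting from a single region
 * [0x1000-0x3000), adding [0x2000-0x4000) first inserts only the
 * non-overlapping tail [0x3000-0x4000); memblock_merge_regions() then
 * collapses the two adjacent, same-node regions into one [0x1000-0x4000)
 * entry.
 */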
/**
 * memblock_add_region - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 *
 * Add new memblock region [@base,@base+@size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_add_region(struct memblock_type *type,
                                phys_addr_t base, phys_addr_t size, int nid)
{
        bool insert = false;
        phys_addr_t obase = base;
        phys_addr_t end = base + memblock_cap_size(base, &size);
        int i, nr_new;

        if (!size)
                return 0;

        /* special case for empty array */
        if (type->regions[0].size == 0) {
                WARN_ON(type->cnt != 1 || type->total_size);
                type->regions[0].base = base;
                type->regions[0].size = size;
                memblock_set_region_node(&type->regions[0], nid);
                type->total_size = size;
                return 0;
        }
repeat:
        /*
         * The following is executed twice.  Once with %false @insert and
         * then with %true.  The first counts the number of regions needed
         * to accommodate the new area.  The second actually inserts them.
         */
        base = obase;
        nr_new = 0;

        for (i = 0; i < type->cnt; i++) {
                struct memblock_region *rgn = &type->regions[i];
                phys_addr_t rbase = rgn->base;
                phys_addr_t rend = rbase + rgn->size;

                if (rbase >= end)
                        break;
                if (rend <= base)
                        continue;
                /*
                 * @rgn overlaps.  If it separates the lower part of new
                 * area, insert that portion.
                 */
                if (rbase > base) {
                        nr_new++;
                        if (insert)
                                memblock_insert_region(type, i++, base,
                                                       rbase - base, nid);
                }
                /* area below @rend is dealt with, forget about it */
                base = min(rend, end);
        }

        /* insert the remaining portion */
        if (base < end) {
                nr_new++;
                if (insert)
                        memblock_insert_region(type, i, base, end - base, nid);
        }

        /*
         * If this was the first round, resize array and repeat for actual
         * insertions; otherwise, merge and return.
         */
        if (!insert) {
                while (type->cnt + nr_new > type->max)
                        if (memblock_double_array(type, obase, size) < 0)
                                return -ENOMEM;
                insert = true;
                goto repeat;
        } else {
                memblock_merge_regions(type);
                return 0;
        }
}

int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
                                       int nid)
{
        return memblock_add_region(&memblock.memory, base, size, nid);
}

int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
        return memblock_add_region(&memblock.memory, base, size, MAX_NUMNODES);
}
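
/*
 * Typical early-boot usage of the two entry points above (illustrative;
 * the lowmem_size variable and the _text/_end symbols stand in for
 * whatever an arch's setup code would really use):
 *
 *      memblock_add(0, lowmem_size);                   (report RAM)
 *      memblock_reserve(__pa(_text), _end - _text);    (protect the kernel)
 *
 * Overlapping or duplicate memblock_add() calls are harmless - existing
 * coverage is kept and the region list stays minimal.
 */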
/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base,@base+@size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
                                        phys_addr_t base, phys_addr_t size,
                                        int *start_rgn, int *end_rgn)
{
        phys_addr_t end = base + memblock_cap_size(base, &size);
        int i;

        *start_rgn = *end_rgn = 0;

        if (!size)
                return 0;

        /* we'll create at most two more regions */
        while (type->cnt + 2 > type->max)
                if (memblock_double_array(type, base, size) < 0)
                        return -ENOMEM;

        for (i = 0; i < type->cnt; i++) {
                struct memblock_region *rgn = &type->regions[i];
                phys_addr_t rbase = rgn->base;
                phys_addr_t rend = rbase + rgn->size;

                if (rbase >= end)
                        break;
                if (rend <= base)
                        continue;

                if (rbase < base) {
                        /*
                         * @rgn intersects from below.  Split and continue
                         * to process the next region - the new top half.
                         */
                        rgn->base = base;
                        rgn->size -= base - rbase;
                        type->total_size -= base - rbase;
                        memblock_insert_region(type, i, rbase, base - rbase,
                                               memblock_get_region_node(rgn));
                } else if (rend > end) {
                        /*
                         * @rgn intersects from above.  Split and redo the
                         * current region - the new bottom half.
                         */
                        rgn->base = end;
                        rgn->size -= end - rbase;
                        type->total_size -= end - rbase;
                        memblock_insert_region(type, i--, rbase, end - rbase,
                                               memblock_get_region_node(rgn));
                } else {
                        /* @rgn is fully contained, record it */
                        if (!*end_rgn)
                                *start_rgn = i;
                        *end_rgn = i + 1;
                }
        }

        return 0;
}

static int __init_memblock __memblock_remove(struct memblock_type *type,
                                             phys_addr_t base, phys_addr_t size)
{
        int start_rgn, end_rgn;
        int i, ret;

        ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
        if (ret)
                return ret;

        for (i = end_rgn - 1; i >= start_rgn; i--)
                memblock_remove_region(type, i);
        return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
        return __memblock_remove(&memblock.memory, base, size);
}

int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
        memblock_dbg("   memblock_free: [%#016llx-%#016llx] %pF\n",
                     (unsigned long long)base,
                     (unsigned long long)base + size,
                     (void *)_RET_IP_);

        return __memblock_remove(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
        struct memblock_type *_rgn = &memblock.reserved;

        memblock_dbg("memblock_reserve: [%#016llx-%#016llx] %pF\n",
                     (unsigned long long)base,
                     (unsigned long long)base + size,
                     (void *)_RET_IP_);

        return memblock_add_region(_rgn, base, size, MAX_NUMNODES);
}
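
/*
 * Example of the isolate-then-remove scheme above (hypothetical values):
 * calling memblock_remove(0x2000, 0x1000) against a single region
 * [0x1000-0x4000) first splits it into [0x1000-0x2000), [0x2000-0x3000)
 * and [0x3000-0x4000), then drops the middle region, leaving the two
 * disjoint remainders.
 */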
/**
 * __next_free_mem_range - next function for for_each_free_mem_range()
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first free area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into memory region and the upper 32bit indexes the
 * areas before each reserved region.  For example, if reserved regions
 * look like the following,
 *
 *      0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *      0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_free_mem_range(u64 *idx, int nid,
                                           phys_addr_t *out_start,
                                           phys_addr_t *out_end, int *out_nid)
{
        struct memblock_type *mem = &memblock.memory;
        struct memblock_type *rsv = &memblock.reserved;
        int mi = *idx & 0xffffffff;
        int ri = *idx >> 32;

        for ( ; mi < mem->cnt; mi++) {
                struct memblock_region *m = &mem->regions[mi];
                phys_addr_t m_start = m->base;
                phys_addr_t m_end = m->base + m->size;

                /* only memory regions are associated with nodes, check it */
                if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
                        continue;

                /* scan areas before each reservation for intersection */
                for ( ; ri < rsv->cnt + 1; ri++) {
                        struct memblock_region *r = &rsv->regions[ri];
                        phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0;
                        phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX;

                        /* if ri advanced past mi, break out to advance mi */
                        if (r_start >= m_end)
                                break;
                        /* if the two regions intersect, we're done */
                        if (m_start < r_end) {
                                if (out_start)
                                        *out_start = max(m_start, r_start);
                                if (out_end)
                                        *out_end = min(m_end, r_end);
                                if (out_nid)
                                        *out_nid = memblock_get_region_node(m);
                                /*
                                 * The region which ends first is advanced
                                 * for the next iteration.
                                 */
                                if (m_end <= r_end)
                                        mi++;
                                else
                                        ri++;
                                *idx = (u32)mi | (u64)ri << 32;
                                return;
                        }
                }
        }

        /* signal end of iteration */
        *idx = ULLONG_MAX;
}

/**
 * __next_free_mem_range_rev - next function for for_each_free_mem_range_reverse()
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Reverse of __next_free_mem_range().
 */
void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid,
                                           phys_addr_t *out_start,
                                           phys_addr_t *out_end, int *out_nid)
{
        struct memblock_type *mem = &memblock.memory;
        struct memblock_type *rsv = &memblock.reserved;
        int mi = *idx & 0xffffffff;
        int ri = *idx >> 32;

        if (*idx == (u64)ULLONG_MAX) {
                mi = mem->cnt - 1;
                ri = rsv->cnt;
        }

        for ( ; mi >= 0; mi--) {
                struct memblock_region *m = &mem->regions[mi];
                phys_addr_t m_start = m->base;
                phys_addr_t m_end = m->base + m->size;

                /* only memory regions are associated with nodes, check it */
                if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
                        continue;

                /* scan areas before each reservation for intersection */
                for ( ; ri >= 0; ri--) {
                        struct memblock_region *r = &rsv->regions[ri];
                        phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0;
                        phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX;

                        /* if ri advanced past mi, break out to advance mi */
                        if (r_end <= m_start)
                                break;
                        /* if the two regions intersect, we're done */
                        if (m_end > r_start) {
                                if (out_start)
                                        *out_start = max(m_start, r_start);
                                if (out_end)
                                        *out_end = min(m_end, r_end);
                                if (out_nid)
                                        *out_nid = memblock_get_region_node(m);

                                if (m_start >= r_start)
                                        mi--;
                                else
                                        ri--;
                                *idx = (u32)mi | (u64)ri << 32;
                                return;
                        }
                }
        }

        *idx = ULLONG_MAX;
}
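
/*
 * Illustrative walk over free memory using the forward iterator above,
 * via the for_each_free_mem_range() helper from <linux/memblock.h>:
 *
 *      u64 i;
 *      phys_addr_t start, end;
 *
 *      for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
 *              pr_info("free: [%#llx-%#llx)\n",
 *                      (unsigned long long)start,
 *                      (unsigned long long)end);
 *
 * Each returned range is memory minus reserved, restricted to one node
 * when a real node id is passed instead of MAX_NUMNODES.
 */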
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
                                unsigned long *out_start_pfn,
                                unsigned long *out_end_pfn, int *out_nid)
{
        struct memblock_type *type = &memblock.memory;
        struct memblock_region *r;

        while (++*idx < type->cnt) {
                r = &type->regions[*idx];

                if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
                        continue;
                if (nid == MAX_NUMNODES || nid == r->nid)
                        break;
        }
        if (*idx >= type->cnt) {
                *idx = -1;
                return;
        }

        if (out_start_pfn)
                *out_start_pfn = PFN_UP(r->base);
        if (out_end_pfn)
                *out_end_pfn = PFN_DOWN(r->base + r->size);
        if (out_nid)
                *out_nid = r->nid;
}

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock memory regions in [@base,@base+@size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
                                      int nid)
{
        struct memblock_type *type = &memblock.memory;
        int start_rgn, end_rgn;
        int i, ret;

        ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
        if (ret)
                return ret;

        for (i = start_rgn; i < end_rgn; i++)
                type->regions[i].nid = nid;

        memblock_merge_regions(type);
        return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
                                        phys_addr_t align, phys_addr_t max_addr,
                                        int nid)
{
        phys_addr_t found;

        /* align @size to avoid excessive fragmentation on reserved array */
        size = round_up(size, align);

        found = memblock_find_in_range_node(0, max_addr, size, align, nid);
        if (found && !memblock_reserve(found, size))
                return found;

        return 0;
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
        return memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
        return memblock_alloc_base_nid(size, align, max_addr, MAX_NUMNODES);
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
        phys_addr_t alloc;

        alloc = __memblock_alloc_base(size, align, max_addr);

        if (alloc == 0)
                panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
                      (unsigned long long) size, (unsigned long long) max_addr);

        return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
        return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
        phys_addr_t res = memblock_alloc_nid(size, align, nid);

        if (res)
                return res;
        return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}


/*
 * Remaining API functions
 */

phys_addr_t __init memblock_phys_mem_size(void)
{
        return memblock.memory.total_size;
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
        return memblock.memory.regions[0].base;
}
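
/*
 * Illustrative allocation through the wrappers above (hypothetical
 * caller): grab one page-aligned page for an early page table.
 * memblock_alloc() panics on failure, so no error check is needed:
 *
 *      phys_addr_t pa = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 *      void *va = __va(pa);
 *
 * Use memblock_alloc_try_nid() instead when node-local memory is
 * preferred but any node is acceptable as a fallback.
 */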
phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
        int idx = memblock.memory.cnt - 1;

        return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
        unsigned long i;
        phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;

        if (!limit)
                return;

        /* find out max address */
        for (i = 0; i < memblock.memory.cnt; i++) {
                struct memblock_region *r = &memblock.memory.regions[i];

                if (limit <= r->size) {
                        max_addr = r->base + limit;
                        break;
                }
                limit -= r->size;
        }

        /* truncate both memory and reserved regions */
        __memblock_remove(&memblock.memory, max_addr, (phys_addr_t)ULLONG_MAX);
        __memblock_remove(&memblock.reserved, max_addr, (phys_addr_t)ULLONG_MAX);
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
        unsigned int left = 0, right = type->cnt;

        do {
                unsigned int mid = (right + left) / 2;

                if (addr < type->regions[mid].base)
                        right = mid;
                else if (addr >= (type->regions[mid].base +
                                  type->regions[mid].size))
                        left = mid + 1;
                else
                        return mid;
        } while (left < right);
        return -1;
}

int __init memblock_is_reserved(phys_addr_t addr)
{
        return memblock_search(&memblock.reserved, addr) != -1;
}

int __init_memblock memblock_is_memory(phys_addr_t addr)
{
        return memblock_search(&memblock.memory, addr) != -1;
}

/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) is a subset of a memory block.
 *
 * RETURNS:
 * 0 if false, non-zero if true
 */
int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
        int idx = memblock_search(&memblock.memory, base);
        phys_addr_t end = base + memblock_cap_size(base, &size);

        if (idx == -1)
                return 0;
        return memblock.memory.regions[idx].base <= base &&
                (memblock.memory.regions[idx].base +
                 memblock.memory.regions[idx].size) >= end;
}
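
/*
 * Example predicate use (illustrative): before reserving a firmware
 * buffer at [base, base + size), a caller might verify the window is
 * real RAM and not already claimed:
 *
 *      if (memblock_is_region_memory(base, size) &&
 *          !memblock_is_region_reserved(base, size))
 *              memblock_reserve(base, size);
 *
 * memblock_is_region_reserved() is defined just below.
 */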
/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) intersects a reserved memory block.
 *
 * RETURNS:
 * 0 if false, non-zero if true
 */
int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
        memblock_cap_size(base, &size);
        return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}


void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
        memblock.current_limit = limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
{
        unsigned long long base, size;
        int i;

        pr_info(" %s.cnt  = 0x%lx\n", name, type->cnt);

        for (i = 0; i < type->cnt; i++) {
                struct memblock_region *rgn = &type->regions[i];
                char nid_buf[32] = "";

                base = rgn->base;
                size = rgn->size;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
                if (memblock_get_region_node(rgn) != MAX_NUMNODES)
                        snprintf(nid_buf, sizeof(nid_buf), " on node %d",
                                 memblock_get_region_node(rgn));
#endif
                pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s\n",
                        name, i, base, base + size - 1, size, nid_buf);
        }
}

void __init_memblock __memblock_dump_all(void)
{
        pr_info("MEMBLOCK configuration:\n");
        pr_info(" memory size = %#llx reserved size = %#llx\n",
                (unsigned long long)memblock.memory.total_size,
                (unsigned long long)memblock.reserved.total_size);

        memblock_dump(&memblock.memory, "memory");
        memblock_dump(&memblock.reserved, "reserved");
}

void __init memblock_allow_resize(void)
{
        memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
        if (p && strstr(p, "debug"))
                memblock_debug = 1;
        return 0;
}
early_param("memblock", early_memblock);

#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
        struct memblock_type *type = m->private;
        struct memblock_region *reg;
        int i;

        for (i = 0; i < type->cnt; i++) {
                reg = &type->regions[i];
                seq_printf(m, "%4d: ", i);
                if (sizeof(phys_addr_t) == 4)
                        seq_printf(m, "0x%08lx..0x%08lx\n",
                                   (unsigned long)reg->base,
                                   (unsigned long)(reg->base + reg->size - 1));
                else
                        seq_printf(m, "0x%016llx..0x%016llx\n",
                                   (unsigned long long)reg->base,
                                   (unsigned long long)(reg->base + reg->size - 1));
        }
        return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
        return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
        .open = memblock_debug_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int __init memblock_init_debugfs(void)
{
        struct dentry *root = debugfs_create_dir("memblock", NULL);

        if (!root)
                return -ENXIO;
        debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
        debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);

        return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */
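
/*
 * With CONFIG_DEBUG_FS enabled (and CONFIG_ARCH_DISCARD_MEMBLOCK not
 * set), the region arrays registered above can be inspected at run time;
 * an illustrative session (addresses are made up):
 *
 *      # cat /sys/kernel/debug/memblock/memory
 *         0: 0x0000000000000000..0x000000007fffffff
 *      # cat /sys/kernel/debug/memblock/reserved
 *         0: 0x0000000000001000..0x0000000000001fff
 */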