195f72d1eSYinghai Lu /* 295f72d1eSYinghai Lu * Procedures for maintaining information about logical memory blocks. 395f72d1eSYinghai Lu * 495f72d1eSYinghai Lu * Peter Bergner, IBM Corp. June 2001. 595f72d1eSYinghai Lu * Copyright (C) 2001 Peter Bergner. 695f72d1eSYinghai Lu * 795f72d1eSYinghai Lu * This program is free software; you can redistribute it and/or 895f72d1eSYinghai Lu * modify it under the terms of the GNU General Public License 995f72d1eSYinghai Lu * as published by the Free Software Foundation; either version 1095f72d1eSYinghai Lu * 2 of the License, or (at your option) any later version. 1195f72d1eSYinghai Lu */ 1295f72d1eSYinghai Lu 1395f72d1eSYinghai Lu #include <linux/kernel.h> 14142b45a7SBenjamin Herrenschmidt #include <linux/slab.h> 1595f72d1eSYinghai Lu #include <linux/init.h> 1695f72d1eSYinghai Lu #include <linux/bitops.h> 17449e8df3SBenjamin Herrenschmidt #include <linux/poison.h> 18c196f76fSBenjamin Herrenschmidt #include <linux/pfn.h> 196d03b885SBenjamin Herrenschmidt #include <linux/debugfs.h> 206d03b885SBenjamin Herrenschmidt #include <linux/seq_file.h> 2195f72d1eSYinghai Lu #include <linux/memblock.h> 2295f72d1eSYinghai Lu 2379442ed1STang Chen #include <asm-generic/sections.h> 2426f09e9bSSantosh Shilimkar #include <linux/io.h> 2526f09e9bSSantosh Shilimkar 2626f09e9bSSantosh Shilimkar #include "internal.h" 2779442ed1STang Chen 28fe091c20STejun Heo static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; 29fe091c20STejun Heo static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; 3070210ed9SPhilipp Hachtmann #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP 3170210ed9SPhilipp Hachtmann static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock; 3270210ed9SPhilipp Hachtmann #endif 33fe091c20STejun Heo 34fe091c20STejun Heo struct memblock memblock __initdata_memblock = { 35fe091c20STejun Heo .memory.regions = memblock_memory_init_regions, 36fe091c20STejun Heo .memory.cnt = 1, /* empty dummy entry */ 37fe091c20STejun Heo .memory.max = INIT_MEMBLOCK_REGIONS, 38fe091c20STejun Heo 39fe091c20STejun Heo .reserved.regions = memblock_reserved_init_regions, 40fe091c20STejun Heo .reserved.cnt = 1, /* empty dummy entry */ 41fe091c20STejun Heo .reserved.max = INIT_MEMBLOCK_REGIONS, 42fe091c20STejun Heo 4370210ed9SPhilipp Hachtmann #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP 4470210ed9SPhilipp Hachtmann .physmem.regions = memblock_physmem_init_regions, 4570210ed9SPhilipp Hachtmann .physmem.cnt = 1, /* empty dummy entry */ 4670210ed9SPhilipp Hachtmann .physmem.max = INIT_PHYSMEM_REGIONS, 4770210ed9SPhilipp Hachtmann #endif 4870210ed9SPhilipp Hachtmann 4979442ed1STang Chen .bottom_up = false, 50fe091c20STejun Heo .current_limit = MEMBLOCK_ALLOC_ANYWHERE, 51fe091c20STejun Heo }; 5295f72d1eSYinghai Lu 5310d06439SYinghai Lu int memblock_debug __initdata_memblock; 5455ac590cSTang Chen #ifdef CONFIG_MOVABLE_NODE 5555ac590cSTang Chen bool movable_node_enabled __initdata_memblock = false; 5655ac590cSTang Chen #endif 571aadc056STejun Heo static int memblock_can_resize __initdata_memblock; 58181eb394SGavin Shan static int memblock_memory_in_slab __initdata_memblock = 0; 59181eb394SGavin Shan static int memblock_reserved_in_slab __initdata_memblock = 0; 6095f72d1eSYinghai Lu 61142b45a7SBenjamin Herrenschmidt /* inline so we don't get a warning when pr_debug is compiled out */ 62c2233116SRaghavendra D Prabhu static __init_memblock const char * 63c2233116SRaghavendra D Prabhu 
memblock_type_name(struct memblock_type *type) 64142b45a7SBenjamin Herrenschmidt { 65142b45a7SBenjamin Herrenschmidt if (type == &memblock.memory) 66142b45a7SBenjamin Herrenschmidt return "memory"; 67142b45a7SBenjamin Herrenschmidt else if (type == &memblock.reserved) 68142b45a7SBenjamin Herrenschmidt return "reserved"; 69142b45a7SBenjamin Herrenschmidt else 70142b45a7SBenjamin Herrenschmidt return "unknown"; 71142b45a7SBenjamin Herrenschmidt } 72142b45a7SBenjamin Herrenschmidt 73eb18f1b5STejun Heo /* adjust *@size so that (@base + *@size) doesn't overflow, return new size */ 74eb18f1b5STejun Heo static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size) 75eb18f1b5STejun Heo { 76eb18f1b5STejun Heo return *size = min(*size, (phys_addr_t)ULLONG_MAX - base); 77eb18f1b5STejun Heo } 78eb18f1b5STejun Heo 796ed311b2SBenjamin Herrenschmidt /* 806ed311b2SBenjamin Herrenschmidt * Address comparison utilities 816ed311b2SBenjamin Herrenschmidt */ 8210d06439SYinghai Lu static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1, 832898cc4cSBenjamin Herrenschmidt phys_addr_t base2, phys_addr_t size2) 8495f72d1eSYinghai Lu { 8595f72d1eSYinghai Lu return ((base1 < (base2 + size2)) && (base2 < (base1 + size1))); 8695f72d1eSYinghai Lu } 8795f72d1eSYinghai Lu 882d7d3eb2SH Hartley Sweeten static long __init_memblock memblock_overlaps_region(struct memblock_type *type, 892d7d3eb2SH Hartley Sweeten phys_addr_t base, phys_addr_t size) 906ed311b2SBenjamin Herrenschmidt { 916ed311b2SBenjamin Herrenschmidt unsigned long i; 926ed311b2SBenjamin Herrenschmidt 936ed311b2SBenjamin Herrenschmidt for (i = 0; i < type->cnt; i++) { 946ed311b2SBenjamin Herrenschmidt phys_addr_t rgnbase = type->regions[i].base; 956ed311b2SBenjamin Herrenschmidt phys_addr_t rgnsize = type->regions[i].size; 966ed311b2SBenjamin Herrenschmidt if (memblock_addrs_overlap(base, size, rgnbase, rgnsize)) 976ed311b2SBenjamin Herrenschmidt break; 986ed311b2SBenjamin Herrenschmidt } 996ed311b2SBenjamin Herrenschmidt 1006ed311b2SBenjamin Herrenschmidt return (i < type->cnt) ? i : -1; 1016ed311b2SBenjamin Herrenschmidt } 1026ed311b2SBenjamin Herrenschmidt 10379442ed1STang Chen /* 10479442ed1STang Chen * __memblock_find_range_bottom_up - find free area utility in bottom-up 10579442ed1STang Chen * @start: start of candidate range 10679442ed1STang Chen * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE} 10779442ed1STang Chen * @size: size of free area to find 10879442ed1STang Chen * @align: alignment of free area to find 109b1154233SGrygorii Strashko * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 11079442ed1STang Chen * 11179442ed1STang Chen * Utility called from memblock_find_in_range_node(), find free area bottom-up. 11279442ed1STang Chen * 11379442ed1STang Chen * RETURNS: 11479442ed1STang Chen * Found address on success, 0 on failure. 
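 *
 * Worked example with arbitrary addresses (editor's sketch): given a free
 * range [0x1000, 0x9000), @size = 0x2000 and @align = 0x4000, the candidate
 * is round_up(0x1000, 0x4000) = 0x4000, which is returned because
 * 0x9000 - 0x4000 >= 0x2000.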
11579442ed1STang Chen */ 11679442ed1STang Chen static phys_addr_t __init_memblock 11779442ed1STang Chen __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end, 11879442ed1STang Chen phys_addr_t size, phys_addr_t align, int nid) 11979442ed1STang Chen { 12079442ed1STang Chen phys_addr_t this_start, this_end, cand; 12179442ed1STang Chen u64 i; 12279442ed1STang Chen 12379442ed1STang Chen for_each_free_mem_range(i, nid, &this_start, &this_end, NULL) { 12479442ed1STang Chen this_start = clamp(this_start, start, end); 12579442ed1STang Chen this_end = clamp(this_end, start, end); 12679442ed1STang Chen 12779442ed1STang Chen cand = round_up(this_start, align); 12879442ed1STang Chen if (cand < this_end && this_end - cand >= size) 12979442ed1STang Chen return cand; 13079442ed1STang Chen } 13179442ed1STang Chen 13279442ed1STang Chen return 0; 13379442ed1STang Chen } 13479442ed1STang Chen 1357bd0b0f0STejun Heo /** 1361402899eSTang Chen * __memblock_find_range_top_down - find free area utility, in top-down 1371402899eSTang Chen * @start: start of candidate range 1381402899eSTang Chen * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE} 1391402899eSTang Chen * @size: size of free area to find 1401402899eSTang Chen * @align: alignment of free area to find 141b1154233SGrygorii Strashko * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 1421402899eSTang Chen * 1431402899eSTang Chen * Utility called from memblock_find_in_range_node(), find free area top-down. 1441402899eSTang Chen * 1451402899eSTang Chen * RETURNS: 14679442ed1STang Chen * Found address on success, 0 on failure. 1471402899eSTang Chen */ 1481402899eSTang Chen static phys_addr_t __init_memblock 1491402899eSTang Chen __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end, 1501402899eSTang Chen phys_addr_t size, phys_addr_t align, int nid) 1511402899eSTang Chen { 1521402899eSTang Chen phys_addr_t this_start, this_end, cand; 1531402899eSTang Chen u64 i; 1541402899eSTang Chen 1551402899eSTang Chen for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) { 1561402899eSTang Chen this_start = clamp(this_start, start, end); 1571402899eSTang Chen this_end = clamp(this_end, start, end); 1581402899eSTang Chen 1591402899eSTang Chen if (this_end < size) 1601402899eSTang Chen continue; 1611402899eSTang Chen 1621402899eSTang Chen cand = round_down(this_end - size, align); 1631402899eSTang Chen if (cand >= this_start) 1641402899eSTang Chen return cand; 1651402899eSTang Chen } 1661402899eSTang Chen 1671402899eSTang Chen return 0; 1681402899eSTang Chen } 1691402899eSTang Chen 1701402899eSTang Chen /** 1717bd0b0f0STejun Heo * memblock_find_in_range_node - find free area in given range and node 1727bd0b0f0STejun Heo * @size: size of free area to find 1737bd0b0f0STejun Heo * @align: alignment of free area to find 17487029ee9SGrygorii Strashko * @start: start of candidate range 17587029ee9SGrygorii Strashko * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE} 176b1154233SGrygorii Strashko * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 1777bd0b0f0STejun Heo * 1787bd0b0f0STejun Heo * Find @size free area aligned to @align in the specified range and node. 1797bd0b0f0STejun Heo * 18079442ed1STang Chen * When allocation direction is bottom-up, the @start should be greater 18179442ed1STang Chen * than the end of the kernel image. Otherwise, it will be trimmed. 
The 18279442ed1STang Chen * reason is that we want the bottom-up allocation just near the kernel 18379442ed1STang Chen * image so it is highly likely that the allocated memory and the kernel 18479442ed1STang Chen * will reside in the same node. 18579442ed1STang Chen * 18679442ed1STang Chen * If bottom-up allocation failed, will try to allocate memory top-down. 18779442ed1STang Chen * 1887bd0b0f0STejun Heo * RETURNS: 18979442ed1STang Chen * Found address on success, 0 on failure. 1906ed311b2SBenjamin Herrenschmidt */ 19187029ee9SGrygorii Strashko phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size, 19287029ee9SGrygorii Strashko phys_addr_t align, phys_addr_t start, 19387029ee9SGrygorii Strashko phys_addr_t end, int nid) 194f7210e6cSTang Chen { 1950cfb8f0cSTang Chen phys_addr_t kernel_end, ret; 19679442ed1STang Chen 197f7210e6cSTang Chen /* pump up @end */ 198f7210e6cSTang Chen if (end == MEMBLOCK_ALLOC_ACCESSIBLE) 199f7210e6cSTang Chen end = memblock.current_limit; 200f7210e6cSTang Chen 201f7210e6cSTang Chen /* avoid allocating the first page */ 202f7210e6cSTang Chen start = max_t(phys_addr_t, start, PAGE_SIZE); 203f7210e6cSTang Chen end = max(start, end); 20479442ed1STang Chen kernel_end = __pa_symbol(_end); 20579442ed1STang Chen 20679442ed1STang Chen /* 20779442ed1STang Chen * try bottom-up allocation only when bottom-up mode 20879442ed1STang Chen * is set and @end is above the kernel image. 20979442ed1STang Chen */ 21079442ed1STang Chen if (memblock_bottom_up() && end > kernel_end) { 21179442ed1STang Chen phys_addr_t bottom_up_start; 21279442ed1STang Chen 21379442ed1STang Chen /* make sure we will allocate above the kernel */ 21479442ed1STang Chen bottom_up_start = max(start, kernel_end); 21579442ed1STang Chen 21679442ed1STang Chen /* ok, try bottom-up allocation first */ 21779442ed1STang Chen ret = __memblock_find_range_bottom_up(bottom_up_start, end, 21879442ed1STang Chen size, align, nid); 21979442ed1STang Chen if (ret) 22079442ed1STang Chen return ret; 22179442ed1STang Chen 22279442ed1STang Chen /* 22379442ed1STang Chen * we always limit bottom-up allocation above the kernel, 22479442ed1STang Chen * but top-down allocation doesn't have the limit, so 22579442ed1STang Chen * retrying top-down allocation may succeed when bottom-up 22679442ed1STang Chen * allocation failed. 22779442ed1STang Chen * 22879442ed1STang Chen * bottom-up allocation is expected to be fail very rarely, 22979442ed1STang Chen * so we use WARN_ONCE() here to see the stack trace if 23079442ed1STang Chen * fail happens. 23179442ed1STang Chen */ 23279442ed1STang Chen WARN_ONCE(1, "memblock: bottom-up allocation failed, " 23379442ed1STang Chen "memory hotunplug may be affected\n"); 23479442ed1STang Chen } 235f7210e6cSTang Chen 2361402899eSTang Chen return __memblock_find_range_top_down(start, end, size, align, nid); 237f7210e6cSTang Chen } 2386ed311b2SBenjamin Herrenschmidt 2397bd0b0f0STejun Heo /** 2407bd0b0f0STejun Heo * memblock_find_in_range - find free area in given range 2417bd0b0f0STejun Heo * @start: start of candidate range 2427bd0b0f0STejun Heo * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE} 2437bd0b0f0STejun Heo * @size: size of free area to find 2447bd0b0f0STejun Heo * @align: alignment of free area to find 2457bd0b0f0STejun Heo * 2467bd0b0f0STejun Heo * Find @size free area aligned to @align in the specified range. 2477bd0b0f0STejun Heo * 2487bd0b0f0STejun Heo * RETURNS: 24979442ed1STang Chen * Found address on success, 0 on failure. 
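 *
 * The lookup does not reserve anything by itself; callers typically pair it
 * with memblock_reserve(). A minimal sketch with an arbitrary 1 MiB size
 * (editor's illustration):
 *
 *	addr = memblock_find_in_range(0, memblock.current_limit,
 *				      0x100000, PAGE_SIZE);
 *	if (addr)
 *		memblock_reserve(addr, 0x100000);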
2507bd0b0f0STejun Heo */ 2517bd0b0f0STejun Heo phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start, 2527bd0b0f0STejun Heo phys_addr_t end, phys_addr_t size, 2537bd0b0f0STejun Heo phys_addr_t align) 2547bd0b0f0STejun Heo { 25587029ee9SGrygorii Strashko return memblock_find_in_range_node(size, align, start, end, 256b1154233SGrygorii Strashko NUMA_NO_NODE); 2577bd0b0f0STejun Heo } 2587bd0b0f0STejun Heo 25910d06439SYinghai Lu static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r) 26095f72d1eSYinghai Lu { 2611440c4e2STejun Heo type->total_size -= type->regions[r].size; 2627c0caeb8STejun Heo memmove(&type->regions[r], &type->regions[r + 1], 2637c0caeb8STejun Heo (type->cnt - (r + 1)) * sizeof(type->regions[r])); 264e3239ff9SBenjamin Herrenschmidt type->cnt--; 26595f72d1eSYinghai Lu 2668f7a6605SBenjamin Herrenschmidt /* Special case for empty arrays */ 2678f7a6605SBenjamin Herrenschmidt if (type->cnt == 0) { 2681440c4e2STejun Heo WARN_ON(type->total_size != 0); 2698f7a6605SBenjamin Herrenschmidt type->cnt = 1; 2708f7a6605SBenjamin Herrenschmidt type->regions[0].base = 0; 2718f7a6605SBenjamin Herrenschmidt type->regions[0].size = 0; 27266a20757STang Chen type->regions[0].flags = 0; 2737c0caeb8STejun Heo memblock_set_region_node(&type->regions[0], MAX_NUMNODES); 2748f7a6605SBenjamin Herrenschmidt } 27595f72d1eSYinghai Lu } 27695f72d1eSYinghai Lu 277354f17e1SPhilipp Hachtmann #ifdef CONFIG_ARCH_DISCARD_MEMBLOCK 278354f17e1SPhilipp Hachtmann 27929f67386SYinghai Lu phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info( 28029f67386SYinghai Lu phys_addr_t *addr) 28129f67386SYinghai Lu { 28229f67386SYinghai Lu if (memblock.reserved.regions == memblock_reserved_init_regions) 28329f67386SYinghai Lu return 0; 28429f67386SYinghai Lu 28529f67386SYinghai Lu *addr = __pa(memblock.reserved.regions); 28629f67386SYinghai Lu 28729f67386SYinghai Lu return PAGE_ALIGN(sizeof(struct memblock_region) * 28829f67386SYinghai Lu memblock.reserved.max); 28929f67386SYinghai Lu } 29029f67386SYinghai Lu 2915e270e25SPhilipp Hachtmann phys_addr_t __init_memblock get_allocated_memblock_memory_regions_info( 2925e270e25SPhilipp Hachtmann phys_addr_t *addr) 2935e270e25SPhilipp Hachtmann { 2945e270e25SPhilipp Hachtmann if (memblock.memory.regions == memblock_memory_init_regions) 2955e270e25SPhilipp Hachtmann return 0; 2965e270e25SPhilipp Hachtmann 2975e270e25SPhilipp Hachtmann *addr = __pa(memblock.memory.regions); 2985e270e25SPhilipp Hachtmann 2995e270e25SPhilipp Hachtmann return PAGE_ALIGN(sizeof(struct memblock_region) * 3005e270e25SPhilipp Hachtmann memblock.memory.max); 3015e270e25SPhilipp Hachtmann } 3025e270e25SPhilipp Hachtmann 3035e270e25SPhilipp Hachtmann #endif 3045e270e25SPhilipp Hachtmann 30548c3b583SGreg Pearson /** 30648c3b583SGreg Pearson * memblock_double_array - double the size of the memblock regions array 30748c3b583SGreg Pearson * @type: memblock type of the regions array being doubled 30848c3b583SGreg Pearson * @new_area_start: starting address of memory range to avoid overlap with 30948c3b583SGreg Pearson * @new_area_size: size of memory range to avoid overlap with 31048c3b583SGreg Pearson * 31148c3b583SGreg Pearson * Double the size of the @type regions array. 
If memblock is being used to 31248c3b583SGreg Pearson * allocate memory for a new reserved regions array and there is a previously 31348c3b583SGreg Pearson * allocated memory range [@new_area_start,@new_area_start+@new_area_size] 31448c3b583SGreg Pearson * waiting to be reserved, ensure the memory used by the new array does 31548c3b583SGreg Pearson * not overlap. 31648c3b583SGreg Pearson * 31748c3b583SGreg Pearson * RETURNS: 31848c3b583SGreg Pearson * 0 on success, -1 on failure. 31948c3b583SGreg Pearson */ 32048c3b583SGreg Pearson static int __init_memblock memblock_double_array(struct memblock_type *type, 32148c3b583SGreg Pearson phys_addr_t new_area_start, 32248c3b583SGreg Pearson phys_addr_t new_area_size) 323142b45a7SBenjamin Herrenschmidt { 324142b45a7SBenjamin Herrenschmidt struct memblock_region *new_array, *old_array; 32529f67386SYinghai Lu phys_addr_t old_alloc_size, new_alloc_size; 326142b45a7SBenjamin Herrenschmidt phys_addr_t old_size, new_size, addr; 327142b45a7SBenjamin Herrenschmidt int use_slab = slab_is_available(); 328181eb394SGavin Shan int *in_slab; 329142b45a7SBenjamin Herrenschmidt 330142b45a7SBenjamin Herrenschmidt /* We don't allow resizing until we know about the reserved regions 331142b45a7SBenjamin Herrenschmidt * of memory that aren't suitable for allocation 332142b45a7SBenjamin Herrenschmidt */ 333142b45a7SBenjamin Herrenschmidt if (!memblock_can_resize) 334142b45a7SBenjamin Herrenschmidt return -1; 335142b45a7SBenjamin Herrenschmidt 336142b45a7SBenjamin Herrenschmidt /* Calculate new doubled size */ 337142b45a7SBenjamin Herrenschmidt old_size = type->max * sizeof(struct memblock_region); 338142b45a7SBenjamin Herrenschmidt new_size = old_size << 1; 33929f67386SYinghai Lu /* 34029f67386SYinghai Lu * We need to allocated new one align to PAGE_SIZE, 34129f67386SYinghai Lu * so we can free them completely later. 34229f67386SYinghai Lu */ 34329f67386SYinghai Lu old_alloc_size = PAGE_ALIGN(old_size); 34429f67386SYinghai Lu new_alloc_size = PAGE_ALIGN(new_size); 345142b45a7SBenjamin Herrenschmidt 346181eb394SGavin Shan /* Retrieve the slab flag */ 347181eb394SGavin Shan if (type == &memblock.memory) 348181eb394SGavin Shan in_slab = &memblock_memory_in_slab; 349181eb394SGavin Shan else 350181eb394SGavin Shan in_slab = &memblock_reserved_in_slab; 351181eb394SGavin Shan 352142b45a7SBenjamin Herrenschmidt /* Try to find some space for it. 353142b45a7SBenjamin Herrenschmidt * 354142b45a7SBenjamin Herrenschmidt * WARNING: We assume that either slab_is_available() and we use it or 355fd07383bSAndrew Morton * we use MEMBLOCK for allocations. That means that this is unsafe to 356fd07383bSAndrew Morton * use when bootmem is currently active (unless bootmem itself is 357fd07383bSAndrew Morton * implemented on top of MEMBLOCK which isn't the case yet) 358142b45a7SBenjamin Herrenschmidt * 359142b45a7SBenjamin Herrenschmidt * This should however not be an issue for now, as we currently only 360fd07383bSAndrew Morton * call into MEMBLOCK while it's still active, or much later when slab 361fd07383bSAndrew Morton * is active for memory hotplug operations 362142b45a7SBenjamin Herrenschmidt */ 363142b45a7SBenjamin Herrenschmidt if (use_slab) { 364142b45a7SBenjamin Herrenschmidt new_array = kmalloc(new_size, GFP_KERNEL); 3651f5026a7STejun Heo addr = new_array ? 
__pa(new_array) : 0; 3664e2f0775SGavin Shan } else { 36748c3b583SGreg Pearson /* only exclude range when trying to double reserved.regions */ 36848c3b583SGreg Pearson if (type != &memblock.reserved) 36948c3b583SGreg Pearson new_area_start = new_area_size = 0; 37048c3b583SGreg Pearson 37148c3b583SGreg Pearson addr = memblock_find_in_range(new_area_start + new_area_size, 37248c3b583SGreg Pearson memblock.current_limit, 37329f67386SYinghai Lu new_alloc_size, PAGE_SIZE); 37448c3b583SGreg Pearson if (!addr && new_area_size) 37548c3b583SGreg Pearson addr = memblock_find_in_range(0, 37648c3b583SGreg Pearson min(new_area_start, memblock.current_limit), 37729f67386SYinghai Lu new_alloc_size, PAGE_SIZE); 37848c3b583SGreg Pearson 37915674868SSachin Kamat new_array = addr ? __va(addr) : NULL; 3804e2f0775SGavin Shan } 3811f5026a7STejun Heo if (!addr) { 382142b45a7SBenjamin Herrenschmidt pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n", 383142b45a7SBenjamin Herrenschmidt memblock_type_name(type), type->max, type->max * 2); 384142b45a7SBenjamin Herrenschmidt return -1; 385142b45a7SBenjamin Herrenschmidt } 386142b45a7SBenjamin Herrenschmidt 387fd07383bSAndrew Morton memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]", 388fd07383bSAndrew Morton memblock_type_name(type), type->max * 2, (u64)addr, 389fd07383bSAndrew Morton (u64)addr + new_size - 1); 390ea9e4376SYinghai Lu 391fd07383bSAndrew Morton /* 392fd07383bSAndrew Morton * Found space, we now need to move the array over before we add the 393fd07383bSAndrew Morton * reserved region since it may be our reserved array itself that is 394fd07383bSAndrew Morton * full. 395142b45a7SBenjamin Herrenschmidt */ 396142b45a7SBenjamin Herrenschmidt memcpy(new_array, type->regions, old_size); 397142b45a7SBenjamin Herrenschmidt memset(new_array + type->max, 0, old_size); 398142b45a7SBenjamin Herrenschmidt old_array = type->regions; 399142b45a7SBenjamin Herrenschmidt type->regions = new_array; 400142b45a7SBenjamin Herrenschmidt type->max <<= 1; 401142b45a7SBenjamin Herrenschmidt 402fd07383bSAndrew Morton /* Free old array. We needn't free it if the array is the static one */ 403181eb394SGavin Shan if (*in_slab) 404181eb394SGavin Shan kfree(old_array); 405181eb394SGavin Shan else if (old_array != memblock_memory_init_regions && 406142b45a7SBenjamin Herrenschmidt old_array != memblock_reserved_init_regions) 40729f67386SYinghai Lu memblock_free(__pa(old_array), old_alloc_size); 408142b45a7SBenjamin Herrenschmidt 409fd07383bSAndrew Morton /* 410fd07383bSAndrew Morton * Reserve the new array if that comes from the memblock. Otherwise, we 411fd07383bSAndrew Morton * needn't do it 412181eb394SGavin Shan */ 413181eb394SGavin Shan if (!use_slab) 41429f67386SYinghai Lu BUG_ON(memblock_reserve(addr, new_alloc_size)); 415181eb394SGavin Shan 416181eb394SGavin Shan /* Update slab flag */ 417181eb394SGavin Shan *in_slab = use_slab; 418181eb394SGavin Shan 419142b45a7SBenjamin Herrenschmidt return 0; 420142b45a7SBenjamin Herrenschmidt } 421142b45a7SBenjamin Herrenschmidt 422784656f9STejun Heo /** 423784656f9STejun Heo * memblock_merge_regions - merge neighboring compatible regions 424784656f9STejun Heo * @type: memblock type to scan 425784656f9STejun Heo * 426784656f9STejun Heo * Scan @type and merge neighboring compatible regions. 
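 *
 * For example, [0x1000, 0x2000) followed by [0x2000, 0x3000) with the same
 * node id and flags collapses into a single region [0x1000, 0x3000); adjacent
 * regions that differ in nid or flags are left as they are.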
427784656f9STejun Heo */ 428784656f9STejun Heo static void __init_memblock memblock_merge_regions(struct memblock_type *type) 429784656f9STejun Heo { 430784656f9STejun Heo int i = 0; 431784656f9STejun Heo 432784656f9STejun Heo /* cnt never goes below 1 */ 433784656f9STejun Heo while (i < type->cnt - 1) { 434784656f9STejun Heo struct memblock_region *this = &type->regions[i]; 435784656f9STejun Heo struct memblock_region *next = &type->regions[i + 1]; 436784656f9STejun Heo 4377c0caeb8STejun Heo if (this->base + this->size != next->base || 4387c0caeb8STejun Heo memblock_get_region_node(this) != 43966a20757STang Chen memblock_get_region_node(next) || 44066a20757STang Chen this->flags != next->flags) { 441784656f9STejun Heo BUG_ON(this->base + this->size > next->base); 442784656f9STejun Heo i++; 443784656f9STejun Heo continue; 444784656f9STejun Heo } 445784656f9STejun Heo 446784656f9STejun Heo this->size += next->size; 447c0232ae8SLin Feng /* move forward from next + 1, index of which is i + 2 */ 448c0232ae8SLin Feng memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next)); 449784656f9STejun Heo type->cnt--; 450784656f9STejun Heo } 451784656f9STejun Heo } 452784656f9STejun Heo 453784656f9STejun Heo /** 454784656f9STejun Heo * memblock_insert_region - insert new memblock region 455784656f9STejun Heo * @type: memblock type to insert into 456784656f9STejun Heo * @idx: index for the insertion point 457784656f9STejun Heo * @base: base address of the new region 458784656f9STejun Heo * @size: size of the new region 459209ff86dSTang Chen * @nid: node id of the new region 46066a20757STang Chen * @flags: flags of the new region 461784656f9STejun Heo * 462784656f9STejun Heo * Insert new memblock region [@base,@base+@size) into @type at @idx. 463784656f9STejun Heo * @type must already have extra room to accomodate the new region. 464784656f9STejun Heo */ 465784656f9STejun Heo static void __init_memblock memblock_insert_region(struct memblock_type *type, 466784656f9STejun Heo int idx, phys_addr_t base, 46766a20757STang Chen phys_addr_t size, 46866a20757STang Chen int nid, unsigned long flags) 469784656f9STejun Heo { 470784656f9STejun Heo struct memblock_region *rgn = &type->regions[idx]; 471784656f9STejun Heo 472784656f9STejun Heo BUG_ON(type->cnt >= type->max); 473784656f9STejun Heo memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn)); 474784656f9STejun Heo rgn->base = base; 475784656f9STejun Heo rgn->size = size; 47666a20757STang Chen rgn->flags = flags; 4777c0caeb8STejun Heo memblock_set_region_node(rgn, nid); 478784656f9STejun Heo type->cnt++; 4791440c4e2STejun Heo type->total_size += size; 480784656f9STejun Heo } 481784656f9STejun Heo 482784656f9STejun Heo /** 483f1af9d3aSPhilipp Hachtmann * memblock_add_range - add new memblock region 484784656f9STejun Heo * @type: memblock type to add new region into 485784656f9STejun Heo * @base: base address of the new region 486784656f9STejun Heo * @size: size of the new region 4877fb0bc3fSTejun Heo * @nid: nid of the new region 48866a20757STang Chen * @flags: flags of the new region 489784656f9STejun Heo * 490784656f9STejun Heo * Add new memblock region [@base,@base+@size) into @type. The new region 491784656f9STejun Heo * is allowed to overlap with existing ones - overlaps don't affect already 492784656f9STejun Heo * existing regions. @type is guaranteed to be minimal (all neighbouring 493784656f9STejun Heo * compatible regions are merged) after the addition. 
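 *
 * For example, adding [0x1000, 0x3000) while [0x2000, 0x4000) is already
 * present inserts only the missing [0x1000, 0x2000) portion; the merge pass
 * then collapses the result into [0x1000, 0x4000), assuming matching nid
 * and flags.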
494784656f9STejun Heo * 495784656f9STejun Heo * RETURNS: 496784656f9STejun Heo * 0 on success, -errno on failure. 497784656f9STejun Heo */ 498f1af9d3aSPhilipp Hachtmann int __init_memblock memblock_add_range(struct memblock_type *type, 49966a20757STang Chen phys_addr_t base, phys_addr_t size, 50066a20757STang Chen int nid, unsigned long flags) 50195f72d1eSYinghai Lu { 502784656f9STejun Heo bool insert = false; 503eb18f1b5STejun Heo phys_addr_t obase = base; 504eb18f1b5STejun Heo phys_addr_t end = base + memblock_cap_size(base, &size); 505784656f9STejun Heo int i, nr_new; 50695f72d1eSYinghai Lu 507b3dc627cSTejun Heo if (!size) 508b3dc627cSTejun Heo return 0; 509b3dc627cSTejun Heo 510784656f9STejun Heo /* special case for empty array */ 511784656f9STejun Heo if (type->regions[0].size == 0) { 5121440c4e2STejun Heo WARN_ON(type->cnt != 1 || type->total_size); 513784656f9STejun Heo type->regions[0].base = base; 514784656f9STejun Heo type->regions[0].size = size; 51566a20757STang Chen type->regions[0].flags = flags; 5167fb0bc3fSTejun Heo memblock_set_region_node(&type->regions[0], nid); 5171440c4e2STejun Heo type->total_size = size; 518784656f9STejun Heo return 0; 519784656f9STejun Heo } 520784656f9STejun Heo repeat: 521784656f9STejun Heo /* 522784656f9STejun Heo * The following is executed twice. Once with %false @insert and 523784656f9STejun Heo * then with %true. The first counts the number of regions needed 524784656f9STejun Heo * to accomodate the new area. The second actually inserts them. 525784656f9STejun Heo */ 526784656f9STejun Heo base = obase; 527784656f9STejun Heo nr_new = 0; 528784656f9STejun Heo 5298f7a6605SBenjamin Herrenschmidt for (i = 0; i < type->cnt; i++) { 5308f7a6605SBenjamin Herrenschmidt struct memblock_region *rgn = &type->regions[i]; 531784656f9STejun Heo phys_addr_t rbase = rgn->base; 532784656f9STejun Heo phys_addr_t rend = rbase + rgn->size; 5338f7a6605SBenjamin Herrenschmidt 534784656f9STejun Heo if (rbase >= end) 5358f7a6605SBenjamin Herrenschmidt break; 536784656f9STejun Heo if (rend <= base) 537784656f9STejun Heo continue; 538784656f9STejun Heo /* 539784656f9STejun Heo * @rgn overlaps. If it separates the lower part of new 540784656f9STejun Heo * area, insert that portion. 5418f7a6605SBenjamin Herrenschmidt */ 542784656f9STejun Heo if (rbase > base) { 543784656f9STejun Heo nr_new++; 544784656f9STejun Heo if (insert) 545784656f9STejun Heo memblock_insert_region(type, i++, base, 54666a20757STang Chen rbase - base, nid, 54766a20757STang Chen flags); 548784656f9STejun Heo } 549784656f9STejun Heo /* area below @rend is dealt with, forget about it */ 550784656f9STejun Heo base = min(rend, end); 5518f7a6605SBenjamin Herrenschmidt } 5528f7a6605SBenjamin Herrenschmidt 553784656f9STejun Heo /* insert the remaining portion */ 554784656f9STejun Heo if (base < end) { 555784656f9STejun Heo nr_new++; 556784656f9STejun Heo if (insert) 55766a20757STang Chen memblock_insert_region(type, i, base, end - base, 55866a20757STang Chen nid, flags); 5598f7a6605SBenjamin Herrenschmidt } 5608f7a6605SBenjamin Herrenschmidt 561784656f9STejun Heo /* 562784656f9STejun Heo * If this was the first round, resize array and repeat for actual 563784656f9STejun Heo * insertions; otherwise, merge and return. 
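	 * The array may need to grow more than once when many new regions
	 * are required, which is why the doubling below is done in a loop.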
5648f7a6605SBenjamin Herrenschmidt */ 565784656f9STejun Heo if (!insert) { 566784656f9STejun Heo while (type->cnt + nr_new > type->max) 56748c3b583SGreg Pearson if (memblock_double_array(type, obase, size) < 0) 568784656f9STejun Heo return -ENOMEM; 569784656f9STejun Heo insert = true; 570784656f9STejun Heo goto repeat; 57195f72d1eSYinghai Lu } else { 572784656f9STejun Heo memblock_merge_regions(type); 57395f72d1eSYinghai Lu return 0; 57495f72d1eSYinghai Lu } 575784656f9STejun Heo } 57695f72d1eSYinghai Lu 5777fb0bc3fSTejun Heo int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size, 5787fb0bc3fSTejun Heo int nid) 5797fb0bc3fSTejun Heo { 580f1af9d3aSPhilipp Hachtmann return memblock_add_range(&memblock.memory, base, size, nid, 0); 5817fb0bc3fSTejun Heo } 5827fb0bc3fSTejun Heo 583581adcbeSTejun Heo int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size) 58495f72d1eSYinghai Lu { 585f1af9d3aSPhilipp Hachtmann return memblock_add_range(&memblock.memory, base, size, 58666a20757STang Chen MAX_NUMNODES, 0); 58795f72d1eSYinghai Lu } 58895f72d1eSYinghai Lu 5896a9ceb31STejun Heo /** 5906a9ceb31STejun Heo * memblock_isolate_range - isolate given range into disjoint memblocks 5916a9ceb31STejun Heo * @type: memblock type to isolate range for 5926a9ceb31STejun Heo * @base: base of range to isolate 5936a9ceb31STejun Heo * @size: size of range to isolate 5946a9ceb31STejun Heo * @start_rgn: out parameter for the start of isolated region 5956a9ceb31STejun Heo * @end_rgn: out parameter for the end of isolated region 5966a9ceb31STejun Heo * 5976a9ceb31STejun Heo * Walk @type and ensure that regions don't cross the boundaries defined by 5986a9ceb31STejun Heo * [@base,@base+@size). Crossing regions are split at the boundaries, 5996a9ceb31STejun Heo * which may create at most two more regions. The index of the first 6006a9ceb31STejun Heo * region inside the range is returned in *@start_rgn and end in *@end_rgn. 6016a9ceb31STejun Heo * 6026a9ceb31STejun Heo * RETURNS: 6036a9ceb31STejun Heo * 0 on success, -errno on failure. 6046a9ceb31STejun Heo */ 6056a9ceb31STejun Heo static int __init_memblock memblock_isolate_range(struct memblock_type *type, 6066a9ceb31STejun Heo phys_addr_t base, phys_addr_t size, 6076a9ceb31STejun Heo int *start_rgn, int *end_rgn) 6086a9ceb31STejun Heo { 609eb18f1b5STejun Heo phys_addr_t end = base + memblock_cap_size(base, &size); 6106a9ceb31STejun Heo int i; 6116a9ceb31STejun Heo 6126a9ceb31STejun Heo *start_rgn = *end_rgn = 0; 6136a9ceb31STejun Heo 614b3dc627cSTejun Heo if (!size) 615b3dc627cSTejun Heo return 0; 616b3dc627cSTejun Heo 6176a9ceb31STejun Heo /* we'll create at most two more regions */ 6186a9ceb31STejun Heo while (type->cnt + 2 > type->max) 61948c3b583SGreg Pearson if (memblock_double_array(type, base, size) < 0) 6206a9ceb31STejun Heo return -ENOMEM; 6216a9ceb31STejun Heo 6226a9ceb31STejun Heo for (i = 0; i < type->cnt; i++) { 6236a9ceb31STejun Heo struct memblock_region *rgn = &type->regions[i]; 6246a9ceb31STejun Heo phys_addr_t rbase = rgn->base; 6256a9ceb31STejun Heo phys_addr_t rend = rbase + rgn->size; 6266a9ceb31STejun Heo 6276a9ceb31STejun Heo if (rbase >= end) 6286a9ceb31STejun Heo break; 6296a9ceb31STejun Heo if (rend <= base) 6306a9ceb31STejun Heo continue; 6316a9ceb31STejun Heo 6326a9ceb31STejun Heo if (rbase < base) { 6336a9ceb31STejun Heo /* 6346a9ceb31STejun Heo * @rgn intersects from below. Split and continue 6356a9ceb31STejun Heo * to process the next region - the new top half. 
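			 * For example, with an existing region
			 * [0x1000, 0x3000) and @base = 0x2000, the region is
			 * shrunk to [0x2000, 0x3000), [0x1000, 0x2000) is
			 * inserted in front of it, and the loop picks the
			 * shrunken top half up again on the next iteration.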
6366a9ceb31STejun Heo */ 6376a9ceb31STejun Heo rgn->base = base; 6381440c4e2STejun Heo rgn->size -= base - rbase; 6391440c4e2STejun Heo type->total_size -= base - rbase; 6406a9ceb31STejun Heo memblock_insert_region(type, i, rbase, base - rbase, 64166a20757STang Chen memblock_get_region_node(rgn), 64266a20757STang Chen rgn->flags); 6436a9ceb31STejun Heo } else if (rend > end) { 6446a9ceb31STejun Heo /* 6456a9ceb31STejun Heo * @rgn intersects from above. Split and redo the 6466a9ceb31STejun Heo * current region - the new bottom half. 6476a9ceb31STejun Heo */ 6486a9ceb31STejun Heo rgn->base = end; 6491440c4e2STejun Heo rgn->size -= end - rbase; 6501440c4e2STejun Heo type->total_size -= end - rbase; 6516a9ceb31STejun Heo memblock_insert_region(type, i--, rbase, end - rbase, 65266a20757STang Chen memblock_get_region_node(rgn), 65366a20757STang Chen rgn->flags); 6546a9ceb31STejun Heo } else { 6556a9ceb31STejun Heo /* @rgn is fully contained, record it */ 6566a9ceb31STejun Heo if (!*end_rgn) 6576a9ceb31STejun Heo *start_rgn = i; 6586a9ceb31STejun Heo *end_rgn = i + 1; 6596a9ceb31STejun Heo } 6606a9ceb31STejun Heo } 6616a9ceb31STejun Heo 6626a9ceb31STejun Heo return 0; 6636a9ceb31STejun Heo } 6646a9ceb31STejun Heo 665f1af9d3aSPhilipp Hachtmann int __init_memblock memblock_remove_range(struct memblock_type *type, 6668f7a6605SBenjamin Herrenschmidt phys_addr_t base, phys_addr_t size) 66795f72d1eSYinghai Lu { 66871936180STejun Heo int start_rgn, end_rgn; 66971936180STejun Heo int i, ret; 67095f72d1eSYinghai Lu 67171936180STejun Heo ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn); 67271936180STejun Heo if (ret) 67371936180STejun Heo return ret; 67495f72d1eSYinghai Lu 67571936180STejun Heo for (i = end_rgn - 1; i >= start_rgn; i--) 67671936180STejun Heo memblock_remove_region(type, i); 67795f72d1eSYinghai Lu return 0; 67895f72d1eSYinghai Lu } 67995f72d1eSYinghai Lu 680581adcbeSTejun Heo int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size) 68195f72d1eSYinghai Lu { 682f1af9d3aSPhilipp Hachtmann return memblock_remove_range(&memblock.memory, base, size); 68395f72d1eSYinghai Lu } 68495f72d1eSYinghai Lu 685f1af9d3aSPhilipp Hachtmann 686581adcbeSTejun Heo int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size) 68795f72d1eSYinghai Lu { 68824aa0788STejun Heo memblock_dbg(" memblock_free: [%#016llx-%#016llx] %pF\n", 689a150439cSH. Peter Anvin (unsigned long long)base, 690931d13f5SGrygorii Strashko (unsigned long long)base + size - 1, 691a150439cSH. Peter Anvin (void *)_RET_IP_); 69224aa0788STejun Heo 693aedf95eaSCatalin Marinas kmemleak_free_part(__va(base), size); 694f1af9d3aSPhilipp Hachtmann return memblock_remove_range(&memblock.reserved, base, size); 69595f72d1eSYinghai Lu } 69695f72d1eSYinghai Lu 69766a20757STang Chen static int __init_memblock memblock_reserve_region(phys_addr_t base, 69866a20757STang Chen phys_addr_t size, 69966a20757STang Chen int nid, 70066a20757STang Chen unsigned long flags) 70195f72d1eSYinghai Lu { 702e3239ff9SBenjamin Herrenschmidt struct memblock_type *_rgn = &memblock.reserved; 70395f72d1eSYinghai Lu 70466a20757STang Chen memblock_dbg("memblock_reserve: [%#016llx-%#016llx] flags %#02lx %pF\n", 705a150439cSH. 
Peter Anvin (unsigned long long)base, 706931d13f5SGrygorii Strashko (unsigned long long)base + size - 1, 70766a20757STang Chen flags, (void *)_RET_IP_); 70895f72d1eSYinghai Lu 709f1af9d3aSPhilipp Hachtmann return memblock_add_range(_rgn, base, size, nid, flags); 71066a20757STang Chen } 71166a20757STang Chen 71266a20757STang Chen int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size) 71366a20757STang Chen { 71466a20757STang Chen return memblock_reserve_region(base, size, MAX_NUMNODES, 0); 71595f72d1eSYinghai Lu } 71695f72d1eSYinghai Lu 71735fd0808STejun Heo /** 71866b16edfSTang Chen * 719*4308ce17STony Luck * This function isolates region [@base, @base + @size), and sets/clears flag 72066b16edfSTang Chen * 72166b16edfSTang Chen * Return 0 on succees, -errno on failure. 72266b16edfSTang Chen */ 723*4308ce17STony Luck static int __init_memblock memblock_setclr_flag(phys_addr_t base, 724*4308ce17STony Luck phys_addr_t size, int set, int flag) 72566b16edfSTang Chen { 72666b16edfSTang Chen struct memblock_type *type = &memblock.memory; 72766b16edfSTang Chen int i, ret, start_rgn, end_rgn; 72866b16edfSTang Chen 72966b16edfSTang Chen ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn); 73066b16edfSTang Chen if (ret) 73166b16edfSTang Chen return ret; 73266b16edfSTang Chen 73366b16edfSTang Chen for (i = start_rgn; i < end_rgn; i++) 734*4308ce17STony Luck if (set) 735*4308ce17STony Luck memblock_set_region_flags(&type->regions[i], flag); 736*4308ce17STony Luck else 737*4308ce17STony Luck memblock_clear_region_flags(&type->regions[i], flag); 73866b16edfSTang Chen 73966b16edfSTang Chen memblock_merge_regions(type); 74066b16edfSTang Chen return 0; 74166b16edfSTang Chen } 74266b16edfSTang Chen 74366b16edfSTang Chen /** 744*4308ce17STony Luck * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG. 745*4308ce17STony Luck * @base: the base phys addr of the region 746*4308ce17STony Luck * @size: the size of the region 747*4308ce17STony Luck * 748*4308ce17STony Luck * Return 0 on succees, -errno on failure. 749*4308ce17STony Luck */ 750*4308ce17STony Luck int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size) 751*4308ce17STony Luck { 752*4308ce17STony Luck return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG); 753*4308ce17STony Luck } 754*4308ce17STony Luck 755*4308ce17STony Luck /** 75666b16edfSTang Chen * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region. 75766b16edfSTang Chen * @base: the base phys addr of the region 75866b16edfSTang Chen * @size: the size of the region 75966b16edfSTang Chen * 76066b16edfSTang Chen * Return 0 on succees, -errno on failure. 76166b16edfSTang Chen */ 76266b16edfSTang Chen int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size) 76366b16edfSTang Chen { 764*4308ce17STony Luck return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG); 76566b16edfSTang Chen } 76666b16edfSTang Chen 76766b16edfSTang Chen /** 768f1af9d3aSPhilipp Hachtmann * __next__mem_range - next function for for_each_free_mem_range() etc. 
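 *
 * For for_each_free_mem_range(), @type_a is &memblock.memory and @type_b is
 * &memblock.reserved, so the ranges returned are memory minus reservations.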
76935fd0808STejun Heo * @idx: pointer to u64 loop variable 770b1154233SGrygorii Strashko * @nid: node selector, %NUMA_NO_NODE for all nodes 771f1af9d3aSPhilipp Hachtmann * @type_a: pointer to memblock_type from where the range is taken 772f1af9d3aSPhilipp Hachtmann * @type_b: pointer to memblock_type which excludes memory from being taken 773dad7557eSWanpeng Li * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL 774dad7557eSWanpeng Li * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL 775dad7557eSWanpeng Li * @out_nid: ptr to int for nid of the range, can be %NULL 77635fd0808STejun Heo * 777f1af9d3aSPhilipp Hachtmann * Find the first area from *@idx which matches @nid, fill the out 77835fd0808STejun Heo * parameters, and update *@idx for the next iteration. The lower 32bit of 779f1af9d3aSPhilipp Hachtmann * *@idx contains index into type_a and the upper 32bit indexes the 780f1af9d3aSPhilipp Hachtmann * areas before each region in type_b. For example, if type_b regions 78135fd0808STejun Heo * look like the following, 78235fd0808STejun Heo * 78335fd0808STejun Heo * 0:[0-16), 1:[32-48), 2:[128-130) 78435fd0808STejun Heo * 78535fd0808STejun Heo * The upper 32bit indexes the following regions. 78635fd0808STejun Heo * 78735fd0808STejun Heo * 0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX) 78835fd0808STejun Heo * 78935fd0808STejun Heo * As both region arrays are sorted, the function advances the two indices 79035fd0808STejun Heo * in lockstep and returns each intersection. 79135fd0808STejun Heo */ 792f1af9d3aSPhilipp Hachtmann void __init_memblock __next_mem_range(u64 *idx, int nid, 793f1af9d3aSPhilipp Hachtmann struct memblock_type *type_a, 794f1af9d3aSPhilipp Hachtmann struct memblock_type *type_b, 79535fd0808STejun Heo phys_addr_t *out_start, 79635fd0808STejun Heo phys_addr_t *out_end, int *out_nid) 79735fd0808STejun Heo { 798f1af9d3aSPhilipp Hachtmann int idx_a = *idx & 0xffffffff; 799f1af9d3aSPhilipp Hachtmann int idx_b = *idx >> 32; 800b1154233SGrygorii Strashko 801f1af9d3aSPhilipp Hachtmann if (WARN_ONCE(nid == MAX_NUMNODES, 802f1af9d3aSPhilipp Hachtmann "Usage of MAX_NUMNODES is deprecated. 
Use NUMA_NO_NODE instead\n")) 803560dca27SGrygorii Strashko nid = NUMA_NO_NODE; 80435fd0808STejun Heo 805f1af9d3aSPhilipp Hachtmann for (; idx_a < type_a->cnt; idx_a++) { 806f1af9d3aSPhilipp Hachtmann struct memblock_region *m = &type_a->regions[idx_a]; 807f1af9d3aSPhilipp Hachtmann 80835fd0808STejun Heo phys_addr_t m_start = m->base; 80935fd0808STejun Heo phys_addr_t m_end = m->base + m->size; 810f1af9d3aSPhilipp Hachtmann int m_nid = memblock_get_region_node(m); 81135fd0808STejun Heo 81235fd0808STejun Heo /* only memory regions are associated with nodes, check it */ 813f1af9d3aSPhilipp Hachtmann if (nid != NUMA_NO_NODE && nid != m_nid) 81435fd0808STejun Heo continue; 81535fd0808STejun Heo 8160a313a99SXishi Qiu /* skip hotpluggable memory regions if needed */ 8170a313a99SXishi Qiu if (movable_node_is_enabled() && memblock_is_hotpluggable(m)) 8180a313a99SXishi Qiu continue; 8190a313a99SXishi Qiu 820f1af9d3aSPhilipp Hachtmann if (!type_b) { 821f1af9d3aSPhilipp Hachtmann if (out_start) 822f1af9d3aSPhilipp Hachtmann *out_start = m_start; 823f1af9d3aSPhilipp Hachtmann if (out_end) 824f1af9d3aSPhilipp Hachtmann *out_end = m_end; 825f1af9d3aSPhilipp Hachtmann if (out_nid) 826f1af9d3aSPhilipp Hachtmann *out_nid = m_nid; 827f1af9d3aSPhilipp Hachtmann idx_a++; 828f1af9d3aSPhilipp Hachtmann *idx = (u32)idx_a | (u64)idx_b << 32; 829f1af9d3aSPhilipp Hachtmann return; 830f1af9d3aSPhilipp Hachtmann } 83135fd0808STejun Heo 832f1af9d3aSPhilipp Hachtmann /* scan areas before each reservation */ 833f1af9d3aSPhilipp Hachtmann for (; idx_b < type_b->cnt + 1; idx_b++) { 834f1af9d3aSPhilipp Hachtmann struct memblock_region *r; 835f1af9d3aSPhilipp Hachtmann phys_addr_t r_start; 836f1af9d3aSPhilipp Hachtmann phys_addr_t r_end; 837f1af9d3aSPhilipp Hachtmann 838f1af9d3aSPhilipp Hachtmann r = &type_b->regions[idx_b]; 839f1af9d3aSPhilipp Hachtmann r_start = idx_b ? r[-1].base + r[-1].size : 0; 840f1af9d3aSPhilipp Hachtmann r_end = idx_b < type_b->cnt ? 841f1af9d3aSPhilipp Hachtmann r->base : ULLONG_MAX; 842f1af9d3aSPhilipp Hachtmann 843f1af9d3aSPhilipp Hachtmann /* 844f1af9d3aSPhilipp Hachtmann * if idx_b advanced past idx_a, 845f1af9d3aSPhilipp Hachtmann * break out to advance idx_a 846f1af9d3aSPhilipp Hachtmann */ 84735fd0808STejun Heo if (r_start >= m_end) 84835fd0808STejun Heo break; 84935fd0808STejun Heo /* if the two regions intersect, we're done */ 85035fd0808STejun Heo if (m_start < r_end) { 85135fd0808STejun Heo if (out_start) 852f1af9d3aSPhilipp Hachtmann *out_start = 853f1af9d3aSPhilipp Hachtmann max(m_start, r_start); 85435fd0808STejun Heo if (out_end) 85535fd0808STejun Heo *out_end = min(m_end, r_end); 85635fd0808STejun Heo if (out_nid) 857f1af9d3aSPhilipp Hachtmann *out_nid = m_nid; 85835fd0808STejun Heo /* 859f1af9d3aSPhilipp Hachtmann * The region which ends first is 860f1af9d3aSPhilipp Hachtmann * advanced for the next iteration. 
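				 * When both end at the same address, the
				 * memory index (idx_a) is the one advanced,
				 * since m_end <= r_end then holds.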
86135fd0808STejun Heo */ 86235fd0808STejun Heo if (m_end <= r_end) 863f1af9d3aSPhilipp Hachtmann idx_a++; 86435fd0808STejun Heo else 865f1af9d3aSPhilipp Hachtmann idx_b++; 866f1af9d3aSPhilipp Hachtmann *idx = (u32)idx_a | (u64)idx_b << 32; 86735fd0808STejun Heo return; 86835fd0808STejun Heo } 86935fd0808STejun Heo } 87035fd0808STejun Heo } 87135fd0808STejun Heo 87235fd0808STejun Heo /* signal end of iteration */ 87335fd0808STejun Heo *idx = ULLONG_MAX; 87435fd0808STejun Heo } 87535fd0808STejun Heo 8767bd0b0f0STejun Heo /** 877f1af9d3aSPhilipp Hachtmann * __next_mem_range_rev - generic next function for for_each_*_range_rev() 878f1af9d3aSPhilipp Hachtmann * 879f1af9d3aSPhilipp Hachtmann * Finds the next range from type_a which is not marked as unsuitable 880f1af9d3aSPhilipp Hachtmann * in type_b. 881f1af9d3aSPhilipp Hachtmann * 8827bd0b0f0STejun Heo * @idx: pointer to u64 loop variable 883b1154233SGrygorii Strashko * @nid: nid: node selector, %NUMA_NO_NODE for all nodes 884f1af9d3aSPhilipp Hachtmann * @type_a: pointer to memblock_type from where the range is taken 885f1af9d3aSPhilipp Hachtmann * @type_b: pointer to memblock_type which excludes memory from being taken 886dad7557eSWanpeng Li * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL 887dad7557eSWanpeng Li * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL 888dad7557eSWanpeng Li * @out_nid: ptr to int for nid of the range, can be %NULL 8897bd0b0f0STejun Heo * 890f1af9d3aSPhilipp Hachtmann * Reverse of __next_mem_range(). 8917bd0b0f0STejun Heo */ 892f1af9d3aSPhilipp Hachtmann void __init_memblock __next_mem_range_rev(u64 *idx, int nid, 893f1af9d3aSPhilipp Hachtmann struct memblock_type *type_a, 894f1af9d3aSPhilipp Hachtmann struct memblock_type *type_b, 8957bd0b0f0STejun Heo phys_addr_t *out_start, 8967bd0b0f0STejun Heo phys_addr_t *out_end, int *out_nid) 8977bd0b0f0STejun Heo { 898f1af9d3aSPhilipp Hachtmann int idx_a = *idx & 0xffffffff; 899f1af9d3aSPhilipp Hachtmann int idx_b = *idx >> 32; 900b1154233SGrygorii Strashko 901560dca27SGrygorii Strashko if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. 
Use NUMA_NO_NODE instead\n")) 902560dca27SGrygorii Strashko nid = NUMA_NO_NODE; 9037bd0b0f0STejun Heo 9047bd0b0f0STejun Heo if (*idx == (u64)ULLONG_MAX) { 905f1af9d3aSPhilipp Hachtmann idx_a = type_a->cnt - 1; 906f1af9d3aSPhilipp Hachtmann idx_b = type_b->cnt; 9077bd0b0f0STejun Heo } 9087bd0b0f0STejun Heo 909f1af9d3aSPhilipp Hachtmann for (; idx_a >= 0; idx_a--) { 910f1af9d3aSPhilipp Hachtmann struct memblock_region *m = &type_a->regions[idx_a]; 911f1af9d3aSPhilipp Hachtmann 9127bd0b0f0STejun Heo phys_addr_t m_start = m->base; 9137bd0b0f0STejun Heo phys_addr_t m_end = m->base + m->size; 914f1af9d3aSPhilipp Hachtmann int m_nid = memblock_get_region_node(m); 9157bd0b0f0STejun Heo 9167bd0b0f0STejun Heo /* only memory regions are associated with nodes, check it */ 917f1af9d3aSPhilipp Hachtmann if (nid != NUMA_NO_NODE && nid != m_nid) 9187bd0b0f0STejun Heo continue; 9197bd0b0f0STejun Heo 92055ac590cSTang Chen /* skip hotpluggable memory regions if needed */ 92155ac590cSTang Chen if (movable_node_is_enabled() && memblock_is_hotpluggable(m)) 92255ac590cSTang Chen continue; 92355ac590cSTang Chen 924f1af9d3aSPhilipp Hachtmann if (!type_b) { 925f1af9d3aSPhilipp Hachtmann if (out_start) 926f1af9d3aSPhilipp Hachtmann *out_start = m_start; 927f1af9d3aSPhilipp Hachtmann if (out_end) 928f1af9d3aSPhilipp Hachtmann *out_end = m_end; 929f1af9d3aSPhilipp Hachtmann if (out_nid) 930f1af9d3aSPhilipp Hachtmann *out_nid = m_nid; 931f1af9d3aSPhilipp Hachtmann idx_a++; 932f1af9d3aSPhilipp Hachtmann *idx = (u32)idx_a | (u64)idx_b << 32; 933f1af9d3aSPhilipp Hachtmann return; 934f1af9d3aSPhilipp Hachtmann } 9357bd0b0f0STejun Heo 936f1af9d3aSPhilipp Hachtmann /* scan areas before each reservation */ 937f1af9d3aSPhilipp Hachtmann for (; idx_b >= 0; idx_b--) { 938f1af9d3aSPhilipp Hachtmann struct memblock_region *r; 939f1af9d3aSPhilipp Hachtmann phys_addr_t r_start; 940f1af9d3aSPhilipp Hachtmann phys_addr_t r_end; 941f1af9d3aSPhilipp Hachtmann 942f1af9d3aSPhilipp Hachtmann r = &type_b->regions[idx_b]; 943f1af9d3aSPhilipp Hachtmann r_start = idx_b ? r[-1].base + r[-1].size : 0; 944f1af9d3aSPhilipp Hachtmann r_end = idx_b < type_b->cnt ? 945f1af9d3aSPhilipp Hachtmann r->base : ULLONG_MAX; 946f1af9d3aSPhilipp Hachtmann /* 947f1af9d3aSPhilipp Hachtmann * if idx_b advanced past idx_a, 948f1af9d3aSPhilipp Hachtmann * break out to advance idx_a 949f1af9d3aSPhilipp Hachtmann */ 950f1af9d3aSPhilipp Hachtmann 9517bd0b0f0STejun Heo if (r_end <= m_start) 9527bd0b0f0STejun Heo break; 9537bd0b0f0STejun Heo /* if the two regions intersect, we're done */ 9547bd0b0f0STejun Heo if (m_end > r_start) { 9557bd0b0f0STejun Heo if (out_start) 9567bd0b0f0STejun Heo *out_start = max(m_start, r_start); 9577bd0b0f0STejun Heo if (out_end) 9587bd0b0f0STejun Heo *out_end = min(m_end, r_end); 9597bd0b0f0STejun Heo if (out_nid) 960f1af9d3aSPhilipp Hachtmann *out_nid = m_nid; 9617bd0b0f0STejun Heo if (m_start >= r_start) 962f1af9d3aSPhilipp Hachtmann idx_a--; 9637bd0b0f0STejun Heo else 964f1af9d3aSPhilipp Hachtmann idx_b--; 965f1af9d3aSPhilipp Hachtmann *idx = (u32)idx_a | (u64)idx_b << 32; 9667bd0b0f0STejun Heo return; 9677bd0b0f0STejun Heo } 9687bd0b0f0STejun Heo } 9697bd0b0f0STejun Heo } 970f1af9d3aSPhilipp Hachtmann /* signal end of iteration */ 9717bd0b0f0STejun Heo *idx = ULLONG_MAX; 9727bd0b0f0STejun Heo } 9737bd0b0f0STejun Heo 9747c0caeb8STejun Heo #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 9757c0caeb8STejun Heo /* 9767c0caeb8STejun Heo * Common iterator interface used to define for_each_mem_range(). 
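 *
 * A minimal sketch of a caller that sums all memory pages via the
 * for_each_mem_pfn_range() wrapper built on this function (editor's
 * illustration):
 *
 *	unsigned long start_pfn, end_pfn, pages = 0;
 *	int i;
 *
 *	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL)
 *		pages += end_pfn - start_pfn;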
9777c0caeb8STejun Heo */ 9787c0caeb8STejun Heo void __init_memblock __next_mem_pfn_range(int *idx, int nid, 9797c0caeb8STejun Heo unsigned long *out_start_pfn, 9807c0caeb8STejun Heo unsigned long *out_end_pfn, int *out_nid) 9817c0caeb8STejun Heo { 9827c0caeb8STejun Heo struct memblock_type *type = &memblock.memory; 9837c0caeb8STejun Heo struct memblock_region *r; 9847c0caeb8STejun Heo 9857c0caeb8STejun Heo while (++*idx < type->cnt) { 9867c0caeb8STejun Heo r = &type->regions[*idx]; 9877c0caeb8STejun Heo 9887c0caeb8STejun Heo if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size)) 9897c0caeb8STejun Heo continue; 9907c0caeb8STejun Heo if (nid == MAX_NUMNODES || nid == r->nid) 9917c0caeb8STejun Heo break; 9927c0caeb8STejun Heo } 9937c0caeb8STejun Heo if (*idx >= type->cnt) { 9947c0caeb8STejun Heo *idx = -1; 9957c0caeb8STejun Heo return; 9967c0caeb8STejun Heo } 9977c0caeb8STejun Heo 9987c0caeb8STejun Heo if (out_start_pfn) 9997c0caeb8STejun Heo *out_start_pfn = PFN_UP(r->base); 10007c0caeb8STejun Heo if (out_end_pfn) 10017c0caeb8STejun Heo *out_end_pfn = PFN_DOWN(r->base + r->size); 10027c0caeb8STejun Heo if (out_nid) 10037c0caeb8STejun Heo *out_nid = r->nid; 10047c0caeb8STejun Heo } 10057c0caeb8STejun Heo 10067c0caeb8STejun Heo /** 10077c0caeb8STejun Heo * memblock_set_node - set node ID on memblock regions 10087c0caeb8STejun Heo * @base: base of area to set node ID for 10097c0caeb8STejun Heo * @size: size of area to set node ID for 1010e7e8de59STang Chen * @type: memblock type to set node ID for 10117c0caeb8STejun Heo * @nid: node ID to set 10127c0caeb8STejun Heo * 1013e7e8de59STang Chen * Set the nid of memblock @type regions in [@base,@base+@size) to @nid. 10147c0caeb8STejun Heo * Regions which cross the area boundaries are split as necessary. 10157c0caeb8STejun Heo * 10167c0caeb8STejun Heo * RETURNS: 10177c0caeb8STejun Heo * 0 on success, -errno on failure. 10187c0caeb8STejun Heo */ 10197c0caeb8STejun Heo int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size, 1020e7e8de59STang Chen struct memblock_type *type, int nid) 10217c0caeb8STejun Heo { 10226a9ceb31STejun Heo int start_rgn, end_rgn; 10236a9ceb31STejun Heo int i, ret; 10247c0caeb8STejun Heo 10256a9ceb31STejun Heo ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn); 10266a9ceb31STejun Heo if (ret) 10276a9ceb31STejun Heo return ret; 10287c0caeb8STejun Heo 10296a9ceb31STejun Heo for (i = start_rgn; i < end_rgn; i++) 1030e9d24ad3SWanpeng Li memblock_set_region_node(&type->regions[i], nid); 10317c0caeb8STejun Heo 10327c0caeb8STejun Heo memblock_merge_regions(type); 10337c0caeb8STejun Heo return 0; 10347c0caeb8STejun Heo } 10357c0caeb8STejun Heo #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ 10367c0caeb8STejun Heo 10372bfc2862SAkinobu Mita static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size, 10382bfc2862SAkinobu Mita phys_addr_t align, phys_addr_t start, 10392bfc2862SAkinobu Mita phys_addr_t end, int nid) 104095f72d1eSYinghai Lu { 10416ed311b2SBenjamin Herrenschmidt phys_addr_t found; 104295f72d1eSYinghai Lu 104379f40fabSGrygorii Strashko if (!align) 104479f40fabSGrygorii Strashko align = SMP_CACHE_BYTES; 104594f3d3afSVineet Gupta 10462bfc2862SAkinobu Mita found = memblock_find_in_range_node(size, align, start, end, nid); 1047aedf95eaSCatalin Marinas if (found && !memblock_reserve(found, size)) { 1048aedf95eaSCatalin Marinas /* 1049aedf95eaSCatalin Marinas * The min_count is set to 0 so that memblock allocations are 1050aedf95eaSCatalin Marinas * never reported as leaks. 
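		 * (kmemleak_alloc() below is therefore passed
		 * min_count == 0.)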
1051aedf95eaSCatalin Marinas */ 1052aedf95eaSCatalin Marinas kmemleak_alloc(__va(found), size, 0, 0); 10536ed311b2SBenjamin Herrenschmidt return found; 1054aedf95eaSCatalin Marinas } 10556ed311b2SBenjamin Herrenschmidt return 0; 105695f72d1eSYinghai Lu } 105795f72d1eSYinghai Lu 10582bfc2862SAkinobu Mita phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align, 10592bfc2862SAkinobu Mita phys_addr_t start, phys_addr_t end) 10602bfc2862SAkinobu Mita { 10612bfc2862SAkinobu Mita return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE); 10622bfc2862SAkinobu Mita } 10632bfc2862SAkinobu Mita 10642bfc2862SAkinobu Mita static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size, 10652bfc2862SAkinobu Mita phys_addr_t align, phys_addr_t max_addr, 10662bfc2862SAkinobu Mita int nid) 10672bfc2862SAkinobu Mita { 10682bfc2862SAkinobu Mita return memblock_alloc_range_nid(size, align, 0, max_addr, nid); 10692bfc2862SAkinobu Mita } 10702bfc2862SAkinobu Mita 10717bd0b0f0STejun Heo phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid) 10727bd0b0f0STejun Heo { 10737bd0b0f0STejun Heo return memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE, nid); 10747bd0b0f0STejun Heo } 10757bd0b0f0STejun Heo 10767bd0b0f0STejun Heo phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr) 10777bd0b0f0STejun Heo { 1078b1154233SGrygorii Strashko return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE); 10797bd0b0f0STejun Heo } 10807bd0b0f0STejun Heo 10816ed311b2SBenjamin Herrenschmidt phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr) 108295f72d1eSYinghai Lu { 10836ed311b2SBenjamin Herrenschmidt phys_addr_t alloc; 10846ed311b2SBenjamin Herrenschmidt 10856ed311b2SBenjamin Herrenschmidt alloc = __memblock_alloc_base(size, align, max_addr); 10866ed311b2SBenjamin Herrenschmidt 10876ed311b2SBenjamin Herrenschmidt if (alloc == 0) 10886ed311b2SBenjamin Herrenschmidt panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n", 10896ed311b2SBenjamin Herrenschmidt (unsigned long long) size, (unsigned long long) max_addr); 10906ed311b2SBenjamin Herrenschmidt 10916ed311b2SBenjamin Herrenschmidt return alloc; 109295f72d1eSYinghai Lu } 109395f72d1eSYinghai Lu 10946ed311b2SBenjamin Herrenschmidt phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align) 109595f72d1eSYinghai Lu { 10966ed311b2SBenjamin Herrenschmidt return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE); 109795f72d1eSYinghai Lu } 109895f72d1eSYinghai Lu 10999d1e2492SBenjamin Herrenschmidt phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid) 11009d1e2492SBenjamin Herrenschmidt { 11019d1e2492SBenjamin Herrenschmidt phys_addr_t res = memblock_alloc_nid(size, align, nid); 11029d1e2492SBenjamin Herrenschmidt 11039d1e2492SBenjamin Herrenschmidt if (res) 11049d1e2492SBenjamin Herrenschmidt return res; 110515fb0972STejun Heo return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE); 110695f72d1eSYinghai Lu } 110795f72d1eSYinghai Lu 110826f09e9bSSantosh Shilimkar /** 110926f09e9bSSantosh Shilimkar * memblock_virt_alloc_internal - allocate boot memory block 111026f09e9bSSantosh Shilimkar * @size: size of memory block to be allocated in bytes 111126f09e9bSSantosh Shilimkar * @align: alignment of the region and block's size 111226f09e9bSSantosh Shilimkar * @min_addr: the lower bound of the memory region to allocate (phys address) 
111326f09e9bSSantosh Shilimkar * @max_addr: the upper bound of the memory region to allocate (phys address) 111426f09e9bSSantosh Shilimkar * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 111526f09e9bSSantosh Shilimkar * 111626f09e9bSSantosh Shilimkar * The @min_addr limit is dropped if it cannot be satisfied and the allocation 111726f09e9bSSantosh Shilimkar * will fall back to memory below @min_addr. Also, allocation may fall back 111826f09e9bSSantosh Shilimkar * to any node in the system if the specified node cannot 111926f09e9bSSantosh Shilimkar * hold the requested memory. 112026f09e9bSSantosh Shilimkar * 112126f09e9bSSantosh Shilimkar * The allocation is performed from the memory region limited by 112226f09e9bSSantosh Shilimkar * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE. 112326f09e9bSSantosh Shilimkar * 112426f09e9bSSantosh Shilimkar * The memory block is aligned on SMP_CACHE_BYTES if @align == 0. 112526f09e9bSSantosh Shilimkar * 112626f09e9bSSantosh Shilimkar * The phys address of the allocated boot memory block is converted to virtual and 112726f09e9bSSantosh Shilimkar * the allocated memory is reset to 0. 112826f09e9bSSantosh Shilimkar * 112926f09e9bSSantosh Shilimkar * In addition, the function sets the min_count to 0 using kmemleak_alloc for the 113026f09e9bSSantosh Shilimkar * allocated boot memory block, so that it is never reported as a leak. 113126f09e9bSSantosh Shilimkar * 113226f09e9bSSantosh Shilimkar * RETURNS: 113326f09e9bSSantosh Shilimkar * Virtual address of allocated memory block on success, NULL on failure. 113426f09e9bSSantosh Shilimkar */ 113526f09e9bSSantosh Shilimkar static void * __init memblock_virt_alloc_internal( 113626f09e9bSSantosh Shilimkar phys_addr_t size, phys_addr_t align, 113726f09e9bSSantosh Shilimkar phys_addr_t min_addr, phys_addr_t max_addr, 113826f09e9bSSantosh Shilimkar int nid) 113926f09e9bSSantosh Shilimkar { 114026f09e9bSSantosh Shilimkar phys_addr_t alloc; 114126f09e9bSSantosh Shilimkar void *ptr; 114226f09e9bSSantosh Shilimkar 1143560dca27SGrygorii Strashko if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. 
Use NUMA_NO_NODE instead\n")) 1144560dca27SGrygorii Strashko nid = NUMA_NO_NODE; 114526f09e9bSSantosh Shilimkar 114626f09e9bSSantosh Shilimkar /* 114726f09e9bSSantosh Shilimkar * Detect any accidental use of these APIs after slab is ready, as at 114826f09e9bSSantosh Shilimkar * this moment memblock may be deinitialized already and its 114926f09e9bSSantosh Shilimkar * internal data may be destroyed (after execution of free_all_bootmem) 115026f09e9bSSantosh Shilimkar */ 115126f09e9bSSantosh Shilimkar if (WARN_ON_ONCE(slab_is_available())) 115226f09e9bSSantosh Shilimkar return kzalloc_node(size, GFP_NOWAIT, nid); 115326f09e9bSSantosh Shilimkar 115426f09e9bSSantosh Shilimkar if (!align) 115526f09e9bSSantosh Shilimkar align = SMP_CACHE_BYTES; 115626f09e9bSSantosh Shilimkar 1157f544e14fSYinghai Lu if (max_addr > memblock.current_limit) 1158f544e14fSYinghai Lu max_addr = memblock.current_limit; 1159f544e14fSYinghai Lu 116026f09e9bSSantosh Shilimkar again: 116126f09e9bSSantosh Shilimkar alloc = memblock_find_in_range_node(size, align, min_addr, max_addr, 116226f09e9bSSantosh Shilimkar nid); 116326f09e9bSSantosh Shilimkar if (alloc) 116426f09e9bSSantosh Shilimkar goto done; 116526f09e9bSSantosh Shilimkar 116626f09e9bSSantosh Shilimkar if (nid != NUMA_NO_NODE) { 116726f09e9bSSantosh Shilimkar alloc = memblock_find_in_range_node(size, align, min_addr, 116826f09e9bSSantosh Shilimkar max_addr, NUMA_NO_NODE); 116926f09e9bSSantosh Shilimkar if (alloc) 117026f09e9bSSantosh Shilimkar goto done; 117126f09e9bSSantosh Shilimkar } 117226f09e9bSSantosh Shilimkar 117326f09e9bSSantosh Shilimkar if (min_addr) { 117426f09e9bSSantosh Shilimkar min_addr = 0; 117526f09e9bSSantosh Shilimkar goto again; 117626f09e9bSSantosh Shilimkar } else { 117726f09e9bSSantosh Shilimkar goto error; 117826f09e9bSSantosh Shilimkar } 117926f09e9bSSantosh Shilimkar 118026f09e9bSSantosh Shilimkar done: 118126f09e9bSSantosh Shilimkar memblock_reserve(alloc, size); 118226f09e9bSSantosh Shilimkar ptr = phys_to_virt(alloc); 118326f09e9bSSantosh Shilimkar memset(ptr, 0, size); 118426f09e9bSSantosh Shilimkar 118526f09e9bSSantosh Shilimkar /* 118626f09e9bSSantosh Shilimkar * The min_count is set to 0 so that bootmem allocated blocks 118726f09e9bSSantosh Shilimkar * are never reported as leaks. This is because many of these blocks 118826f09e9bSSantosh Shilimkar * are only referred via the physical address which is not 118926f09e9bSSantosh Shilimkar * looked up by kmemleak. 
119026f09e9bSSantosh Shilimkar */ 119126f09e9bSSantosh Shilimkar kmemleak_alloc(ptr, size, 0, 0); 119226f09e9bSSantosh Shilimkar 119326f09e9bSSantosh Shilimkar return ptr; 119426f09e9bSSantosh Shilimkar 119526f09e9bSSantosh Shilimkar error: 119626f09e9bSSantosh Shilimkar return NULL; 119726f09e9bSSantosh Shilimkar } 119826f09e9bSSantosh Shilimkar 119926f09e9bSSantosh Shilimkar /** 120026f09e9bSSantosh Shilimkar * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block 120126f09e9bSSantosh Shilimkar * @size: size of memory block to be allocated in bytes 120226f09e9bSSantosh Shilimkar * @align: alignment of the region and block's size 120326f09e9bSSantosh Shilimkar * @min_addr: the lower bound of the memory region from where the allocation 120426f09e9bSSantosh Shilimkar * is preferred (phys address) 120526f09e9bSSantosh Shilimkar * @max_addr: the upper bound of the memory region from where the allocation 120626f09e9bSSantosh Shilimkar * is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to 120726f09e9bSSantosh Shilimkar * allocate only from memory limited by memblock.current_limit value 120826f09e9bSSantosh Shilimkar * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 120926f09e9bSSantosh Shilimkar * 121026f09e9bSSantosh Shilimkar * Public wrapper around memblock_virt_alloc_internal() which provides 121126f09e9bSSantosh Shilimkar * additional debug information (including caller info), if enabled. 121226f09e9bSSantosh Shilimkar * 121326f09e9bSSantosh Shilimkar * RETURNS: 121426f09e9bSSantosh Shilimkar * Virtual address of allocated memory block on success, NULL on failure. 121526f09e9bSSantosh Shilimkar */ 121626f09e9bSSantosh Shilimkar void * __init memblock_virt_alloc_try_nid_nopanic( 121726f09e9bSSantosh Shilimkar phys_addr_t size, phys_addr_t align, 121826f09e9bSSantosh Shilimkar phys_addr_t min_addr, phys_addr_t max_addr, 121926f09e9bSSantosh Shilimkar int nid) 122026f09e9bSSantosh Shilimkar { 122126f09e9bSSantosh Shilimkar memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n", 122226f09e9bSSantosh Shilimkar __func__, (u64)size, (u64)align, nid, (u64)min_addr, 122326f09e9bSSantosh Shilimkar (u64)max_addr, (void *)_RET_IP_); 122426f09e9bSSantosh Shilimkar return memblock_virt_alloc_internal(size, align, min_addr, 122526f09e9bSSantosh Shilimkar max_addr, nid); 122626f09e9bSSantosh Shilimkar } 122726f09e9bSSantosh Shilimkar 122826f09e9bSSantosh Shilimkar /** 122926f09e9bSSantosh Shilimkar * memblock_virt_alloc_try_nid - allocate boot memory block with panicking 123026f09e9bSSantosh Shilimkar * @size: size of memory block to be allocated in bytes 123126f09e9bSSantosh Shilimkar * @align: alignment of the region and block's size 123226f09e9bSSantosh Shilimkar * @min_addr: the lower bound of the memory region from where the allocation 123326f09e9bSSantosh Shilimkar * is preferred (phys address) 123426f09e9bSSantosh Shilimkar * @max_addr: the upper bound of the memory region from where the allocation 123526f09e9bSSantosh Shilimkar * is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to 123626f09e9bSSantosh Shilimkar * allocate only from memory limited by memblock.current_limit value 123726f09e9bSSantosh Shilimkar * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 123826f09e9bSSantosh Shilimkar * 123926f09e9bSSantosh Shilimkar * Public panicking wrapper around memblock_virt_alloc_internal() 124026f09e9bSSantosh Shilimkar * which provides debug information (including caller info), if enabled, 
124126f09e9bSSantosh Shilimkar * and panics if the request cannot be satisfied. 124226f09e9bSSantosh Shilimkar * 124326f09e9bSSantosh Shilimkar * RETURNS: 124426f09e9bSSantosh Shilimkar * Virtual address of the allocated memory block on success; on failure the function panics and does not return. 124526f09e9bSSantosh Shilimkar */ 124626f09e9bSSantosh Shilimkar void * __init memblock_virt_alloc_try_nid( 124726f09e9bSSantosh Shilimkar phys_addr_t size, phys_addr_t align, 124826f09e9bSSantosh Shilimkar phys_addr_t min_addr, phys_addr_t max_addr, 124926f09e9bSSantosh Shilimkar int nid) 125026f09e9bSSantosh Shilimkar { 125126f09e9bSSantosh Shilimkar void *ptr; 125226f09e9bSSantosh Shilimkar 125326f09e9bSSantosh Shilimkar memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n", 125426f09e9bSSantosh Shilimkar __func__, (u64)size, (u64)align, nid, (u64)min_addr, 125526f09e9bSSantosh Shilimkar (u64)max_addr, (void *)_RET_IP_); 125626f09e9bSSantosh Shilimkar ptr = memblock_virt_alloc_internal(size, align, 125726f09e9bSSantosh Shilimkar min_addr, max_addr, nid); 125826f09e9bSSantosh Shilimkar if (ptr) 125926f09e9bSSantosh Shilimkar return ptr; 126026f09e9bSSantosh Shilimkar 126126f09e9bSSantosh Shilimkar panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx\n", 126226f09e9bSSantosh Shilimkar __func__, (u64)size, (u64)align, nid, (u64)min_addr, 126326f09e9bSSantosh Shilimkar (u64)max_addr); 126426f09e9bSSantosh Shilimkar return NULL; 126526f09e9bSSantosh Shilimkar } 126626f09e9bSSantosh Shilimkar 126726f09e9bSSantosh Shilimkar /** 126826f09e9bSSantosh Shilimkar * __memblock_free_early - free boot memory block 126926f09e9bSSantosh Shilimkar * @base: phys starting address of the boot memory block 127026f09e9bSSantosh Shilimkar * @size: size of the boot memory block in bytes 127126f09e9bSSantosh Shilimkar * 127226f09e9bSSantosh Shilimkar * Free a boot memory block previously allocated by the memblock_virt_alloc_xx() API. 127326f09e9bSSantosh Shilimkar * The freed memory will not be released to the buddy allocator. 127426f09e9bSSantosh Shilimkar */ 127526f09e9bSSantosh Shilimkar void __init __memblock_free_early(phys_addr_t base, phys_addr_t size) 127626f09e9bSSantosh Shilimkar { 127726f09e9bSSantosh Shilimkar memblock_dbg("%s: [%#016llx-%#016llx] %pF\n", 127826f09e9bSSantosh Shilimkar __func__, (u64)base, (u64)base + size - 1, 127926f09e9bSSantosh Shilimkar (void *)_RET_IP_); 128026f09e9bSSantosh Shilimkar kmemleak_free_part(__va(base), size); 1281f1af9d3aSPhilipp Hachtmann memblock_remove_range(&memblock.reserved, base, size); 128226f09e9bSSantosh Shilimkar } 128326f09e9bSSantosh Shilimkar 128426f09e9bSSantosh Shilimkar /* 128526f09e9bSSantosh Shilimkar * __memblock_free_late - free bootmem block pages directly to buddy allocator 128626f09e9bSSantosh Shilimkar * @base: phys starting address of the boot memory block 128726f09e9bSSantosh Shilimkar * @size: size of the boot memory block in bytes 128826f09e9bSSantosh Shilimkar * 128926f09e9bSSantosh Shilimkar * This is only useful when the bootmem allocator has already been torn 129026f09e9bSSantosh Shilimkar * down, but we are still initializing the system. Pages are released directly 129126f09e9bSSantosh Shilimkar * to the buddy allocator; no bootmem metadata is updated because it is gone. 
129226f09e9bSSantosh Shilimkar */ 129326f09e9bSSantosh Shilimkar void __init __memblock_free_late(phys_addr_t base, phys_addr_t size) 129426f09e9bSSantosh Shilimkar { 129526f09e9bSSantosh Shilimkar u64 cursor, end; 129626f09e9bSSantosh Shilimkar 129726f09e9bSSantosh Shilimkar memblock_dbg("%s: [%#016llx-%#016llx] %pF\n", 129826f09e9bSSantosh Shilimkar __func__, (u64)base, (u64)base + size - 1, 129926f09e9bSSantosh Shilimkar (void *)_RET_IP_); 130026f09e9bSSantosh Shilimkar kmemleak_free_part(__va(base), size); 130126f09e9bSSantosh Shilimkar cursor = PFN_UP(base); 130226f09e9bSSantosh Shilimkar end = PFN_DOWN(base + size); 130326f09e9bSSantosh Shilimkar 130426f09e9bSSantosh Shilimkar for (; cursor < end; cursor++) { 130526f09e9bSSantosh Shilimkar __free_pages_bootmem(pfn_to_page(cursor), 0); 130626f09e9bSSantosh Shilimkar totalram_pages++; 130726f09e9bSSantosh Shilimkar } 130826f09e9bSSantosh Shilimkar } 13099d1e2492SBenjamin Herrenschmidt 13109d1e2492SBenjamin Herrenschmidt /* 13119d1e2492SBenjamin Herrenschmidt * Remaining API functions 13129d1e2492SBenjamin Herrenschmidt */ 13139d1e2492SBenjamin Herrenschmidt 13142898cc4cSBenjamin Herrenschmidt phys_addr_t __init memblock_phys_mem_size(void) 131595f72d1eSYinghai Lu { 13161440c4e2STejun Heo return memblock.memory.total_size; 131795f72d1eSYinghai Lu } 131895f72d1eSYinghai Lu 1319595ad9afSYinghai Lu phys_addr_t __init memblock_mem_size(unsigned long limit_pfn) 1320595ad9afSYinghai Lu { 1321595ad9afSYinghai Lu unsigned long pages = 0; 1322595ad9afSYinghai Lu struct memblock_region *r; 1323595ad9afSYinghai Lu unsigned long start_pfn, end_pfn; 1324595ad9afSYinghai Lu 1325595ad9afSYinghai Lu for_each_memblock(memory, r) { 1326595ad9afSYinghai Lu start_pfn = memblock_region_memory_base_pfn(r); 1327595ad9afSYinghai Lu end_pfn = memblock_region_memory_end_pfn(r); 1328595ad9afSYinghai Lu start_pfn = min_t(unsigned long, start_pfn, limit_pfn); 1329595ad9afSYinghai Lu end_pfn = min_t(unsigned long, end_pfn, limit_pfn); 1330595ad9afSYinghai Lu pages += end_pfn - start_pfn; 1331595ad9afSYinghai Lu } 1332595ad9afSYinghai Lu 133316763230SFabian Frederick return PFN_PHYS(pages); 1334595ad9afSYinghai Lu } 1335595ad9afSYinghai Lu 13360a93ebefSSam Ravnborg /* lowest address */ 13370a93ebefSSam Ravnborg phys_addr_t __init_memblock memblock_start_of_DRAM(void) 13380a93ebefSSam Ravnborg { 13390a93ebefSSam Ravnborg return memblock.memory.regions[0].base; 13400a93ebefSSam Ravnborg } 13410a93ebefSSam Ravnborg 134210d06439SYinghai Lu phys_addr_t __init_memblock memblock_end_of_DRAM(void) 134395f72d1eSYinghai Lu { 134495f72d1eSYinghai Lu int idx = memblock.memory.cnt - 1; 134595f72d1eSYinghai Lu 1346e3239ff9SBenjamin Herrenschmidt return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size); 134795f72d1eSYinghai Lu } 134895f72d1eSYinghai Lu 1349c0ce8fefSTejun Heo void __init memblock_enforce_memory_limit(phys_addr_t limit) 135095f72d1eSYinghai Lu { 1351c0ce8fefSTejun Heo phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX; 1352136199f0SEmil Medve struct memblock_region *r; 135395f72d1eSYinghai Lu 1354c0ce8fefSTejun Heo if (!limit) 135595f72d1eSYinghai Lu return; 135695f72d1eSYinghai Lu 1357c0ce8fefSTejun Heo /* find out max address */ 1358136199f0SEmil Medve for_each_memblock(memory, r) { 1359c0ce8fefSTejun Heo if (limit <= r->size) { 1360c0ce8fefSTejun Heo max_addr = r->base + limit; 136195f72d1eSYinghai Lu break; 136295f72d1eSYinghai Lu } 1363c0ce8fefSTejun Heo limit -= r->size; 136495f72d1eSYinghai Lu } 1365c0ce8fefSTejun Heo 1366c0ce8fefSTejun Heo 
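	/*
	 * If @limit is larger than the total amount of memory, max_addr is
	 * left at ULLONG_MAX and the removals below end up removing nothing.
	 */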
/* truncate both memory and reserved regions */ 1367f1af9d3aSPhilipp Hachtmann memblock_remove_range(&memblock.memory, max_addr, 1368f1af9d3aSPhilipp Hachtmann (phys_addr_t)ULLONG_MAX); 1369f1af9d3aSPhilipp Hachtmann memblock_remove_range(&memblock.reserved, max_addr, 1370f1af9d3aSPhilipp Hachtmann (phys_addr_t)ULLONG_MAX); 137195f72d1eSYinghai Lu } 137295f72d1eSYinghai Lu 1373cd79481dSYinghai Lu static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr) 137472d4b0b4SBenjamin Herrenschmidt { 137572d4b0b4SBenjamin Herrenschmidt unsigned int left = 0, right = type->cnt; 137672d4b0b4SBenjamin Herrenschmidt 137772d4b0b4SBenjamin Herrenschmidt do { 137872d4b0b4SBenjamin Herrenschmidt unsigned int mid = (right + left) / 2; 137972d4b0b4SBenjamin Herrenschmidt 138072d4b0b4SBenjamin Herrenschmidt if (addr < type->regions[mid].base) 138172d4b0b4SBenjamin Herrenschmidt right = mid; 138272d4b0b4SBenjamin Herrenschmidt else if (addr >= (type->regions[mid].base + 138372d4b0b4SBenjamin Herrenschmidt type->regions[mid].size)) 138472d4b0b4SBenjamin Herrenschmidt left = mid + 1; 138572d4b0b4SBenjamin Herrenschmidt else 138672d4b0b4SBenjamin Herrenschmidt return mid; 138772d4b0b4SBenjamin Herrenschmidt } while (left < right); 138872d4b0b4SBenjamin Herrenschmidt return -1; 138972d4b0b4SBenjamin Herrenschmidt } 139072d4b0b4SBenjamin Herrenschmidt 13912898cc4cSBenjamin Herrenschmidt int __init memblock_is_reserved(phys_addr_t addr) 139295f72d1eSYinghai Lu { 139372d4b0b4SBenjamin Herrenschmidt return memblock_search(&memblock.reserved, addr) != -1; 139495f72d1eSYinghai Lu } 139572d4b0b4SBenjamin Herrenschmidt 13963661ca66SYinghai Lu int __init_memblock memblock_is_memory(phys_addr_t addr) 139772d4b0b4SBenjamin Herrenschmidt { 139872d4b0b4SBenjamin Herrenschmidt return memblock_search(&memblock.memory, addr) != -1; 139972d4b0b4SBenjamin Herrenschmidt } 140072d4b0b4SBenjamin Herrenschmidt 1401e76b63f8SYinghai Lu #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 1402e76b63f8SYinghai Lu int __init_memblock memblock_search_pfn_nid(unsigned long pfn, 1403e76b63f8SYinghai Lu unsigned long *start_pfn, unsigned long *end_pfn) 1404e76b63f8SYinghai Lu { 1405e76b63f8SYinghai Lu struct memblock_type *type = &memblock.memory; 140616763230SFabian Frederick int mid = memblock_search(type, PFN_PHYS(pfn)); 1407e76b63f8SYinghai Lu 1408e76b63f8SYinghai Lu if (mid == -1) 1409e76b63f8SYinghai Lu return -1; 1410e76b63f8SYinghai Lu 1411f7e2f7e8SFabian Frederick *start_pfn = PFN_DOWN(type->regions[mid].base); 1412f7e2f7e8SFabian Frederick *end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size); 1413e76b63f8SYinghai Lu 1414e76b63f8SYinghai Lu return type->regions[mid].nid; 1415e76b63f8SYinghai Lu } 1416e76b63f8SYinghai Lu #endif 1417e76b63f8SYinghai Lu 1418eab30949SStephen Boyd /** 1419eab30949SStephen Boyd * memblock_is_region_memory - check if a region is a subset of memory 1420eab30949SStephen Boyd * @base: base of region to check 1421eab30949SStephen Boyd * @size: size of region to check 1422eab30949SStephen Boyd * 1423eab30949SStephen Boyd * Check if the region [@base, @base+@size) is a subset of a memory block. 
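 * The check succeeds only if the single memblock.memory region that
 * contains @base also covers @base + @size, i.e. the whole range lies
 * inside one registered memory region.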
1424eab30949SStephen Boyd * 1425eab30949SStephen Boyd * RETURNS: 1426eab30949SStephen Boyd * 0 if false, non-zero if true 1427eab30949SStephen Boyd */ 14283661ca66SYinghai Lu int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size) 142972d4b0b4SBenjamin Herrenschmidt { 1430abb65272STomi Valkeinen int idx = memblock_search(&memblock.memory, base); 1431eb18f1b5STejun Heo phys_addr_t end = base + memblock_cap_size(base, &size); 143272d4b0b4SBenjamin Herrenschmidt 143372d4b0b4SBenjamin Herrenschmidt if (idx == -1) 143495f72d1eSYinghai Lu return 0; 1435abb65272STomi Valkeinen return memblock.memory.regions[idx].base <= base && 1436abb65272STomi Valkeinen (memblock.memory.regions[idx].base + 1437eb18f1b5STejun Heo memblock.memory.regions[idx].size) >= end; 143895f72d1eSYinghai Lu } 143995f72d1eSYinghai Lu 1440eab30949SStephen Boyd /** 1441eab30949SStephen Boyd * memblock_is_region_reserved - check if a region intersects reserved memory 1442eab30949SStephen Boyd * @base: base of region to check 1443eab30949SStephen Boyd * @size: size of region to check 1444eab30949SStephen Boyd * 1445eab30949SStephen Boyd * Check if the region [@base, @base+@size) intersects a reserved memory block. 1446eab30949SStephen Boyd * 1447eab30949SStephen Boyd * RETURNS: 1448eab30949SStephen Boyd * 0 if false, non-zero if true 1449eab30949SStephen Boyd */ 145010d06439SYinghai Lu int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size) 145195f72d1eSYinghai Lu { 1452eb18f1b5STejun Heo memblock_cap_size(base, &size); 1453f1c2c19cSBenjamin Herrenschmidt return memblock_overlaps_region(&memblock.reserved, base, size) >= 0; 145495f72d1eSYinghai Lu } 145595f72d1eSYinghai Lu 14566ede1fd3SYinghai Lu void __init_memblock memblock_trim_memory(phys_addr_t align) 14576ede1fd3SYinghai Lu { 14586ede1fd3SYinghai Lu phys_addr_t start, end, orig_start, orig_end; 1459136199f0SEmil Medve struct memblock_region *r; 14606ede1fd3SYinghai Lu 1461136199f0SEmil Medve for_each_memblock(memory, r) { 1462136199f0SEmil Medve orig_start = r->base; 1463136199f0SEmil Medve orig_end = r->base + r->size; 14646ede1fd3SYinghai Lu start = round_up(orig_start, align); 14656ede1fd3SYinghai Lu end = round_down(orig_end, align); 14666ede1fd3SYinghai Lu 14676ede1fd3SYinghai Lu if (start == orig_start && end == orig_end) 14686ede1fd3SYinghai Lu continue; 14696ede1fd3SYinghai Lu 14706ede1fd3SYinghai Lu if (start < end) { 1471136199f0SEmil Medve r->base = start; 1472136199f0SEmil Medve r->size = end - start; 14736ede1fd3SYinghai Lu } else { 1474136199f0SEmil Medve memblock_remove_region(&memblock.memory, 1475136199f0SEmil Medve r - memblock.memory.regions); 1476136199f0SEmil Medve r--; 14776ede1fd3SYinghai Lu } 14786ede1fd3SYinghai Lu } 14796ede1fd3SYinghai Lu } 1480e63075a3SBenjamin Herrenschmidt 14813661ca66SYinghai Lu void __init_memblock memblock_set_current_limit(phys_addr_t limit) 1482e63075a3SBenjamin Herrenschmidt { 1483e63075a3SBenjamin Herrenschmidt memblock.current_limit = limit; 1484e63075a3SBenjamin Herrenschmidt } 1485e63075a3SBenjamin Herrenschmidt 1486fec51014SLaura Abbott phys_addr_t __init_memblock memblock_get_current_limit(void) 1487fec51014SLaura Abbott { 1488fec51014SLaura Abbott return memblock.current_limit; 1489fec51014SLaura Abbott } 1490fec51014SLaura Abbott 14917c0caeb8STejun Heo static void __init_memblock memblock_dump(struct memblock_type *type, char *name) 14926ed311b2SBenjamin Herrenschmidt { 14936ed311b2SBenjamin Herrenschmidt unsigned long long base, size; 149466a20757STang Chen 
unsigned long flags; 14956ed311b2SBenjamin Herrenschmidt int i; 14966ed311b2SBenjamin Herrenschmidt 14977c0caeb8STejun Heo pr_info(" %s.cnt = 0x%lx\n", name, type->cnt); 14986ed311b2SBenjamin Herrenschmidt 14997c0caeb8STejun Heo for (i = 0; i < type->cnt; i++) { 15007c0caeb8STejun Heo struct memblock_region *rgn = &type->regions[i]; 15017c0caeb8STejun Heo char nid_buf[32] = ""; 15026ed311b2SBenjamin Herrenschmidt 15037c0caeb8STejun Heo base = rgn->base; 15047c0caeb8STejun Heo size = rgn->size; 150566a20757STang Chen flags = rgn->flags; 15067c0caeb8STejun Heo #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 15077c0caeb8STejun Heo if (memblock_get_region_node(rgn) != MAX_NUMNODES) 15087c0caeb8STejun Heo snprintf(nid_buf, sizeof(nid_buf), " on node %d", 15097c0caeb8STejun Heo memblock_get_region_node(rgn)); 15107c0caeb8STejun Heo #endif 151166a20757STang Chen pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s flags: %#lx\n", 151266a20757STang Chen name, i, base, base + size - 1, size, nid_buf, flags); 15136ed311b2SBenjamin Herrenschmidt } 15146ed311b2SBenjamin Herrenschmidt } 15156ed311b2SBenjamin Herrenschmidt 15164ff7b82fSTejun Heo void __init_memblock __memblock_dump_all(void) 15176ed311b2SBenjamin Herrenschmidt { 15186ed311b2SBenjamin Herrenschmidt pr_info("MEMBLOCK configuration:\n"); 15191440c4e2STejun Heo pr_info(" memory size = %#llx reserved size = %#llx\n", 15201440c4e2STejun Heo (unsigned long long)memblock.memory.total_size, 15211440c4e2STejun Heo (unsigned long long)memblock.reserved.total_size); 15226ed311b2SBenjamin Herrenschmidt 15236ed311b2SBenjamin Herrenschmidt memblock_dump(&memblock.memory, "memory"); 15246ed311b2SBenjamin Herrenschmidt memblock_dump(&memblock.reserved, "reserved"); 15256ed311b2SBenjamin Herrenschmidt } 15266ed311b2SBenjamin Herrenschmidt 15271aadc056STejun Heo void __init memblock_allow_resize(void) 15286ed311b2SBenjamin Herrenschmidt { 1529142b45a7SBenjamin Herrenschmidt memblock_can_resize = 1; 15306ed311b2SBenjamin Herrenschmidt } 15316ed311b2SBenjamin Herrenschmidt 15326ed311b2SBenjamin Herrenschmidt static int __init early_memblock(char *p) 15336ed311b2SBenjamin Herrenschmidt { 15346ed311b2SBenjamin Herrenschmidt if (p && strstr(p, "debug")) 15356ed311b2SBenjamin Herrenschmidt memblock_debug = 1; 15366ed311b2SBenjamin Herrenschmidt return 0; 15376ed311b2SBenjamin Herrenschmidt } 15386ed311b2SBenjamin Herrenschmidt early_param("memblock", early_memblock); 15396ed311b2SBenjamin Herrenschmidt 1540c378ddd5STejun Heo #if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK) 15416d03b885SBenjamin Herrenschmidt 15426d03b885SBenjamin Herrenschmidt static int memblock_debug_show(struct seq_file *m, void *private) 15436d03b885SBenjamin Herrenschmidt { 15446d03b885SBenjamin Herrenschmidt struct memblock_type *type = m->private; 15456d03b885SBenjamin Herrenschmidt struct memblock_region *reg; 15466d03b885SBenjamin Herrenschmidt int i; 15476d03b885SBenjamin Herrenschmidt 15486d03b885SBenjamin Herrenschmidt for (i = 0; i < type->cnt; i++) { 15496d03b885SBenjamin Herrenschmidt reg = &type->regions[i]; 15506d03b885SBenjamin Herrenschmidt seq_printf(m, "%4d: ", i); 15516d03b885SBenjamin Herrenschmidt if (sizeof(phys_addr_t) == 4) 15526d03b885SBenjamin Herrenschmidt seq_printf(m, "0x%08lx..0x%08lx\n", 15536d03b885SBenjamin Herrenschmidt (unsigned long)reg->base, 15546d03b885SBenjamin Herrenschmidt (unsigned long)(reg->base + reg->size - 1)); 15556d03b885SBenjamin Herrenschmidt else 15566d03b885SBenjamin Herrenschmidt seq_printf(m, "0x%016llx..0x%016llx\n", 
15576d03b885SBenjamin Herrenschmidt (unsigned long long)reg->base, 15586d03b885SBenjamin Herrenschmidt (unsigned long long)(reg->base + reg->size - 1)); 15596d03b885SBenjamin Herrenschmidt 15606d03b885SBenjamin Herrenschmidt } 15616d03b885SBenjamin Herrenschmidt return 0; 15626d03b885SBenjamin Herrenschmidt } 15636d03b885SBenjamin Herrenschmidt 15646d03b885SBenjamin Herrenschmidt static int memblock_debug_open(struct inode *inode, struct file *file) 15656d03b885SBenjamin Herrenschmidt { 15666d03b885SBenjamin Herrenschmidt return single_open(file, memblock_debug_show, inode->i_private); 15676d03b885SBenjamin Herrenschmidt } 15686d03b885SBenjamin Herrenschmidt 15696d03b885SBenjamin Herrenschmidt static const struct file_operations memblock_debug_fops = { 15706d03b885SBenjamin Herrenschmidt .open = memblock_debug_open, 15716d03b885SBenjamin Herrenschmidt .read = seq_read, 15726d03b885SBenjamin Herrenschmidt .llseek = seq_lseek, 15736d03b885SBenjamin Herrenschmidt .release = single_release, 15746d03b885SBenjamin Herrenschmidt }; 15756d03b885SBenjamin Herrenschmidt 15766d03b885SBenjamin Herrenschmidt static int __init memblock_init_debugfs(void) 15776d03b885SBenjamin Herrenschmidt { 15786d03b885SBenjamin Herrenschmidt struct dentry *root = debugfs_create_dir("memblock", NULL); 15796d03b885SBenjamin Herrenschmidt if (!root) 15806d03b885SBenjamin Herrenschmidt return -ENXIO; 15816d03b885SBenjamin Herrenschmidt debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops); 15826d03b885SBenjamin Herrenschmidt debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops); 158370210ed9SPhilipp Hachtmann #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP 158470210ed9SPhilipp Hachtmann debugfs_create_file("physmem", S_IRUGO, root, &memblock.physmem, &memblock_debug_fops); 158570210ed9SPhilipp Hachtmann #endif 15866d03b885SBenjamin Herrenschmidt 15876d03b885SBenjamin Herrenschmidt return 0; 15886d03b885SBenjamin Herrenschmidt } 15896d03b885SBenjamin Herrenschmidt __initcall(memblock_init_debugfs); 15906d03b885SBenjamin Herrenschmidt 15916d03b885SBenjamin Herrenschmidt #endif /* CONFIG_DEBUG_FS */ 1592
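/*
 * Editor's note: a minimal usage sketch (not part of memblock.c) showing how
 * early-boot code might use the boot allocator documented above. The function
 * name, the table and its size calculation are hypothetical; only the
 * memblock_virt_alloc_try_nid() and __memblock_free_early() signatures come
 * from this file. The sketch is wrapped in #if 0 so it is never compiled.
 */
#if 0
static void __init example_setup_node_table(int nid, unsigned long entries)
{
	phys_addr_t size = entries * sizeof(unsigned long);
	unsigned long *table;

	/*
	 * Prefer memory on @nid, fall back to any node and any address, and
	 * panic if nothing can be found; the returned memory is zeroed.
	 */
	table = memblock_virt_alloc_try_nid(size, SMP_CACHE_BYTES,
					    0, BOOTMEM_ALLOC_ACCESSIBLE, nid);

	/* ... fill in the table during early boot ... */

	/* Hand the block back to memblock once it is no longer needed. */
	__memblock_free_early(__pa(table), size);
}
#endif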