195f72d1eSYinghai Lu /* 295f72d1eSYinghai Lu * Procedures for maintaining information about logical memory blocks. 395f72d1eSYinghai Lu * 495f72d1eSYinghai Lu * Peter Bergner, IBM Corp. June 2001. 595f72d1eSYinghai Lu * Copyright (C) 2001 Peter Bergner. 695f72d1eSYinghai Lu * 795f72d1eSYinghai Lu * This program is free software; you can redistribute it and/or 895f72d1eSYinghai Lu * modify it under the terms of the GNU General Public License 995f72d1eSYinghai Lu * as published by the Free Software Foundation; either version 1095f72d1eSYinghai Lu * 2 of the License, or (at your option) any later version. 1195f72d1eSYinghai Lu */ 1295f72d1eSYinghai Lu 1395f72d1eSYinghai Lu #include <linux/kernel.h> 14142b45a7SBenjamin Herrenschmidt #include <linux/slab.h> 1595f72d1eSYinghai Lu #include <linux/init.h> 1695f72d1eSYinghai Lu #include <linux/bitops.h> 17449e8df3SBenjamin Herrenschmidt #include <linux/poison.h> 18c196f76fSBenjamin Herrenschmidt #include <linux/pfn.h> 196d03b885SBenjamin Herrenschmidt #include <linux/debugfs.h> 206d03b885SBenjamin Herrenschmidt #include <linux/seq_file.h> 2195f72d1eSYinghai Lu #include <linux/memblock.h> 2295f72d1eSYinghai Lu 2379442ed1STang Chen #include <asm-generic/sections.h> 2426f09e9bSSantosh Shilimkar #include <linux/io.h> 2526f09e9bSSantosh Shilimkar 2626f09e9bSSantosh Shilimkar #include "internal.h" 2779442ed1STang Chen 28fe091c20STejun Heo static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; 29fe091c20STejun Heo static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; 3070210ed9SPhilipp Hachtmann #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP 3170210ed9SPhilipp Hachtmann static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock; 3270210ed9SPhilipp Hachtmann #endif 33fe091c20STejun Heo 34fe091c20STejun Heo struct memblock memblock __initdata_memblock = { 35fe091c20STejun Heo .memory.regions = memblock_memory_init_regions, 36fe091c20STejun Heo .memory.cnt = 1, /* empty dummy entry */ 37fe091c20STejun Heo .memory.max = INIT_MEMBLOCK_REGIONS, 38fe091c20STejun Heo 39fe091c20STejun Heo .reserved.regions = memblock_reserved_init_regions, 40fe091c20STejun Heo .reserved.cnt = 1, /* empty dummy entry */ 41fe091c20STejun Heo .reserved.max = INIT_MEMBLOCK_REGIONS, 42fe091c20STejun Heo 4370210ed9SPhilipp Hachtmann #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP 4470210ed9SPhilipp Hachtmann .physmem.regions = memblock_physmem_init_regions, 4570210ed9SPhilipp Hachtmann .physmem.cnt = 1, /* empty dummy entry */ 4670210ed9SPhilipp Hachtmann .physmem.max = INIT_PHYSMEM_REGIONS, 4770210ed9SPhilipp Hachtmann #endif 4870210ed9SPhilipp Hachtmann 4979442ed1STang Chen .bottom_up = false, 50fe091c20STejun Heo .current_limit = MEMBLOCK_ALLOC_ANYWHERE, 51fe091c20STejun Heo }; 5295f72d1eSYinghai Lu 5310d06439SYinghai Lu int memblock_debug __initdata_memblock; 5455ac590cSTang Chen #ifdef CONFIG_MOVABLE_NODE 5555ac590cSTang Chen bool movable_node_enabled __initdata_memblock = false; 5655ac590cSTang Chen #endif 571aadc056STejun Heo static int memblock_can_resize __initdata_memblock; 58181eb394SGavin Shan static int memblock_memory_in_slab __initdata_memblock = 0; 59181eb394SGavin Shan static int memblock_reserved_in_slab __initdata_memblock = 0; 6095f72d1eSYinghai Lu 61142b45a7SBenjamin Herrenschmidt /* inline so we don't get a warning when pr_debug is compiled out */ 62c2233116SRaghavendra D Prabhu static __init_memblock const char * 63c2233116SRaghavendra D Prabhu 
memblock_type_name(struct memblock_type *type) 64142b45a7SBenjamin Herrenschmidt { 65142b45a7SBenjamin Herrenschmidt if (type == &memblock.memory) 66142b45a7SBenjamin Herrenschmidt return "memory"; 67142b45a7SBenjamin Herrenschmidt else if (type == &memblock.reserved) 68142b45a7SBenjamin Herrenschmidt return "reserved"; 69142b45a7SBenjamin Herrenschmidt else 70142b45a7SBenjamin Herrenschmidt return "unknown"; 71142b45a7SBenjamin Herrenschmidt } 72142b45a7SBenjamin Herrenschmidt 73eb18f1b5STejun Heo /* adjust *@size so that (@base + *@size) doesn't overflow, return new size */ 74eb18f1b5STejun Heo static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size) 75eb18f1b5STejun Heo { 76eb18f1b5STejun Heo return *size = min(*size, (phys_addr_t)ULLONG_MAX - base); 77eb18f1b5STejun Heo } 78eb18f1b5STejun Heo 796ed311b2SBenjamin Herrenschmidt /* 806ed311b2SBenjamin Herrenschmidt * Address comparison utilities 816ed311b2SBenjamin Herrenschmidt */ 8210d06439SYinghai Lu static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1, 832898cc4cSBenjamin Herrenschmidt phys_addr_t base2, phys_addr_t size2) 8495f72d1eSYinghai Lu { 8595f72d1eSYinghai Lu return ((base1 < (base2 + size2)) && (base2 < (base1 + size1))); 8695f72d1eSYinghai Lu } 8795f72d1eSYinghai Lu 882d7d3eb2SH Hartley Sweeten static long __init_memblock memblock_overlaps_region(struct memblock_type *type, 892d7d3eb2SH Hartley Sweeten phys_addr_t base, phys_addr_t size) 906ed311b2SBenjamin Herrenschmidt { 916ed311b2SBenjamin Herrenschmidt unsigned long i; 926ed311b2SBenjamin Herrenschmidt 936ed311b2SBenjamin Herrenschmidt for (i = 0; i < type->cnt; i++) { 946ed311b2SBenjamin Herrenschmidt phys_addr_t rgnbase = type->regions[i].base; 956ed311b2SBenjamin Herrenschmidt phys_addr_t rgnsize = type->regions[i].size; 966ed311b2SBenjamin Herrenschmidt if (memblock_addrs_overlap(base, size, rgnbase, rgnsize)) 976ed311b2SBenjamin Herrenschmidt break; 986ed311b2SBenjamin Herrenschmidt } 996ed311b2SBenjamin Herrenschmidt 1006ed311b2SBenjamin Herrenschmidt return (i < type->cnt) ? i : -1; 1016ed311b2SBenjamin Herrenschmidt } 1026ed311b2SBenjamin Herrenschmidt 10379442ed1STang Chen /* 10479442ed1STang Chen * __memblock_find_range_bottom_up - find free area utility in bottom-up 10579442ed1STang Chen * @start: start of candidate range 10679442ed1STang Chen * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE} 10779442ed1STang Chen * @size: size of free area to find 10879442ed1STang Chen * @align: alignment of free area to find 109b1154233SGrygorii Strashko * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 11079442ed1STang Chen * 11179442ed1STang Chen * Utility called from memblock_find_in_range_node(), find free area bottom-up. 11279442ed1STang Chen * 11379442ed1STang Chen * RETURNS: 11479442ed1STang Chen * Found address on success, 0 on failure. 
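 *
 * Example (illustrative only; the candidate range, size and alignment are
 * arbitrary, and the SZ_* constants are assumed from <linux/sizes.h>):
 *
 *	phys_addr_t cand;
 *
 *	cand = __memblock_find_range_bottom_up(SZ_1M, SZ_512M, SZ_64K,
 *					       SZ_64K, NUMA_NO_NODE);
 *
 * A return value of 0 here means no free, 64K-aligned 64K area exists in
 * [1M, 512M) on any node.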
11579442ed1STang Chen */ 11679442ed1STang Chen static phys_addr_t __init_memblock 11779442ed1STang Chen __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end, 11879442ed1STang Chen phys_addr_t size, phys_addr_t align, int nid) 11979442ed1STang Chen { 12079442ed1STang Chen phys_addr_t this_start, this_end, cand; 12179442ed1STang Chen u64 i; 12279442ed1STang Chen 12379442ed1STang Chen for_each_free_mem_range(i, nid, &this_start, &this_end, NULL) { 12479442ed1STang Chen this_start = clamp(this_start, start, end); 12579442ed1STang Chen this_end = clamp(this_end, start, end); 12679442ed1STang Chen 12779442ed1STang Chen cand = round_up(this_start, align); 12879442ed1STang Chen if (cand < this_end && this_end - cand >= size) 12979442ed1STang Chen return cand; 13079442ed1STang Chen } 13179442ed1STang Chen 13279442ed1STang Chen return 0; 13379442ed1STang Chen } 13479442ed1STang Chen 1357bd0b0f0STejun Heo /** 1361402899eSTang Chen * __memblock_find_range_top_down - find free area utility, in top-down 1371402899eSTang Chen * @start: start of candidate range 1381402899eSTang Chen * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE} 1391402899eSTang Chen * @size: size of free area to find 1401402899eSTang Chen * @align: alignment of free area to find 141b1154233SGrygorii Strashko * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 1421402899eSTang Chen * 1431402899eSTang Chen * Utility called from memblock_find_in_range_node(), find free area top-down. 1441402899eSTang Chen * 1451402899eSTang Chen * RETURNS: 14679442ed1STang Chen * Found address on success, 0 on failure. 1471402899eSTang Chen */ 1481402899eSTang Chen static phys_addr_t __init_memblock 1491402899eSTang Chen __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end, 1501402899eSTang Chen phys_addr_t size, phys_addr_t align, int nid) 1511402899eSTang Chen { 1521402899eSTang Chen phys_addr_t this_start, this_end, cand; 1531402899eSTang Chen u64 i; 1541402899eSTang Chen 1551402899eSTang Chen for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) { 1561402899eSTang Chen this_start = clamp(this_start, start, end); 1571402899eSTang Chen this_end = clamp(this_end, start, end); 1581402899eSTang Chen 1591402899eSTang Chen if (this_end < size) 1601402899eSTang Chen continue; 1611402899eSTang Chen 1621402899eSTang Chen cand = round_down(this_end - size, align); 1631402899eSTang Chen if (cand >= this_start) 1641402899eSTang Chen return cand; 1651402899eSTang Chen } 1661402899eSTang Chen 1671402899eSTang Chen return 0; 1681402899eSTang Chen } 1691402899eSTang Chen 1701402899eSTang Chen /** 1717bd0b0f0STejun Heo * memblock_find_in_range_node - find free area in given range and node 1727bd0b0f0STejun Heo * @size: size of free area to find 1737bd0b0f0STejun Heo * @align: alignment of free area to find 17487029ee9SGrygorii Strashko * @start: start of candidate range 17587029ee9SGrygorii Strashko * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE} 176b1154233SGrygorii Strashko * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 1777bd0b0f0STejun Heo * 1787bd0b0f0STejun Heo * Find @size free area aligned to @align in the specified range and node. 1797bd0b0f0STejun Heo * 18079442ed1STang Chen * When allocation direction is bottom-up, the @start should be greater 18179442ed1STang Chen * than the end of the kernel image. Otherwise, it will be trimmed. 
The 18279442ed1STang Chen * reason is that we want the bottom-up allocation just near the kernel 18379442ed1STang Chen * image so it is highly likely that the allocated memory and the kernel 18479442ed1STang Chen * will reside in the same node. 18579442ed1STang Chen * 18679442ed1STang Chen * If bottom-up allocation failed, will try to allocate memory top-down. 18779442ed1STang Chen * 1887bd0b0f0STejun Heo * RETURNS: 18979442ed1STang Chen * Found address on success, 0 on failure. 1906ed311b2SBenjamin Herrenschmidt */ 19187029ee9SGrygorii Strashko phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size, 19287029ee9SGrygorii Strashko phys_addr_t align, phys_addr_t start, 19387029ee9SGrygorii Strashko phys_addr_t end, int nid) 194f7210e6cSTang Chen { 1950cfb8f0cSTang Chen phys_addr_t kernel_end, ret; 19679442ed1STang Chen 197f7210e6cSTang Chen /* pump up @end */ 198f7210e6cSTang Chen if (end == MEMBLOCK_ALLOC_ACCESSIBLE) 199f7210e6cSTang Chen end = memblock.current_limit; 200f7210e6cSTang Chen 201f7210e6cSTang Chen /* avoid allocating the first page */ 202f7210e6cSTang Chen start = max_t(phys_addr_t, start, PAGE_SIZE); 203f7210e6cSTang Chen end = max(start, end); 20479442ed1STang Chen kernel_end = __pa_symbol(_end); 20579442ed1STang Chen 20679442ed1STang Chen /* 20779442ed1STang Chen * try bottom-up allocation only when bottom-up mode 20879442ed1STang Chen * is set and @end is above the kernel image. 20979442ed1STang Chen */ 21079442ed1STang Chen if (memblock_bottom_up() && end > kernel_end) { 21179442ed1STang Chen phys_addr_t bottom_up_start; 21279442ed1STang Chen 21379442ed1STang Chen /* make sure we will allocate above the kernel */ 21479442ed1STang Chen bottom_up_start = max(start, kernel_end); 21579442ed1STang Chen 21679442ed1STang Chen /* ok, try bottom-up allocation first */ 21779442ed1STang Chen ret = __memblock_find_range_bottom_up(bottom_up_start, end, 21879442ed1STang Chen size, align, nid); 21979442ed1STang Chen if (ret) 22079442ed1STang Chen return ret; 22179442ed1STang Chen 22279442ed1STang Chen /* 22379442ed1STang Chen * we always limit bottom-up allocation above the kernel, 22479442ed1STang Chen * but top-down allocation doesn't have the limit, so 22579442ed1STang Chen * retrying top-down allocation may succeed when bottom-up 22679442ed1STang Chen * allocation failed. 22779442ed1STang Chen * 22879442ed1STang Chen * bottom-up allocation is expected to be fail very rarely, 22979442ed1STang Chen * so we use WARN_ONCE() here to see the stack trace if 23079442ed1STang Chen * fail happens. 23179442ed1STang Chen */ 23279442ed1STang Chen WARN_ONCE(1, "memblock: bottom-up allocation failed, " 23379442ed1STang Chen "memory hotunplug may be affected\n"); 23479442ed1STang Chen } 235f7210e6cSTang Chen 2361402899eSTang Chen return __memblock_find_range_top_down(start, end, size, align, nid); 237f7210e6cSTang Chen } 2386ed311b2SBenjamin Herrenschmidt 2397bd0b0f0STejun Heo /** 2407bd0b0f0STejun Heo * memblock_find_in_range - find free area in given range 2417bd0b0f0STejun Heo * @start: start of candidate range 2427bd0b0f0STejun Heo * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE} 2437bd0b0f0STejun Heo * @size: size of free area to find 2447bd0b0f0STejun Heo * @align: alignment of free area to find 2457bd0b0f0STejun Heo * 2467bd0b0f0STejun Heo * Find @size free area aligned to @align in the specified range. 2477bd0b0f0STejun Heo * 2487bd0b0f0STejun Heo * RETURNS: 24979442ed1STang Chen * Found address on success, 0 on failure. 
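 *
 * Example (illustrative only; the limits and size are arbitrary): finding a
 * range does not protect it, so callers normally reserve it right away:
 *
 *	phys_addr_t buf;
 *
 *	buf = memblock_find_in_range(0, 0x100000000ULL, SZ_1M, SZ_1M);
 *	if (buf)
 *		memblock_reserve(buf, SZ_1M);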
2507bd0b0f0STejun Heo */ 2517bd0b0f0STejun Heo phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start, 2527bd0b0f0STejun Heo phys_addr_t end, phys_addr_t size, 2537bd0b0f0STejun Heo phys_addr_t align) 2547bd0b0f0STejun Heo { 25587029ee9SGrygorii Strashko return memblock_find_in_range_node(size, align, start, end, 256b1154233SGrygorii Strashko NUMA_NO_NODE); 2577bd0b0f0STejun Heo } 2587bd0b0f0STejun Heo 25910d06439SYinghai Lu static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r) 26095f72d1eSYinghai Lu { 2611440c4e2STejun Heo type->total_size -= type->regions[r].size; 2627c0caeb8STejun Heo memmove(&type->regions[r], &type->regions[r + 1], 2637c0caeb8STejun Heo (type->cnt - (r + 1)) * sizeof(type->regions[r])); 264e3239ff9SBenjamin Herrenschmidt type->cnt--; 26595f72d1eSYinghai Lu 2668f7a6605SBenjamin Herrenschmidt /* Special case for empty arrays */ 2678f7a6605SBenjamin Herrenschmidt if (type->cnt == 0) { 2681440c4e2STejun Heo WARN_ON(type->total_size != 0); 2698f7a6605SBenjamin Herrenschmidt type->cnt = 1; 2708f7a6605SBenjamin Herrenschmidt type->regions[0].base = 0; 2718f7a6605SBenjamin Herrenschmidt type->regions[0].size = 0; 27266a20757STang Chen type->regions[0].flags = 0; 2737c0caeb8STejun Heo memblock_set_region_node(&type->regions[0], MAX_NUMNODES); 2748f7a6605SBenjamin Herrenschmidt } 27595f72d1eSYinghai Lu } 27695f72d1eSYinghai Lu 277354f17e1SPhilipp Hachtmann #ifdef CONFIG_ARCH_DISCARD_MEMBLOCK 278354f17e1SPhilipp Hachtmann 27929f67386SYinghai Lu phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info( 28029f67386SYinghai Lu phys_addr_t *addr) 28129f67386SYinghai Lu { 28229f67386SYinghai Lu if (memblock.reserved.regions == memblock_reserved_init_regions) 28329f67386SYinghai Lu return 0; 28429f67386SYinghai Lu 28529f67386SYinghai Lu *addr = __pa(memblock.reserved.regions); 28629f67386SYinghai Lu 28729f67386SYinghai Lu return PAGE_ALIGN(sizeof(struct memblock_region) * 28829f67386SYinghai Lu memblock.reserved.max); 28929f67386SYinghai Lu } 29029f67386SYinghai Lu 2915e270e25SPhilipp Hachtmann phys_addr_t __init_memblock get_allocated_memblock_memory_regions_info( 2925e270e25SPhilipp Hachtmann phys_addr_t *addr) 2935e270e25SPhilipp Hachtmann { 2945e270e25SPhilipp Hachtmann if (memblock.memory.regions == memblock_memory_init_regions) 2955e270e25SPhilipp Hachtmann return 0; 2965e270e25SPhilipp Hachtmann 2975e270e25SPhilipp Hachtmann *addr = __pa(memblock.memory.regions); 2985e270e25SPhilipp Hachtmann 2995e270e25SPhilipp Hachtmann return PAGE_ALIGN(sizeof(struct memblock_region) * 3005e270e25SPhilipp Hachtmann memblock.memory.max); 3015e270e25SPhilipp Hachtmann } 3025e270e25SPhilipp Hachtmann 3035e270e25SPhilipp Hachtmann #endif 3045e270e25SPhilipp Hachtmann 30548c3b583SGreg Pearson /** 30648c3b583SGreg Pearson * memblock_double_array - double the size of the memblock regions array 30748c3b583SGreg Pearson * @type: memblock type of the regions array being doubled 30848c3b583SGreg Pearson * @new_area_start: starting address of memory range to avoid overlap with 30948c3b583SGreg Pearson * @new_area_size: size of memory range to avoid overlap with 31048c3b583SGreg Pearson * 31148c3b583SGreg Pearson * Double the size of the @type regions array. 
If memblock is being used to 31248c3b583SGreg Pearson * allocate memory for a new reserved regions array and there is a previously 31348c3b583SGreg Pearson * allocated memory range [@new_area_start,@new_area_start+@new_area_size] 31448c3b583SGreg Pearson * waiting to be reserved, ensure the memory used by the new array does 31548c3b583SGreg Pearson * not overlap. 31648c3b583SGreg Pearson * 31748c3b583SGreg Pearson * RETURNS: 31848c3b583SGreg Pearson * 0 on success, -1 on failure. 31948c3b583SGreg Pearson */ 32048c3b583SGreg Pearson static int __init_memblock memblock_double_array(struct memblock_type *type, 32148c3b583SGreg Pearson phys_addr_t new_area_start, 32248c3b583SGreg Pearson phys_addr_t new_area_size) 323142b45a7SBenjamin Herrenschmidt { 324142b45a7SBenjamin Herrenschmidt struct memblock_region *new_array, *old_array; 32529f67386SYinghai Lu phys_addr_t old_alloc_size, new_alloc_size; 326142b45a7SBenjamin Herrenschmidt phys_addr_t old_size, new_size, addr; 327142b45a7SBenjamin Herrenschmidt int use_slab = slab_is_available(); 328181eb394SGavin Shan int *in_slab; 329142b45a7SBenjamin Herrenschmidt 330142b45a7SBenjamin Herrenschmidt /* We don't allow resizing until we know about the reserved regions 331142b45a7SBenjamin Herrenschmidt * of memory that aren't suitable for allocation 332142b45a7SBenjamin Herrenschmidt */ 333142b45a7SBenjamin Herrenschmidt if (!memblock_can_resize) 334142b45a7SBenjamin Herrenschmidt return -1; 335142b45a7SBenjamin Herrenschmidt 336142b45a7SBenjamin Herrenschmidt /* Calculate new doubled size */ 337142b45a7SBenjamin Herrenschmidt old_size = type->max * sizeof(struct memblock_region); 338142b45a7SBenjamin Herrenschmidt new_size = old_size << 1; 33929f67386SYinghai Lu /* 34029f67386SYinghai Lu * We need to allocated new one align to PAGE_SIZE, 34129f67386SYinghai Lu * so we can free them completely later. 34229f67386SYinghai Lu */ 34329f67386SYinghai Lu old_alloc_size = PAGE_ALIGN(old_size); 34429f67386SYinghai Lu new_alloc_size = PAGE_ALIGN(new_size); 345142b45a7SBenjamin Herrenschmidt 346181eb394SGavin Shan /* Retrieve the slab flag */ 347181eb394SGavin Shan if (type == &memblock.memory) 348181eb394SGavin Shan in_slab = &memblock_memory_in_slab; 349181eb394SGavin Shan else 350181eb394SGavin Shan in_slab = &memblock_reserved_in_slab; 351181eb394SGavin Shan 352142b45a7SBenjamin Herrenschmidt /* Try to find some space for it. 353142b45a7SBenjamin Herrenschmidt * 354142b45a7SBenjamin Herrenschmidt * WARNING: We assume that either slab_is_available() and we use it or 355fd07383bSAndrew Morton * we use MEMBLOCK for allocations. That means that this is unsafe to 356fd07383bSAndrew Morton * use when bootmem is currently active (unless bootmem itself is 357fd07383bSAndrew Morton * implemented on top of MEMBLOCK which isn't the case yet) 358142b45a7SBenjamin Herrenschmidt * 359142b45a7SBenjamin Herrenschmidt * This should however not be an issue for now, as we currently only 360fd07383bSAndrew Morton * call into MEMBLOCK while it's still active, or much later when slab 361fd07383bSAndrew Morton * is active for memory hotplug operations 362142b45a7SBenjamin Herrenschmidt */ 363142b45a7SBenjamin Herrenschmidt if (use_slab) { 364142b45a7SBenjamin Herrenschmidt new_array = kmalloc(new_size, GFP_KERNEL); 3651f5026a7STejun Heo addr = new_array ? 
__pa(new_array) : 0; 3664e2f0775SGavin Shan } else { 36748c3b583SGreg Pearson /* only exclude range when trying to double reserved.regions */ 36848c3b583SGreg Pearson if (type != &memblock.reserved) 36948c3b583SGreg Pearson new_area_start = new_area_size = 0; 37048c3b583SGreg Pearson 37148c3b583SGreg Pearson addr = memblock_find_in_range(new_area_start + new_area_size, 37248c3b583SGreg Pearson memblock.current_limit, 37329f67386SYinghai Lu new_alloc_size, PAGE_SIZE); 37448c3b583SGreg Pearson if (!addr && new_area_size) 37548c3b583SGreg Pearson addr = memblock_find_in_range(0, 37648c3b583SGreg Pearson min(new_area_start, memblock.current_limit), 37729f67386SYinghai Lu new_alloc_size, PAGE_SIZE); 37848c3b583SGreg Pearson 37915674868SSachin Kamat new_array = addr ? __va(addr) : NULL; 3804e2f0775SGavin Shan } 3811f5026a7STejun Heo if (!addr) { 382142b45a7SBenjamin Herrenschmidt pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n", 383142b45a7SBenjamin Herrenschmidt memblock_type_name(type), type->max, type->max * 2); 384142b45a7SBenjamin Herrenschmidt return -1; 385142b45a7SBenjamin Herrenschmidt } 386142b45a7SBenjamin Herrenschmidt 387fd07383bSAndrew Morton memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]", 388fd07383bSAndrew Morton memblock_type_name(type), type->max * 2, (u64)addr, 389fd07383bSAndrew Morton (u64)addr + new_size - 1); 390ea9e4376SYinghai Lu 391fd07383bSAndrew Morton /* 392fd07383bSAndrew Morton * Found space, we now need to move the array over before we add the 393fd07383bSAndrew Morton * reserved region since it may be our reserved array itself that is 394fd07383bSAndrew Morton * full. 395142b45a7SBenjamin Herrenschmidt */ 396142b45a7SBenjamin Herrenschmidt memcpy(new_array, type->regions, old_size); 397142b45a7SBenjamin Herrenschmidt memset(new_array + type->max, 0, old_size); 398142b45a7SBenjamin Herrenschmidt old_array = type->regions; 399142b45a7SBenjamin Herrenschmidt type->regions = new_array; 400142b45a7SBenjamin Herrenschmidt type->max <<= 1; 401142b45a7SBenjamin Herrenschmidt 402fd07383bSAndrew Morton /* Free old array. We needn't free it if the array is the static one */ 403181eb394SGavin Shan if (*in_slab) 404181eb394SGavin Shan kfree(old_array); 405181eb394SGavin Shan else if (old_array != memblock_memory_init_regions && 406142b45a7SBenjamin Herrenschmidt old_array != memblock_reserved_init_regions) 40729f67386SYinghai Lu memblock_free(__pa(old_array), old_alloc_size); 408142b45a7SBenjamin Herrenschmidt 409fd07383bSAndrew Morton /* 410fd07383bSAndrew Morton * Reserve the new array if that comes from the memblock. Otherwise, we 411fd07383bSAndrew Morton * needn't do it 412181eb394SGavin Shan */ 413181eb394SGavin Shan if (!use_slab) 41429f67386SYinghai Lu BUG_ON(memblock_reserve(addr, new_alloc_size)); 415181eb394SGavin Shan 416181eb394SGavin Shan /* Update slab flag */ 417181eb394SGavin Shan *in_slab = use_slab; 418181eb394SGavin Shan 419142b45a7SBenjamin Herrenschmidt return 0; 420142b45a7SBenjamin Herrenschmidt } 421142b45a7SBenjamin Herrenschmidt 422784656f9STejun Heo /** 423784656f9STejun Heo * memblock_merge_regions - merge neighboring compatible regions 424784656f9STejun Heo * @type: memblock type to scan 425784656f9STejun Heo * 426784656f9STejun Heo * Scan @type and merge neighboring compatible regions. 
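 *
 * For example (arbitrary values), two neighbouring entries such as
 *
 *	[0x1000, 0x2000) nid=0 flags=0
 *	[0x2000, 0x3000) nid=0 flags=0
 *
 * collapse into the single region [0x1000, 0x3000), while entries that
 * differ in node id or flags, or that are not physically adjacent, are
 * left untouched.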
427784656f9STejun Heo */ 428784656f9STejun Heo static void __init_memblock memblock_merge_regions(struct memblock_type *type) 429784656f9STejun Heo { 430784656f9STejun Heo int i = 0; 431784656f9STejun Heo 432784656f9STejun Heo /* cnt never goes below 1 */ 433784656f9STejun Heo while (i < type->cnt - 1) { 434784656f9STejun Heo struct memblock_region *this = &type->regions[i]; 435784656f9STejun Heo struct memblock_region *next = &type->regions[i + 1]; 436784656f9STejun Heo 4377c0caeb8STejun Heo if (this->base + this->size != next->base || 4387c0caeb8STejun Heo memblock_get_region_node(this) != 43966a20757STang Chen memblock_get_region_node(next) || 44066a20757STang Chen this->flags != next->flags) { 441784656f9STejun Heo BUG_ON(this->base + this->size > next->base); 442784656f9STejun Heo i++; 443784656f9STejun Heo continue; 444784656f9STejun Heo } 445784656f9STejun Heo 446784656f9STejun Heo this->size += next->size; 447c0232ae8SLin Feng /* move forward from next + 1, index of which is i + 2 */ 448c0232ae8SLin Feng memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next)); 449784656f9STejun Heo type->cnt--; 450784656f9STejun Heo } 451784656f9STejun Heo } 452784656f9STejun Heo 453784656f9STejun Heo /** 454784656f9STejun Heo * memblock_insert_region - insert new memblock region 455784656f9STejun Heo * @type: memblock type to insert into 456784656f9STejun Heo * @idx: index for the insertion point 457784656f9STejun Heo * @base: base address of the new region 458784656f9STejun Heo * @size: size of the new region 459209ff86dSTang Chen * @nid: node id of the new region 46066a20757STang Chen * @flags: flags of the new region 461784656f9STejun Heo * 462784656f9STejun Heo * Insert new memblock region [@base,@base+@size) into @type at @idx. 463784656f9STejun Heo * @type must already have extra room to accomodate the new region. 464784656f9STejun Heo */ 465784656f9STejun Heo static void __init_memblock memblock_insert_region(struct memblock_type *type, 466784656f9STejun Heo int idx, phys_addr_t base, 46766a20757STang Chen phys_addr_t size, 46866a20757STang Chen int nid, unsigned long flags) 469784656f9STejun Heo { 470784656f9STejun Heo struct memblock_region *rgn = &type->regions[idx]; 471784656f9STejun Heo 472784656f9STejun Heo BUG_ON(type->cnt >= type->max); 473784656f9STejun Heo memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn)); 474784656f9STejun Heo rgn->base = base; 475784656f9STejun Heo rgn->size = size; 47666a20757STang Chen rgn->flags = flags; 4777c0caeb8STejun Heo memblock_set_region_node(rgn, nid); 478784656f9STejun Heo type->cnt++; 4791440c4e2STejun Heo type->total_size += size; 480784656f9STejun Heo } 481784656f9STejun Heo 482784656f9STejun Heo /** 483f1af9d3aSPhilipp Hachtmann * memblock_add_range - add new memblock region 484784656f9STejun Heo * @type: memblock type to add new region into 485784656f9STejun Heo * @base: base address of the new region 486784656f9STejun Heo * @size: size of the new region 4877fb0bc3fSTejun Heo * @nid: nid of the new region 48866a20757STang Chen * @flags: flags of the new region 489784656f9STejun Heo * 490784656f9STejun Heo * Add new memblock region [@base,@base+@size) into @type. The new region 491784656f9STejun Heo * is allowed to overlap with existing ones - overlaps don't affect already 492784656f9STejun Heo * existing regions. @type is guaranteed to be minimal (all neighbouring 493784656f9STejun Heo * compatible regions are merged) after the addition. 
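 *
 * For instance (arbitrary addresses, matching node and flags assumed):
 * adding [0x1000, 0x3000) to a type that already holds [0x1800, 0x2000)
 * inserts the head [0x1000, 0x1800) and the tail [0x2000, 0x3000), leaves
 * the existing middle region untouched, and the final merge pass collapses
 * all three into the single region [0x1000, 0x3000).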
494784656f9STejun Heo * 495784656f9STejun Heo * RETURNS: 496784656f9STejun Heo * 0 on success, -errno on failure. 497784656f9STejun Heo */ 498f1af9d3aSPhilipp Hachtmann int __init_memblock memblock_add_range(struct memblock_type *type, 49966a20757STang Chen phys_addr_t base, phys_addr_t size, 50066a20757STang Chen int nid, unsigned long flags) 50195f72d1eSYinghai Lu { 502784656f9STejun Heo bool insert = false; 503eb18f1b5STejun Heo phys_addr_t obase = base; 504eb18f1b5STejun Heo phys_addr_t end = base + memblock_cap_size(base, &size); 505784656f9STejun Heo int i, nr_new; 50695f72d1eSYinghai Lu 507b3dc627cSTejun Heo if (!size) 508b3dc627cSTejun Heo return 0; 509b3dc627cSTejun Heo 510784656f9STejun Heo /* special case for empty array */ 511784656f9STejun Heo if (type->regions[0].size == 0) { 5121440c4e2STejun Heo WARN_ON(type->cnt != 1 || type->total_size); 513784656f9STejun Heo type->regions[0].base = base; 514784656f9STejun Heo type->regions[0].size = size; 51566a20757STang Chen type->regions[0].flags = flags; 5167fb0bc3fSTejun Heo memblock_set_region_node(&type->regions[0], nid); 5171440c4e2STejun Heo type->total_size = size; 518784656f9STejun Heo return 0; 519784656f9STejun Heo } 520784656f9STejun Heo repeat: 521784656f9STejun Heo /* 522784656f9STejun Heo * The following is executed twice. Once with %false @insert and 523784656f9STejun Heo * then with %true. The first counts the number of regions needed 524784656f9STejun Heo * to accomodate the new area. The second actually inserts them. 525784656f9STejun Heo */ 526784656f9STejun Heo base = obase; 527784656f9STejun Heo nr_new = 0; 528784656f9STejun Heo 5298f7a6605SBenjamin Herrenschmidt for (i = 0; i < type->cnt; i++) { 5308f7a6605SBenjamin Herrenschmidt struct memblock_region *rgn = &type->regions[i]; 531784656f9STejun Heo phys_addr_t rbase = rgn->base; 532784656f9STejun Heo phys_addr_t rend = rbase + rgn->size; 5338f7a6605SBenjamin Herrenschmidt 534784656f9STejun Heo if (rbase >= end) 5358f7a6605SBenjamin Herrenschmidt break; 536784656f9STejun Heo if (rend <= base) 537784656f9STejun Heo continue; 538784656f9STejun Heo /* 539784656f9STejun Heo * @rgn overlaps. If it separates the lower part of new 540784656f9STejun Heo * area, insert that portion. 5418f7a6605SBenjamin Herrenschmidt */ 542784656f9STejun Heo if (rbase > base) { 543784656f9STejun Heo nr_new++; 544784656f9STejun Heo if (insert) 545784656f9STejun Heo memblock_insert_region(type, i++, base, 54666a20757STang Chen rbase - base, nid, 54766a20757STang Chen flags); 548784656f9STejun Heo } 549784656f9STejun Heo /* area below @rend is dealt with, forget about it */ 550784656f9STejun Heo base = min(rend, end); 5518f7a6605SBenjamin Herrenschmidt } 5528f7a6605SBenjamin Herrenschmidt 553784656f9STejun Heo /* insert the remaining portion */ 554784656f9STejun Heo if (base < end) { 555784656f9STejun Heo nr_new++; 556784656f9STejun Heo if (insert) 55766a20757STang Chen memblock_insert_region(type, i, base, end - base, 55866a20757STang Chen nid, flags); 5598f7a6605SBenjamin Herrenschmidt } 5608f7a6605SBenjamin Herrenschmidt 561784656f9STejun Heo /* 562784656f9STejun Heo * If this was the first round, resize array and repeat for actual 563784656f9STejun Heo * insertions; otherwise, merge and return. 
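	 *
	 * For instance (illustrative), a new area overlapping two existing
	 * regions can require up to three insertions (head, gap and tail),
	 * so the counting pass may grow the array one or more times before
	 * the insertion pass runs.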
5648f7a6605SBenjamin Herrenschmidt */ 565784656f9STejun Heo if (!insert) { 566784656f9STejun Heo while (type->cnt + nr_new > type->max) 56748c3b583SGreg Pearson if (memblock_double_array(type, obase, size) < 0) 568784656f9STejun Heo return -ENOMEM; 569784656f9STejun Heo insert = true; 570784656f9STejun Heo goto repeat; 57195f72d1eSYinghai Lu } else { 572784656f9STejun Heo memblock_merge_regions(type); 57395f72d1eSYinghai Lu return 0; 57495f72d1eSYinghai Lu } 575784656f9STejun Heo } 57695f72d1eSYinghai Lu 5777fb0bc3fSTejun Heo int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size, 5787fb0bc3fSTejun Heo int nid) 5797fb0bc3fSTejun Heo { 580f1af9d3aSPhilipp Hachtmann return memblock_add_range(&memblock.memory, base, size, nid, 0); 5817fb0bc3fSTejun Heo } 5827fb0bc3fSTejun Heo 583*6a4055bcSAlexander Kuleshov static int __init_memblock memblock_add_region(phys_addr_t base, 584*6a4055bcSAlexander Kuleshov phys_addr_t size, 585*6a4055bcSAlexander Kuleshov int nid, 586*6a4055bcSAlexander Kuleshov unsigned long flags) 587*6a4055bcSAlexander Kuleshov { 588*6a4055bcSAlexander Kuleshov struct memblock_type *_rgn = &memblock.memory; 589*6a4055bcSAlexander Kuleshov 590*6a4055bcSAlexander Kuleshov memblock_dbg("memblock_add: [%#016llx-%#016llx] flags %#02lx %pF\n", 591*6a4055bcSAlexander Kuleshov (unsigned long long)base, 592*6a4055bcSAlexander Kuleshov (unsigned long long)base + size - 1, 593*6a4055bcSAlexander Kuleshov flags, (void *)_RET_IP_); 594*6a4055bcSAlexander Kuleshov 595*6a4055bcSAlexander Kuleshov return memblock_add_range(_rgn, base, size, nid, flags); 596*6a4055bcSAlexander Kuleshov } 597*6a4055bcSAlexander Kuleshov 598581adcbeSTejun Heo int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size) 59995f72d1eSYinghai Lu { 600*6a4055bcSAlexander Kuleshov return memblock_add_region(base, size, MAX_NUMNODES, 0); 60195f72d1eSYinghai Lu } 60295f72d1eSYinghai Lu 6036a9ceb31STejun Heo /** 6046a9ceb31STejun Heo * memblock_isolate_range - isolate given range into disjoint memblocks 6056a9ceb31STejun Heo * @type: memblock type to isolate range for 6066a9ceb31STejun Heo * @base: base of range to isolate 6076a9ceb31STejun Heo * @size: size of range to isolate 6086a9ceb31STejun Heo * @start_rgn: out parameter for the start of isolated region 6096a9ceb31STejun Heo * @end_rgn: out parameter for the end of isolated region 6106a9ceb31STejun Heo * 6116a9ceb31STejun Heo * Walk @type and ensure that regions don't cross the boundaries defined by 6126a9ceb31STejun Heo * [@base,@base+@size). Crossing regions are split at the boundaries, 6136a9ceb31STejun Heo * which may create at most two more regions. The index of the first 6146a9ceb31STejun Heo * region inside the range is returned in *@start_rgn and end in *@end_rgn. 6156a9ceb31STejun Heo * 6166a9ceb31STejun Heo * RETURNS: 6176a9ceb31STejun Heo * 0 on success, -errno on failure. 
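 *
 * Example (arbitrary addresses): isolating [0x1000, 0x3000) from a type
 * holding the single region [0x0, 0x4000) splits it into [0x0, 0x1000),
 * [0x1000, 0x3000) and [0x3000, 0x4000), with *@start_rgn = 1 and
 * *@end_rgn = 2 delimiting the middle region.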
6186a9ceb31STejun Heo */ 6196a9ceb31STejun Heo static int __init_memblock memblock_isolate_range(struct memblock_type *type, 6206a9ceb31STejun Heo phys_addr_t base, phys_addr_t size, 6216a9ceb31STejun Heo int *start_rgn, int *end_rgn) 6226a9ceb31STejun Heo { 623eb18f1b5STejun Heo phys_addr_t end = base + memblock_cap_size(base, &size); 6246a9ceb31STejun Heo int i; 6256a9ceb31STejun Heo 6266a9ceb31STejun Heo *start_rgn = *end_rgn = 0; 6276a9ceb31STejun Heo 628b3dc627cSTejun Heo if (!size) 629b3dc627cSTejun Heo return 0; 630b3dc627cSTejun Heo 6316a9ceb31STejun Heo /* we'll create at most two more regions */ 6326a9ceb31STejun Heo while (type->cnt + 2 > type->max) 63348c3b583SGreg Pearson if (memblock_double_array(type, base, size) < 0) 6346a9ceb31STejun Heo return -ENOMEM; 6356a9ceb31STejun Heo 6366a9ceb31STejun Heo for (i = 0; i < type->cnt; i++) { 6376a9ceb31STejun Heo struct memblock_region *rgn = &type->regions[i]; 6386a9ceb31STejun Heo phys_addr_t rbase = rgn->base; 6396a9ceb31STejun Heo phys_addr_t rend = rbase + rgn->size; 6406a9ceb31STejun Heo 6416a9ceb31STejun Heo if (rbase >= end) 6426a9ceb31STejun Heo break; 6436a9ceb31STejun Heo if (rend <= base) 6446a9ceb31STejun Heo continue; 6456a9ceb31STejun Heo 6466a9ceb31STejun Heo if (rbase < base) { 6476a9ceb31STejun Heo /* 6486a9ceb31STejun Heo * @rgn intersects from below. Split and continue 6496a9ceb31STejun Heo * to process the next region - the new top half. 6506a9ceb31STejun Heo */ 6516a9ceb31STejun Heo rgn->base = base; 6521440c4e2STejun Heo rgn->size -= base - rbase; 6531440c4e2STejun Heo type->total_size -= base - rbase; 6546a9ceb31STejun Heo memblock_insert_region(type, i, rbase, base - rbase, 65566a20757STang Chen memblock_get_region_node(rgn), 65666a20757STang Chen rgn->flags); 6576a9ceb31STejun Heo } else if (rend > end) { 6586a9ceb31STejun Heo /* 6596a9ceb31STejun Heo * @rgn intersects from above. Split and redo the 6606a9ceb31STejun Heo * current region - the new bottom half. 
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, i--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = i;
			*end_rgn = i + 1;
		}
	}

	return 0;
}

int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return memblock_remove_range(&memblock.memory, base, size);
}


int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("   memblock_free: [%#016llx-%#016llx] %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     (void *)_RET_IP_);

	kmemleak_free_part(__va(base), size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

static int __init_memblock memblock_reserve_region(phys_addr_t base,
						   phys_addr_t size,
						   int nid,
						   unsigned long flags)
{
	struct memblock_type *type = &memblock.reserved;

	memblock_dbg("memblock_reserve: [%#016llx-%#016llx] flags %#02lx %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     flags, (void *)_RET_IP_);

	return memblock_add_range(type, base, size, nid, flags);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	return memblock_reserve_region(base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_setclr_flag - set or clear a flag on a range of memory regions
 * @base: base address of the region
 * @size: size of the region
 * @set: set (non-zero) or clear (zero) @flag
 * @flag: the flag to set or clear, e.g. %MEMBLOCK_HOTPLUG
 *
 * This function isolates the region [@base, @base + @size) and sets/clears
 * @flag on every region inside it.
 *
 * Return 0 on success, -errno on failure.
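 *
 * Illustrative sketch (the address and size are hypothetical): early boot
 * code could mark a 256MB range starting at 4GB as hotpluggable through
 * the wrapper below, so that movable_node-aware iterators skip it later:
 *
 *	if (memblock_mark_hotplug(0x100000000ULL, SZ_256M))
 *		pr_warn("memblock: failed to mark range hotpluggable\n");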
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
				phys_addr_t size, int set, int flag)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		if (set)
			memblock_set_region_flags(&type->regions[i], flag);
		else
			memblock_clear_region_flags(&type->regions[i], flag);

	memblock_merge_regions(type);
	return 0;
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.  For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
80035fd0808STejun Heo * 80135fd0808STejun Heo * 0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX) 80235fd0808STejun Heo * 80335fd0808STejun Heo * As both region arrays are sorted, the function advances the two indices 80435fd0808STejun Heo * in lockstep and returns each intersection. 80535fd0808STejun Heo */ 806f1af9d3aSPhilipp Hachtmann void __init_memblock __next_mem_range(u64 *idx, int nid, 807f1af9d3aSPhilipp Hachtmann struct memblock_type *type_a, 808f1af9d3aSPhilipp Hachtmann struct memblock_type *type_b, 80935fd0808STejun Heo phys_addr_t *out_start, 81035fd0808STejun Heo phys_addr_t *out_end, int *out_nid) 81135fd0808STejun Heo { 812f1af9d3aSPhilipp Hachtmann int idx_a = *idx & 0xffffffff; 813f1af9d3aSPhilipp Hachtmann int idx_b = *idx >> 32; 814b1154233SGrygorii Strashko 815f1af9d3aSPhilipp Hachtmann if (WARN_ONCE(nid == MAX_NUMNODES, 816f1af9d3aSPhilipp Hachtmann "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n")) 817560dca27SGrygorii Strashko nid = NUMA_NO_NODE; 81835fd0808STejun Heo 819f1af9d3aSPhilipp Hachtmann for (; idx_a < type_a->cnt; idx_a++) { 820f1af9d3aSPhilipp Hachtmann struct memblock_region *m = &type_a->regions[idx_a]; 821f1af9d3aSPhilipp Hachtmann 82235fd0808STejun Heo phys_addr_t m_start = m->base; 82335fd0808STejun Heo phys_addr_t m_end = m->base + m->size; 824f1af9d3aSPhilipp Hachtmann int m_nid = memblock_get_region_node(m); 82535fd0808STejun Heo 82635fd0808STejun Heo /* only memory regions are associated with nodes, check it */ 827f1af9d3aSPhilipp Hachtmann if (nid != NUMA_NO_NODE && nid != m_nid) 82835fd0808STejun Heo continue; 82935fd0808STejun Heo 8300a313a99SXishi Qiu /* skip hotpluggable memory regions if needed */ 8310a313a99SXishi Qiu if (movable_node_is_enabled() && memblock_is_hotpluggable(m)) 8320a313a99SXishi Qiu continue; 8330a313a99SXishi Qiu 834f1af9d3aSPhilipp Hachtmann if (!type_b) { 835f1af9d3aSPhilipp Hachtmann if (out_start) 836f1af9d3aSPhilipp Hachtmann *out_start = m_start; 837f1af9d3aSPhilipp Hachtmann if (out_end) 838f1af9d3aSPhilipp Hachtmann *out_end = m_end; 839f1af9d3aSPhilipp Hachtmann if (out_nid) 840f1af9d3aSPhilipp Hachtmann *out_nid = m_nid; 841f1af9d3aSPhilipp Hachtmann idx_a++; 842f1af9d3aSPhilipp Hachtmann *idx = (u32)idx_a | (u64)idx_b << 32; 843f1af9d3aSPhilipp Hachtmann return; 844f1af9d3aSPhilipp Hachtmann } 84535fd0808STejun Heo 846f1af9d3aSPhilipp Hachtmann /* scan areas before each reservation */ 847f1af9d3aSPhilipp Hachtmann for (; idx_b < type_b->cnt + 1; idx_b++) { 848f1af9d3aSPhilipp Hachtmann struct memblock_region *r; 849f1af9d3aSPhilipp Hachtmann phys_addr_t r_start; 850f1af9d3aSPhilipp Hachtmann phys_addr_t r_end; 851f1af9d3aSPhilipp Hachtmann 852f1af9d3aSPhilipp Hachtmann r = &type_b->regions[idx_b]; 853f1af9d3aSPhilipp Hachtmann r_start = idx_b ? r[-1].base + r[-1].size : 0; 854f1af9d3aSPhilipp Hachtmann r_end = idx_b < type_b->cnt ? 
855f1af9d3aSPhilipp Hachtmann r->base : ULLONG_MAX; 856f1af9d3aSPhilipp Hachtmann 857f1af9d3aSPhilipp Hachtmann /* 858f1af9d3aSPhilipp Hachtmann * if idx_b advanced past idx_a, 859f1af9d3aSPhilipp Hachtmann * break out to advance idx_a 860f1af9d3aSPhilipp Hachtmann */ 86135fd0808STejun Heo if (r_start >= m_end) 86235fd0808STejun Heo break; 86335fd0808STejun Heo /* if the two regions intersect, we're done */ 86435fd0808STejun Heo if (m_start < r_end) { 86535fd0808STejun Heo if (out_start) 866f1af9d3aSPhilipp Hachtmann *out_start = 867f1af9d3aSPhilipp Hachtmann max(m_start, r_start); 86835fd0808STejun Heo if (out_end) 86935fd0808STejun Heo *out_end = min(m_end, r_end); 87035fd0808STejun Heo if (out_nid) 871f1af9d3aSPhilipp Hachtmann *out_nid = m_nid; 87235fd0808STejun Heo /* 873f1af9d3aSPhilipp Hachtmann * The region which ends first is 874f1af9d3aSPhilipp Hachtmann * advanced for the next iteration. 87535fd0808STejun Heo */ 87635fd0808STejun Heo if (m_end <= r_end) 877f1af9d3aSPhilipp Hachtmann idx_a++; 87835fd0808STejun Heo else 879f1af9d3aSPhilipp Hachtmann idx_b++; 880f1af9d3aSPhilipp Hachtmann *idx = (u32)idx_a | (u64)idx_b << 32; 88135fd0808STejun Heo return; 88235fd0808STejun Heo } 88335fd0808STejun Heo } 88435fd0808STejun Heo } 88535fd0808STejun Heo 88635fd0808STejun Heo /* signal end of iteration */ 88735fd0808STejun Heo *idx = ULLONG_MAX; 88835fd0808STejun Heo } 88935fd0808STejun Heo 8907bd0b0f0STejun Heo /** 891f1af9d3aSPhilipp Hachtmann * __next_mem_range_rev - generic next function for for_each_*_range_rev() 892f1af9d3aSPhilipp Hachtmann * 893f1af9d3aSPhilipp Hachtmann * Finds the next range from type_a which is not marked as unsuitable 894f1af9d3aSPhilipp Hachtmann * in type_b. 895f1af9d3aSPhilipp Hachtmann * 8967bd0b0f0STejun Heo * @idx: pointer to u64 loop variable 897b1154233SGrygorii Strashko * @nid: nid: node selector, %NUMA_NO_NODE for all nodes 898f1af9d3aSPhilipp Hachtmann * @type_a: pointer to memblock_type from where the range is taken 899f1af9d3aSPhilipp Hachtmann * @type_b: pointer to memblock_type which excludes memory from being taken 900dad7557eSWanpeng Li * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL 901dad7557eSWanpeng Li * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL 902dad7557eSWanpeng Li * @out_nid: ptr to int for nid of the range, can be %NULL 9037bd0b0f0STejun Heo * 904f1af9d3aSPhilipp Hachtmann * Reverse of __next_mem_range(). 9057bd0b0f0STejun Heo */ 906f1af9d3aSPhilipp Hachtmann void __init_memblock __next_mem_range_rev(u64 *idx, int nid, 907f1af9d3aSPhilipp Hachtmann struct memblock_type *type_a, 908f1af9d3aSPhilipp Hachtmann struct memblock_type *type_b, 9097bd0b0f0STejun Heo phys_addr_t *out_start, 9107bd0b0f0STejun Heo phys_addr_t *out_end, int *out_nid) 9117bd0b0f0STejun Heo { 912f1af9d3aSPhilipp Hachtmann int idx_a = *idx & 0xffffffff; 913f1af9d3aSPhilipp Hachtmann int idx_b = *idx >> 32; 914b1154233SGrygorii Strashko 915560dca27SGrygorii Strashko if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. 
Use NUMA_NO_NODE instead\n")) 916560dca27SGrygorii Strashko nid = NUMA_NO_NODE; 9177bd0b0f0STejun Heo 9187bd0b0f0STejun Heo if (*idx == (u64)ULLONG_MAX) { 919f1af9d3aSPhilipp Hachtmann idx_a = type_a->cnt - 1; 920f1af9d3aSPhilipp Hachtmann idx_b = type_b->cnt; 9217bd0b0f0STejun Heo } 9227bd0b0f0STejun Heo 923f1af9d3aSPhilipp Hachtmann for (; idx_a >= 0; idx_a--) { 924f1af9d3aSPhilipp Hachtmann struct memblock_region *m = &type_a->regions[idx_a]; 925f1af9d3aSPhilipp Hachtmann 9267bd0b0f0STejun Heo phys_addr_t m_start = m->base; 9277bd0b0f0STejun Heo phys_addr_t m_end = m->base + m->size; 928f1af9d3aSPhilipp Hachtmann int m_nid = memblock_get_region_node(m); 9297bd0b0f0STejun Heo 9307bd0b0f0STejun Heo /* only memory regions are associated with nodes, check it */ 931f1af9d3aSPhilipp Hachtmann if (nid != NUMA_NO_NODE && nid != m_nid) 9327bd0b0f0STejun Heo continue; 9337bd0b0f0STejun Heo 93455ac590cSTang Chen /* skip hotpluggable memory regions if needed */ 93555ac590cSTang Chen if (movable_node_is_enabled() && memblock_is_hotpluggable(m)) 93655ac590cSTang Chen continue; 93755ac590cSTang Chen 938f1af9d3aSPhilipp Hachtmann if (!type_b) { 939f1af9d3aSPhilipp Hachtmann if (out_start) 940f1af9d3aSPhilipp Hachtmann *out_start = m_start; 941f1af9d3aSPhilipp Hachtmann if (out_end) 942f1af9d3aSPhilipp Hachtmann *out_end = m_end; 943f1af9d3aSPhilipp Hachtmann if (out_nid) 944f1af9d3aSPhilipp Hachtmann *out_nid = m_nid; 945f1af9d3aSPhilipp Hachtmann idx_a++; 946f1af9d3aSPhilipp Hachtmann *idx = (u32)idx_a | (u64)idx_b << 32; 947f1af9d3aSPhilipp Hachtmann return; 948f1af9d3aSPhilipp Hachtmann } 9497bd0b0f0STejun Heo 950f1af9d3aSPhilipp Hachtmann /* scan areas before each reservation */ 951f1af9d3aSPhilipp Hachtmann for (; idx_b >= 0; idx_b--) { 952f1af9d3aSPhilipp Hachtmann struct memblock_region *r; 953f1af9d3aSPhilipp Hachtmann phys_addr_t r_start; 954f1af9d3aSPhilipp Hachtmann phys_addr_t r_end; 955f1af9d3aSPhilipp Hachtmann 956f1af9d3aSPhilipp Hachtmann r = &type_b->regions[idx_b]; 957f1af9d3aSPhilipp Hachtmann r_start = idx_b ? r[-1].base + r[-1].size : 0; 958f1af9d3aSPhilipp Hachtmann r_end = idx_b < type_b->cnt ? 959f1af9d3aSPhilipp Hachtmann r->base : ULLONG_MAX; 960f1af9d3aSPhilipp Hachtmann /* 961f1af9d3aSPhilipp Hachtmann * if idx_b advanced past idx_a, 962f1af9d3aSPhilipp Hachtmann * break out to advance idx_a 963f1af9d3aSPhilipp Hachtmann */ 964f1af9d3aSPhilipp Hachtmann 9657bd0b0f0STejun Heo if (r_end <= m_start) 9667bd0b0f0STejun Heo break; 9677bd0b0f0STejun Heo /* if the two regions intersect, we're done */ 9687bd0b0f0STejun Heo if (m_end > r_start) { 9697bd0b0f0STejun Heo if (out_start) 9707bd0b0f0STejun Heo *out_start = max(m_start, r_start); 9717bd0b0f0STejun Heo if (out_end) 9727bd0b0f0STejun Heo *out_end = min(m_end, r_end); 9737bd0b0f0STejun Heo if (out_nid) 974f1af9d3aSPhilipp Hachtmann *out_nid = m_nid; 9757bd0b0f0STejun Heo if (m_start >= r_start) 976f1af9d3aSPhilipp Hachtmann idx_a--; 9777bd0b0f0STejun Heo else 978f1af9d3aSPhilipp Hachtmann idx_b--; 979f1af9d3aSPhilipp Hachtmann *idx = (u32)idx_a | (u64)idx_b << 32; 9807bd0b0f0STejun Heo return; 9817bd0b0f0STejun Heo } 9827bd0b0f0STejun Heo } 9837bd0b0f0STejun Heo } 984f1af9d3aSPhilipp Hachtmann /* signal end of iteration */ 9857bd0b0f0STejun Heo *idx = ULLONG_MAX; 9867bd0b0f0STejun Heo } 9877bd0b0f0STejun Heo 9887c0caeb8STejun Heo #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 9897c0caeb8STejun Heo /* 9907c0caeb8STejun Heo * Common iterator interface used to define for_each_mem_range(). 
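 *
 * Illustrative sketch (the loop variables are hypothetical): a typical
 * caller walks every memory region via the wrapping macro, e.g.
 *
 *	unsigned long start_pfn, end_pfn;
 *	int i, nid;
 *
 *	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 *		pr_info("node %d: PFNs [%lx, %lx)\n", nid, start_pfn, end_pfn);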
9917c0caeb8STejun Heo */ 9927c0caeb8STejun Heo void __init_memblock __next_mem_pfn_range(int *idx, int nid, 9937c0caeb8STejun Heo unsigned long *out_start_pfn, 9947c0caeb8STejun Heo unsigned long *out_end_pfn, int *out_nid) 9957c0caeb8STejun Heo { 9967c0caeb8STejun Heo struct memblock_type *type = &memblock.memory; 9977c0caeb8STejun Heo struct memblock_region *r; 9987c0caeb8STejun Heo 9997c0caeb8STejun Heo while (++*idx < type->cnt) { 10007c0caeb8STejun Heo r = &type->regions[*idx]; 10017c0caeb8STejun Heo 10027c0caeb8STejun Heo if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size)) 10037c0caeb8STejun Heo continue; 10047c0caeb8STejun Heo if (nid == MAX_NUMNODES || nid == r->nid) 10057c0caeb8STejun Heo break; 10067c0caeb8STejun Heo } 10077c0caeb8STejun Heo if (*idx >= type->cnt) { 10087c0caeb8STejun Heo *idx = -1; 10097c0caeb8STejun Heo return; 10107c0caeb8STejun Heo } 10117c0caeb8STejun Heo 10127c0caeb8STejun Heo if (out_start_pfn) 10137c0caeb8STejun Heo *out_start_pfn = PFN_UP(r->base); 10147c0caeb8STejun Heo if (out_end_pfn) 10157c0caeb8STejun Heo *out_end_pfn = PFN_DOWN(r->base + r->size); 10167c0caeb8STejun Heo if (out_nid) 10177c0caeb8STejun Heo *out_nid = r->nid; 10187c0caeb8STejun Heo } 10197c0caeb8STejun Heo 10207c0caeb8STejun Heo /** 10217c0caeb8STejun Heo * memblock_set_node - set node ID on memblock regions 10227c0caeb8STejun Heo * @base: base of area to set node ID for 10237c0caeb8STejun Heo * @size: size of area to set node ID for 1024e7e8de59STang Chen * @type: memblock type to set node ID for 10257c0caeb8STejun Heo * @nid: node ID to set 10267c0caeb8STejun Heo * 1027e7e8de59STang Chen * Set the nid of memblock @type regions in [@base,@base+@size) to @nid. 10287c0caeb8STejun Heo * Regions which cross the area boundaries are split as necessary. 10297c0caeb8STejun Heo * 10307c0caeb8STejun Heo * RETURNS: 10317c0caeb8STejun Heo * 0 on success, -errno on failure. 10327c0caeb8STejun Heo */ 10337c0caeb8STejun Heo int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size, 1034e7e8de59STang Chen struct memblock_type *type, int nid) 10357c0caeb8STejun Heo { 10366a9ceb31STejun Heo int start_rgn, end_rgn; 10376a9ceb31STejun Heo int i, ret; 10387c0caeb8STejun Heo 10396a9ceb31STejun Heo ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn); 10406a9ceb31STejun Heo if (ret) 10416a9ceb31STejun Heo return ret; 10427c0caeb8STejun Heo 10436a9ceb31STejun Heo for (i = start_rgn; i < end_rgn; i++) 1044e9d24ad3SWanpeng Li memblock_set_region_node(&type->regions[i], nid); 10457c0caeb8STejun Heo 10467c0caeb8STejun Heo memblock_merge_regions(type); 10477c0caeb8STejun Heo return 0; 10487c0caeb8STejun Heo } 10497c0caeb8STejun Heo #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ 10507c0caeb8STejun Heo 10512bfc2862SAkinobu Mita static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size, 10522bfc2862SAkinobu Mita phys_addr_t align, phys_addr_t start, 10532bfc2862SAkinobu Mita phys_addr_t end, int nid) 105495f72d1eSYinghai Lu { 10556ed311b2SBenjamin Herrenschmidt phys_addr_t found; 105695f72d1eSYinghai Lu 105779f40fabSGrygorii Strashko if (!align) 105879f40fabSGrygorii Strashko align = SMP_CACHE_BYTES; 105994f3d3afSVineet Gupta 10602bfc2862SAkinobu Mita found = memblock_find_in_range_node(size, align, start, end, nid); 1061aedf95eaSCatalin Marinas if (found && !memblock_reserve(found, size)) { 1062aedf95eaSCatalin Marinas /* 1063aedf95eaSCatalin Marinas * The min_count is set to 0 so that memblock allocations are 1064aedf95eaSCatalin Marinas * never reported as leaks. 
1065aedf95eaSCatalin Marinas */ 1066aedf95eaSCatalin Marinas kmemleak_alloc(__va(found), size, 0, 0); 10676ed311b2SBenjamin Herrenschmidt return found; 1068aedf95eaSCatalin Marinas } 10696ed311b2SBenjamin Herrenschmidt return 0; 107095f72d1eSYinghai Lu } 107195f72d1eSYinghai Lu 10722bfc2862SAkinobu Mita phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align, 10732bfc2862SAkinobu Mita phys_addr_t start, phys_addr_t end) 10742bfc2862SAkinobu Mita { 10752bfc2862SAkinobu Mita return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE); 10762bfc2862SAkinobu Mita } 10772bfc2862SAkinobu Mita 10782bfc2862SAkinobu Mita static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size, 10792bfc2862SAkinobu Mita phys_addr_t align, phys_addr_t max_addr, 10802bfc2862SAkinobu Mita int nid) 10812bfc2862SAkinobu Mita { 10822bfc2862SAkinobu Mita return memblock_alloc_range_nid(size, align, 0, max_addr, nid); 10832bfc2862SAkinobu Mita } 10842bfc2862SAkinobu Mita 10857bd0b0f0STejun Heo phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid) 10867bd0b0f0STejun Heo { 10877bd0b0f0STejun Heo return memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE, nid); 10887bd0b0f0STejun Heo } 10897bd0b0f0STejun Heo 10907bd0b0f0STejun Heo phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr) 10917bd0b0f0STejun Heo { 1092b1154233SGrygorii Strashko return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE); 10937bd0b0f0STejun Heo } 10947bd0b0f0STejun Heo 10956ed311b2SBenjamin Herrenschmidt phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr) 109695f72d1eSYinghai Lu { 10976ed311b2SBenjamin Herrenschmidt phys_addr_t alloc; 10986ed311b2SBenjamin Herrenschmidt 10996ed311b2SBenjamin Herrenschmidt alloc = __memblock_alloc_base(size, align, max_addr); 11006ed311b2SBenjamin Herrenschmidt 11016ed311b2SBenjamin Herrenschmidt if (alloc == 0) 11026ed311b2SBenjamin Herrenschmidt panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n", 11036ed311b2SBenjamin Herrenschmidt (unsigned long long) size, (unsigned long long) max_addr); 11046ed311b2SBenjamin Herrenschmidt 11056ed311b2SBenjamin Herrenschmidt return alloc; 110695f72d1eSYinghai Lu } 110795f72d1eSYinghai Lu 11086ed311b2SBenjamin Herrenschmidt phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align) 110995f72d1eSYinghai Lu { 11106ed311b2SBenjamin Herrenschmidt return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE); 111195f72d1eSYinghai Lu } 111295f72d1eSYinghai Lu 11139d1e2492SBenjamin Herrenschmidt phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid) 11149d1e2492SBenjamin Herrenschmidt { 11159d1e2492SBenjamin Herrenschmidt phys_addr_t res = memblock_alloc_nid(size, align, nid); 11169d1e2492SBenjamin Herrenschmidt 11179d1e2492SBenjamin Herrenschmidt if (res) 11189d1e2492SBenjamin Herrenschmidt return res; 111915fb0972STejun Heo return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE); 112095f72d1eSYinghai Lu } 112195f72d1eSYinghai Lu 112226f09e9bSSantosh Shilimkar /** 112326f09e9bSSantosh Shilimkar * memblock_virt_alloc_internal - allocate boot memory block 112426f09e9bSSantosh Shilimkar * @size: size of memory block to be allocated in bytes 112526f09e9bSSantosh Shilimkar * @align: alignment of the region and block's size 112626f09e9bSSantosh Shilimkar * @min_addr: the lower bound of the memory region to allocate (phys address) 
112726f09e9bSSantosh Shilimkar * @max_addr: the upper bound of the memory region to allocate (phys address) 112826f09e9bSSantosh Shilimkar * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 112926f09e9bSSantosh Shilimkar * 113026f09e9bSSantosh Shilimkar * The @min_addr limit is dropped if it can not be satisfied and the allocation 113126f09e9bSSantosh Shilimkar * will fall back to memory below @min_addr. Also, allocation may fall back 113226f09e9bSSantosh Shilimkar * to any node in the system if the specified node can not 113326f09e9bSSantosh Shilimkar * hold the requested memory. 113426f09e9bSSantosh Shilimkar * 113526f09e9bSSantosh Shilimkar * The allocation is performed from memory region limited by 113626f09e9bSSantosh Shilimkar * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE. 113726f09e9bSSantosh Shilimkar * 113826f09e9bSSantosh Shilimkar * The memory block is aligned on SMP_CACHE_BYTES if @align == 0. 113926f09e9bSSantosh Shilimkar * 114026f09e9bSSantosh Shilimkar * The phys address of allocated boot memory block is converted to virtual and 114126f09e9bSSantosh Shilimkar * allocated memory is reset to 0. 114226f09e9bSSantosh Shilimkar * 114326f09e9bSSantosh Shilimkar * In addition, function sets the min_count to 0 using kmemleak_alloc for 114426f09e9bSSantosh Shilimkar * allocated boot memory block, so that it is never reported as leaks. 114526f09e9bSSantosh Shilimkar * 114626f09e9bSSantosh Shilimkar * RETURNS: 114726f09e9bSSantosh Shilimkar * Virtual address of allocated memory block on success, NULL on failure. 114826f09e9bSSantosh Shilimkar */ 114926f09e9bSSantosh Shilimkar static void * __init memblock_virt_alloc_internal( 115026f09e9bSSantosh Shilimkar phys_addr_t size, phys_addr_t align, 115126f09e9bSSantosh Shilimkar phys_addr_t min_addr, phys_addr_t max_addr, 115226f09e9bSSantosh Shilimkar int nid) 115326f09e9bSSantosh Shilimkar { 115426f09e9bSSantosh Shilimkar phys_addr_t alloc; 115526f09e9bSSantosh Shilimkar void *ptr; 115626f09e9bSSantosh Shilimkar 1157560dca27SGrygorii Strashko if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. 
Use NUMA_NO_NODE instead\n")) 1158560dca27SGrygorii Strashko nid = NUMA_NO_NODE; 115926f09e9bSSantosh Shilimkar 116026f09e9bSSantosh Shilimkar /* 116126f09e9bSSantosh Shilimkar * Detect any accidental use of these APIs after slab is ready, as at 116226f09e9bSSantosh Shilimkar * this moment memblock may be deinitialized already and its 116326f09e9bSSantosh Shilimkar * internal data may be destroyed (after execution of free_all_bootmem) 116426f09e9bSSantosh Shilimkar */ 116526f09e9bSSantosh Shilimkar if (WARN_ON_ONCE(slab_is_available())) 116626f09e9bSSantosh Shilimkar return kzalloc_node(size, GFP_NOWAIT, nid); 116726f09e9bSSantosh Shilimkar 116826f09e9bSSantosh Shilimkar if (!align) 116926f09e9bSSantosh Shilimkar align = SMP_CACHE_BYTES; 117026f09e9bSSantosh Shilimkar 1171f544e14fSYinghai Lu if (max_addr > memblock.current_limit) 1172f544e14fSYinghai Lu max_addr = memblock.current_limit; 1173f544e14fSYinghai Lu 117426f09e9bSSantosh Shilimkar again: 117526f09e9bSSantosh Shilimkar alloc = memblock_find_in_range_node(size, align, min_addr, max_addr, 117626f09e9bSSantosh Shilimkar nid); 117726f09e9bSSantosh Shilimkar if (alloc) 117826f09e9bSSantosh Shilimkar goto done; 117926f09e9bSSantosh Shilimkar 118026f09e9bSSantosh Shilimkar if (nid != NUMA_NO_NODE) { 118126f09e9bSSantosh Shilimkar alloc = memblock_find_in_range_node(size, align, min_addr, 118226f09e9bSSantosh Shilimkar max_addr, NUMA_NO_NODE); 118326f09e9bSSantosh Shilimkar if (alloc) 118426f09e9bSSantosh Shilimkar goto done; 118526f09e9bSSantosh Shilimkar } 118626f09e9bSSantosh Shilimkar 118726f09e9bSSantosh Shilimkar if (min_addr) { 118826f09e9bSSantosh Shilimkar min_addr = 0; 118926f09e9bSSantosh Shilimkar goto again; 119026f09e9bSSantosh Shilimkar } else { 119126f09e9bSSantosh Shilimkar goto error; 119226f09e9bSSantosh Shilimkar } 119326f09e9bSSantosh Shilimkar 119426f09e9bSSantosh Shilimkar done: 119526f09e9bSSantosh Shilimkar memblock_reserve(alloc, size); 119626f09e9bSSantosh Shilimkar ptr = phys_to_virt(alloc); 119726f09e9bSSantosh Shilimkar memset(ptr, 0, size); 119826f09e9bSSantosh Shilimkar 119926f09e9bSSantosh Shilimkar /* 120026f09e9bSSantosh Shilimkar * The min_count is set to 0 so that bootmem allocated blocks 120126f09e9bSSantosh Shilimkar * are never reported as leaks. This is because many of these blocks 120226f09e9bSSantosh Shilimkar * are only referred via the physical address which is not 120326f09e9bSSantosh Shilimkar * looked up by kmemleak. 
120426f09e9bSSantosh Shilimkar */ 120526f09e9bSSantosh Shilimkar kmemleak_alloc(ptr, size, 0, 0); 120626f09e9bSSantosh Shilimkar 120726f09e9bSSantosh Shilimkar return ptr; 120826f09e9bSSantosh Shilimkar 120926f09e9bSSantosh Shilimkar error: 121026f09e9bSSantosh Shilimkar return NULL; 121126f09e9bSSantosh Shilimkar } 121226f09e9bSSantosh Shilimkar 121326f09e9bSSantosh Shilimkar /** 121426f09e9bSSantosh Shilimkar * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block 121526f09e9bSSantosh Shilimkar * @size: size of memory block to be allocated in bytes 121626f09e9bSSantosh Shilimkar * @align: alignment of the region and block's size 121726f09e9bSSantosh Shilimkar * @min_addr: the lower bound of the memory region from where the allocation 121826f09e9bSSantosh Shilimkar * is preferred (phys address) 121926f09e9bSSantosh Shilimkar * @max_addr: the upper bound of the memory region from where the allocation 122026f09e9bSSantosh Shilimkar * is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to 122126f09e9bSSantosh Shilimkar * allocate only from memory limited by memblock.current_limit value 122226f09e9bSSantosh Shilimkar * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 122326f09e9bSSantosh Shilimkar * 122426f09e9bSSantosh Shilimkar * Public version of _memblock_virt_alloc_try_nid_nopanic() which provides 122526f09e9bSSantosh Shilimkar * additional debug information (including caller info), if enabled. 122626f09e9bSSantosh Shilimkar * 122726f09e9bSSantosh Shilimkar * RETURNS: 122826f09e9bSSantosh Shilimkar * Virtual address of allocated memory block on success, NULL on failure. 122926f09e9bSSantosh Shilimkar */ 123026f09e9bSSantosh Shilimkar void * __init memblock_virt_alloc_try_nid_nopanic( 123126f09e9bSSantosh Shilimkar phys_addr_t size, phys_addr_t align, 123226f09e9bSSantosh Shilimkar phys_addr_t min_addr, phys_addr_t max_addr, 123326f09e9bSSantosh Shilimkar int nid) 123426f09e9bSSantosh Shilimkar { 123526f09e9bSSantosh Shilimkar memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n", 123626f09e9bSSantosh Shilimkar __func__, (u64)size, (u64)align, nid, (u64)min_addr, 123726f09e9bSSantosh Shilimkar (u64)max_addr, (void *)_RET_IP_); 123826f09e9bSSantosh Shilimkar return memblock_virt_alloc_internal(size, align, min_addr, 123926f09e9bSSantosh Shilimkar max_addr, nid); 124026f09e9bSSantosh Shilimkar } 124126f09e9bSSantosh Shilimkar 124226f09e9bSSantosh Shilimkar /** 124326f09e9bSSantosh Shilimkar * memblock_virt_alloc_try_nid - allocate boot memory block with panicking 124426f09e9bSSantosh Shilimkar * @size: size of memory block to be allocated in bytes 124526f09e9bSSantosh Shilimkar * @align: alignment of the region and block's size 124626f09e9bSSantosh Shilimkar * @min_addr: the lower bound of the memory region from where the allocation 124726f09e9bSSantosh Shilimkar * is preferred (phys address) 124826f09e9bSSantosh Shilimkar * @max_addr: the upper bound of the memory region from where the allocation 124926f09e9bSSantosh Shilimkar * is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to 125026f09e9bSSantosh Shilimkar * allocate only from memory limited by memblock.current_limit value 125126f09e9bSSantosh Shilimkar * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 125226f09e9bSSantosh Shilimkar * 125326f09e9bSSantosh Shilimkar * Public panicking version of _memblock_virt_alloc_try_nid_nopanic() 125426f09e9bSSantosh Shilimkar * which provides debug information (including caller info), if enabled, 
125526f09e9bSSantosh Shilimkar * and panics if the request can not be satisfied. 125626f09e9bSSantosh Shilimkar * 125726f09e9bSSantosh Shilimkar * RETURNS: 125826f09e9bSSantosh Shilimkar * Virtual address of allocated memory block on success; panics (does not return) on failure. 125926f09e9bSSantosh Shilimkar */ 126026f09e9bSSantosh Shilimkar void * __init memblock_virt_alloc_try_nid( 126126f09e9bSSantosh Shilimkar phys_addr_t size, phys_addr_t align, 126226f09e9bSSantosh Shilimkar phys_addr_t min_addr, phys_addr_t max_addr, 126326f09e9bSSantosh Shilimkar int nid) 126426f09e9bSSantosh Shilimkar { 126526f09e9bSSantosh Shilimkar void *ptr; 126626f09e9bSSantosh Shilimkar 126726f09e9bSSantosh Shilimkar memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n", 126826f09e9bSSantosh Shilimkar __func__, (u64)size, (u64)align, nid, (u64)min_addr, 126926f09e9bSSantosh Shilimkar (u64)max_addr, (void *)_RET_IP_); 127026f09e9bSSantosh Shilimkar ptr = memblock_virt_alloc_internal(size, align, 127126f09e9bSSantosh Shilimkar min_addr, max_addr, nid); 127226f09e9bSSantosh Shilimkar if (ptr) 127326f09e9bSSantosh Shilimkar return ptr; 127426f09e9bSSantosh Shilimkar 127526f09e9bSSantosh Shilimkar panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx\n", 127626f09e9bSSantosh Shilimkar __func__, (u64)size, (u64)align, nid, (u64)min_addr, 127726f09e9bSSantosh Shilimkar (u64)max_addr); 127826f09e9bSSantosh Shilimkar return NULL; 127926f09e9bSSantosh Shilimkar } 128026f09e9bSSantosh Shilimkar 128126f09e9bSSantosh Shilimkar /** 128226f09e9bSSantosh Shilimkar * __memblock_free_early - free boot memory block 128326f09e9bSSantosh Shilimkar * @base: phys starting address of the boot memory block 128426f09e9bSSantosh Shilimkar * @size: size of the boot memory block in bytes 128526f09e9bSSantosh Shilimkar * 128626f09e9bSSantosh Shilimkar * Free boot memory block previously allocated by memblock_virt_alloc_xx() API. 128726f09e9bSSantosh Shilimkar * The freed memory will not be released to the buddy allocator. 128826f09e9bSSantosh Shilimkar */ 128926f09e9bSSantosh Shilimkar void __init __memblock_free_early(phys_addr_t base, phys_addr_t size) 129026f09e9bSSantosh Shilimkar { 129126f09e9bSSantosh Shilimkar memblock_dbg("%s: [%#016llx-%#016llx] %pF\n", 129226f09e9bSSantosh Shilimkar __func__, (u64)base, (u64)base + size - 1, 129326f09e9bSSantosh Shilimkar (void *)_RET_IP_); 129426f09e9bSSantosh Shilimkar kmemleak_free_part(__va(base), size); 1295f1af9d3aSPhilipp Hachtmann memblock_remove_range(&memblock.reserved, base, size); 129626f09e9bSSantosh Shilimkar } 129726f09e9bSSantosh Shilimkar 129826f09e9bSSantosh Shilimkar /* 129926f09e9bSSantosh Shilimkar * __memblock_free_late - free bootmem block pages directly to buddy allocator 130026f09e9bSSantosh Shilimkar * @base: phys starting address of the boot memory block 130126f09e9bSSantosh Shilimkar * @size: size of the boot memory block in bytes 130226f09e9bSSantosh Shilimkar * 130326f09e9bSSantosh Shilimkar * This is only useful when the bootmem allocator has already been torn 130426f09e9bSSantosh Shilimkar * down, but we are still initializing the system. Pages are released directly 130526f09e9bSSantosh Shilimkar * to the buddy allocator, no bootmem metadata is updated because it is gone.
130626f09e9bSSantosh Shilimkar */ 130726f09e9bSSantosh Shilimkar void __init __memblock_free_late(phys_addr_t base, phys_addr_t size) 130826f09e9bSSantosh Shilimkar { 130926f09e9bSSantosh Shilimkar u64 cursor, end; 131026f09e9bSSantosh Shilimkar 131126f09e9bSSantosh Shilimkar memblock_dbg("%s: [%#016llx-%#016llx] %pF\n", 131226f09e9bSSantosh Shilimkar __func__, (u64)base, (u64)base + size - 1, 131326f09e9bSSantosh Shilimkar (void *)_RET_IP_); 131426f09e9bSSantosh Shilimkar kmemleak_free_part(__va(base), size); 131526f09e9bSSantosh Shilimkar cursor = PFN_UP(base); 131626f09e9bSSantosh Shilimkar end = PFN_DOWN(base + size); 131726f09e9bSSantosh Shilimkar 131826f09e9bSSantosh Shilimkar for (; cursor < end; cursor++) { 131926f09e9bSSantosh Shilimkar __free_pages_bootmem(pfn_to_page(cursor), 0); 132026f09e9bSSantosh Shilimkar totalram_pages++; 132126f09e9bSSantosh Shilimkar } 132226f09e9bSSantosh Shilimkar } 13239d1e2492SBenjamin Herrenschmidt 13249d1e2492SBenjamin Herrenschmidt /* 13259d1e2492SBenjamin Herrenschmidt * Remaining API functions 13269d1e2492SBenjamin Herrenschmidt */ 13279d1e2492SBenjamin Herrenschmidt 13282898cc4cSBenjamin Herrenschmidt phys_addr_t __init memblock_phys_mem_size(void) 132995f72d1eSYinghai Lu { 13301440c4e2STejun Heo return memblock.memory.total_size; 133195f72d1eSYinghai Lu } 133295f72d1eSYinghai Lu 1333595ad9afSYinghai Lu phys_addr_t __init memblock_mem_size(unsigned long limit_pfn) 1334595ad9afSYinghai Lu { 1335595ad9afSYinghai Lu unsigned long pages = 0; 1336595ad9afSYinghai Lu struct memblock_region *r; 1337595ad9afSYinghai Lu unsigned long start_pfn, end_pfn; 1338595ad9afSYinghai Lu 1339595ad9afSYinghai Lu for_each_memblock(memory, r) { 1340595ad9afSYinghai Lu start_pfn = memblock_region_memory_base_pfn(r); 1341595ad9afSYinghai Lu end_pfn = memblock_region_memory_end_pfn(r); 1342595ad9afSYinghai Lu start_pfn = min_t(unsigned long, start_pfn, limit_pfn); 1343595ad9afSYinghai Lu end_pfn = min_t(unsigned long, end_pfn, limit_pfn); 1344595ad9afSYinghai Lu pages += end_pfn - start_pfn; 1345595ad9afSYinghai Lu } 1346595ad9afSYinghai Lu 134716763230SFabian Frederick return PFN_PHYS(pages); 1348595ad9afSYinghai Lu } 1349595ad9afSYinghai Lu 13500a93ebefSSam Ravnborg /* lowest address */ 13510a93ebefSSam Ravnborg phys_addr_t __init_memblock memblock_start_of_DRAM(void) 13520a93ebefSSam Ravnborg { 13530a93ebefSSam Ravnborg return memblock.memory.regions[0].base; 13540a93ebefSSam Ravnborg } 13550a93ebefSSam Ravnborg 135610d06439SYinghai Lu phys_addr_t __init_memblock memblock_end_of_DRAM(void) 135795f72d1eSYinghai Lu { 135895f72d1eSYinghai Lu int idx = memblock.memory.cnt - 1; 135995f72d1eSYinghai Lu 1360e3239ff9SBenjamin Herrenschmidt return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size); 136195f72d1eSYinghai Lu } 136295f72d1eSYinghai Lu 1363c0ce8fefSTejun Heo void __init memblock_enforce_memory_limit(phys_addr_t limit) 136495f72d1eSYinghai Lu { 1365c0ce8fefSTejun Heo phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX; 1366136199f0SEmil Medve struct memblock_region *r; 136795f72d1eSYinghai Lu 1368c0ce8fefSTejun Heo if (!limit) 136995f72d1eSYinghai Lu return; 137095f72d1eSYinghai Lu 1371c0ce8fefSTejun Heo /* find out max address */ 1372136199f0SEmil Medve for_each_memblock(memory, r) { 1373c0ce8fefSTejun Heo if (limit <= r->size) { 1374c0ce8fefSTejun Heo max_addr = r->base + limit; 137595f72d1eSYinghai Lu break; 137695f72d1eSYinghai Lu } 1377c0ce8fefSTejun Heo limit -= r->size; 137895f72d1eSYinghai Lu } 1379c0ce8fefSTejun Heo 1380c0ce8fefSTejun Heo 
/* truncate both memory and reserved regions */ 1381f1af9d3aSPhilipp Hachtmann memblock_remove_range(&memblock.memory, max_addr, 1382f1af9d3aSPhilipp Hachtmann (phys_addr_t)ULLONG_MAX); 1383f1af9d3aSPhilipp Hachtmann memblock_remove_range(&memblock.reserved, max_addr, 1384f1af9d3aSPhilipp Hachtmann (phys_addr_t)ULLONG_MAX); 138595f72d1eSYinghai Lu } 138695f72d1eSYinghai Lu 1387cd79481dSYinghai Lu static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr) 138872d4b0b4SBenjamin Herrenschmidt { 138972d4b0b4SBenjamin Herrenschmidt unsigned int left = 0, right = type->cnt; 139072d4b0b4SBenjamin Herrenschmidt 139172d4b0b4SBenjamin Herrenschmidt do { 139272d4b0b4SBenjamin Herrenschmidt unsigned int mid = (right + left) / 2; 139372d4b0b4SBenjamin Herrenschmidt 139472d4b0b4SBenjamin Herrenschmidt if (addr < type->regions[mid].base) 139572d4b0b4SBenjamin Herrenschmidt right = mid; 139672d4b0b4SBenjamin Herrenschmidt else if (addr >= (type->regions[mid].base + 139772d4b0b4SBenjamin Herrenschmidt type->regions[mid].size)) 139872d4b0b4SBenjamin Herrenschmidt left = mid + 1; 139972d4b0b4SBenjamin Herrenschmidt else 140072d4b0b4SBenjamin Herrenschmidt return mid; 140172d4b0b4SBenjamin Herrenschmidt } while (left < right); 140272d4b0b4SBenjamin Herrenschmidt return -1; 140372d4b0b4SBenjamin Herrenschmidt } 140472d4b0b4SBenjamin Herrenschmidt 14052898cc4cSBenjamin Herrenschmidt int __init memblock_is_reserved(phys_addr_t addr) 140695f72d1eSYinghai Lu { 140772d4b0b4SBenjamin Herrenschmidt return memblock_search(&memblock.reserved, addr) != -1; 140895f72d1eSYinghai Lu } 140972d4b0b4SBenjamin Herrenschmidt 14103661ca66SYinghai Lu int __init_memblock memblock_is_memory(phys_addr_t addr) 141172d4b0b4SBenjamin Herrenschmidt { 141272d4b0b4SBenjamin Herrenschmidt return memblock_search(&memblock.memory, addr) != -1; 141372d4b0b4SBenjamin Herrenschmidt } 141472d4b0b4SBenjamin Herrenschmidt 1415e76b63f8SYinghai Lu #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 1416e76b63f8SYinghai Lu int __init_memblock memblock_search_pfn_nid(unsigned long pfn, 1417e76b63f8SYinghai Lu unsigned long *start_pfn, unsigned long *end_pfn) 1418e76b63f8SYinghai Lu { 1419e76b63f8SYinghai Lu struct memblock_type *type = &memblock.memory; 142016763230SFabian Frederick int mid = memblock_search(type, PFN_PHYS(pfn)); 1421e76b63f8SYinghai Lu 1422e76b63f8SYinghai Lu if (mid == -1) 1423e76b63f8SYinghai Lu return -1; 1424e76b63f8SYinghai Lu 1425f7e2f7e8SFabian Frederick *start_pfn = PFN_DOWN(type->regions[mid].base); 1426f7e2f7e8SFabian Frederick *end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size); 1427e76b63f8SYinghai Lu 1428e76b63f8SYinghai Lu return type->regions[mid].nid; 1429e76b63f8SYinghai Lu } 1430e76b63f8SYinghai Lu #endif 1431e76b63f8SYinghai Lu 1432eab30949SStephen Boyd /** 1433eab30949SStephen Boyd * memblock_is_region_memory - check if a region is a subset of memory 1434eab30949SStephen Boyd * @base: base of region to check 1435eab30949SStephen Boyd * @size: size of region to check 1436eab30949SStephen Boyd * 1437eab30949SStephen Boyd * Check if the region [@base, @base+@size) is a subset of a memory block. 
1438eab30949SStephen Boyd * 1439eab30949SStephen Boyd * RETURNS: 1440eab30949SStephen Boyd * 0 if false, non-zero if true 1441eab30949SStephen Boyd */ 14423661ca66SYinghai Lu int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size) 144372d4b0b4SBenjamin Herrenschmidt { 1444abb65272STomi Valkeinen int idx = memblock_search(&memblock.memory, base); 1445eb18f1b5STejun Heo phys_addr_t end = base + memblock_cap_size(base, &size); 144672d4b0b4SBenjamin Herrenschmidt 144772d4b0b4SBenjamin Herrenschmidt if (idx == -1) 144895f72d1eSYinghai Lu return 0; 1449abb65272STomi Valkeinen return memblock.memory.regions[idx].base <= base && 1450abb65272STomi Valkeinen (memblock.memory.regions[idx].base + 1451eb18f1b5STejun Heo memblock.memory.regions[idx].size) >= end; 145295f72d1eSYinghai Lu } 145395f72d1eSYinghai Lu 1454eab30949SStephen Boyd /** 1455eab30949SStephen Boyd * memblock_is_region_reserved - check if a region intersects reserved memory 1456eab30949SStephen Boyd * @base: base of region to check 1457eab30949SStephen Boyd * @size: size of region to check 1458eab30949SStephen Boyd * 1459eab30949SStephen Boyd * Check if the region [@base, @base+@size) intersects a reserved memory block. 1460eab30949SStephen Boyd * 1461eab30949SStephen Boyd * RETURNS: 1462eab30949SStephen Boyd * 0 if false, non-zero if true 1463eab30949SStephen Boyd */ 146410d06439SYinghai Lu int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size) 146595f72d1eSYinghai Lu { 1466eb18f1b5STejun Heo memblock_cap_size(base, &size); 1467f1c2c19cSBenjamin Herrenschmidt return memblock_overlaps_region(&memblock.reserved, base, size) >= 0; 146895f72d1eSYinghai Lu } 146995f72d1eSYinghai Lu 14706ede1fd3SYinghai Lu void __init_memblock memblock_trim_memory(phys_addr_t align) 14716ede1fd3SYinghai Lu { 14726ede1fd3SYinghai Lu phys_addr_t start, end, orig_start, orig_end; 1473136199f0SEmil Medve struct memblock_region *r; 14746ede1fd3SYinghai Lu 1475136199f0SEmil Medve for_each_memblock(memory, r) { 1476136199f0SEmil Medve orig_start = r->base; 1477136199f0SEmil Medve orig_end = r->base + r->size; 14786ede1fd3SYinghai Lu start = round_up(orig_start, align); 14796ede1fd3SYinghai Lu end = round_down(orig_end, align); 14806ede1fd3SYinghai Lu 14816ede1fd3SYinghai Lu if (start == orig_start && end == orig_end) 14826ede1fd3SYinghai Lu continue; 14836ede1fd3SYinghai Lu 14846ede1fd3SYinghai Lu if (start < end) { 1485136199f0SEmil Medve r->base = start; 1486136199f0SEmil Medve r->size = end - start; 14876ede1fd3SYinghai Lu } else { 1488136199f0SEmil Medve memblock_remove_region(&memblock.memory, 1489136199f0SEmil Medve r - memblock.memory.regions); 1490136199f0SEmil Medve r--; 14916ede1fd3SYinghai Lu } 14926ede1fd3SYinghai Lu } 14936ede1fd3SYinghai Lu } 1494e63075a3SBenjamin Herrenschmidt 14953661ca66SYinghai Lu void __init_memblock memblock_set_current_limit(phys_addr_t limit) 1496e63075a3SBenjamin Herrenschmidt { 1497e63075a3SBenjamin Herrenschmidt memblock.current_limit = limit; 1498e63075a3SBenjamin Herrenschmidt } 1499e63075a3SBenjamin Herrenschmidt 1500fec51014SLaura Abbott phys_addr_t __init_memblock memblock_get_current_limit(void) 1501fec51014SLaura Abbott { 1502fec51014SLaura Abbott return memblock.current_limit; 1503fec51014SLaura Abbott } 1504fec51014SLaura Abbott 15057c0caeb8STejun Heo static void __init_memblock memblock_dump(struct memblock_type *type, char *name) 15066ed311b2SBenjamin Herrenschmidt { 15076ed311b2SBenjamin Herrenschmidt unsigned long long base, size; 150866a20757STang Chen 
unsigned long flags; 15096ed311b2SBenjamin Herrenschmidt int i; 15106ed311b2SBenjamin Herrenschmidt 15117c0caeb8STejun Heo pr_info(" %s.cnt = 0x%lx\n", name, type->cnt); 15126ed311b2SBenjamin Herrenschmidt 15137c0caeb8STejun Heo for (i = 0; i < type->cnt; i++) { 15147c0caeb8STejun Heo struct memblock_region *rgn = &type->regions[i]; 15157c0caeb8STejun Heo char nid_buf[32] = ""; 15166ed311b2SBenjamin Herrenschmidt 15177c0caeb8STejun Heo base = rgn->base; 15187c0caeb8STejun Heo size = rgn->size; 151966a20757STang Chen flags = rgn->flags; 15207c0caeb8STejun Heo #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 15217c0caeb8STejun Heo if (memblock_get_region_node(rgn) != MAX_NUMNODES) 15227c0caeb8STejun Heo snprintf(nid_buf, sizeof(nid_buf), " on node %d", 15237c0caeb8STejun Heo memblock_get_region_node(rgn)); 15247c0caeb8STejun Heo #endif 152566a20757STang Chen pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s flags: %#lx\n", 152666a20757STang Chen name, i, base, base + size - 1, size, nid_buf, flags); 15276ed311b2SBenjamin Herrenschmidt } 15286ed311b2SBenjamin Herrenschmidt } 15296ed311b2SBenjamin Herrenschmidt 15304ff7b82fSTejun Heo void __init_memblock __memblock_dump_all(void) 15316ed311b2SBenjamin Herrenschmidt { 15326ed311b2SBenjamin Herrenschmidt pr_info("MEMBLOCK configuration:\n"); 15331440c4e2STejun Heo pr_info(" memory size = %#llx reserved size = %#llx\n", 15341440c4e2STejun Heo (unsigned long long)memblock.memory.total_size, 15351440c4e2STejun Heo (unsigned long long)memblock.reserved.total_size); 15366ed311b2SBenjamin Herrenschmidt 15376ed311b2SBenjamin Herrenschmidt memblock_dump(&memblock.memory, "memory"); 15386ed311b2SBenjamin Herrenschmidt memblock_dump(&memblock.reserved, "reserved"); 15396ed311b2SBenjamin Herrenschmidt } 15406ed311b2SBenjamin Herrenschmidt 15411aadc056STejun Heo void __init memblock_allow_resize(void) 15426ed311b2SBenjamin Herrenschmidt { 1543142b45a7SBenjamin Herrenschmidt memblock_can_resize = 1; 15446ed311b2SBenjamin Herrenschmidt } 15456ed311b2SBenjamin Herrenschmidt 15466ed311b2SBenjamin Herrenschmidt static int __init early_memblock(char *p) 15476ed311b2SBenjamin Herrenschmidt { 15486ed311b2SBenjamin Herrenschmidt if (p && strstr(p, "debug")) 15496ed311b2SBenjamin Herrenschmidt memblock_debug = 1; 15506ed311b2SBenjamin Herrenschmidt return 0; 15516ed311b2SBenjamin Herrenschmidt } 15526ed311b2SBenjamin Herrenschmidt early_param("memblock", early_memblock); 15536ed311b2SBenjamin Herrenschmidt 1554c378ddd5STejun Heo #if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK) 15556d03b885SBenjamin Herrenschmidt 15566d03b885SBenjamin Herrenschmidt static int memblock_debug_show(struct seq_file *m, void *private) 15576d03b885SBenjamin Herrenschmidt { 15586d03b885SBenjamin Herrenschmidt struct memblock_type *type = m->private; 15596d03b885SBenjamin Herrenschmidt struct memblock_region *reg; 15606d03b885SBenjamin Herrenschmidt int i; 15616d03b885SBenjamin Herrenschmidt 15626d03b885SBenjamin Herrenschmidt for (i = 0; i < type->cnt; i++) { 15636d03b885SBenjamin Herrenschmidt reg = &type->regions[i]; 15646d03b885SBenjamin Herrenschmidt seq_printf(m, "%4d: ", i); 15656d03b885SBenjamin Herrenschmidt if (sizeof(phys_addr_t) == 4) 15666d03b885SBenjamin Herrenschmidt seq_printf(m, "0x%08lx..0x%08lx\n", 15676d03b885SBenjamin Herrenschmidt (unsigned long)reg->base, 15686d03b885SBenjamin Herrenschmidt (unsigned long)(reg->base + reg->size - 1)); 15696d03b885SBenjamin Herrenschmidt else 15706d03b885SBenjamin Herrenschmidt seq_printf(m, "0x%016llx..0x%016llx\n", 
15716d03b885SBenjamin Herrenschmidt (unsigned long long)reg->base, 15726d03b885SBenjamin Herrenschmidt (unsigned long long)(reg->base + reg->size - 1)); 15736d03b885SBenjamin Herrenschmidt 15746d03b885SBenjamin Herrenschmidt } 15756d03b885SBenjamin Herrenschmidt return 0; 15766d03b885SBenjamin Herrenschmidt } 15776d03b885SBenjamin Herrenschmidt 15786d03b885SBenjamin Herrenschmidt static int memblock_debug_open(struct inode *inode, struct file *file) 15796d03b885SBenjamin Herrenschmidt { 15806d03b885SBenjamin Herrenschmidt return single_open(file, memblock_debug_show, inode->i_private); 15816d03b885SBenjamin Herrenschmidt } 15826d03b885SBenjamin Herrenschmidt 15836d03b885SBenjamin Herrenschmidt static const struct file_operations memblock_debug_fops = { 15846d03b885SBenjamin Herrenschmidt .open = memblock_debug_open, 15856d03b885SBenjamin Herrenschmidt .read = seq_read, 15866d03b885SBenjamin Herrenschmidt .llseek = seq_lseek, 15876d03b885SBenjamin Herrenschmidt .release = single_release, 15886d03b885SBenjamin Herrenschmidt }; 15896d03b885SBenjamin Herrenschmidt 15906d03b885SBenjamin Herrenschmidt static int __init memblock_init_debugfs(void) 15916d03b885SBenjamin Herrenschmidt { 15926d03b885SBenjamin Herrenschmidt struct dentry *root = debugfs_create_dir("memblock", NULL); 15936d03b885SBenjamin Herrenschmidt if (!root) 15946d03b885SBenjamin Herrenschmidt return -ENXIO; 15956d03b885SBenjamin Herrenschmidt debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops); 15966d03b885SBenjamin Herrenschmidt debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops); 159770210ed9SPhilipp Hachtmann #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP 159870210ed9SPhilipp Hachtmann debugfs_create_file("physmem", S_IRUGO, root, &memblock.physmem, &memblock_debug_fops); 159970210ed9SPhilipp Hachtmann #endif 16006d03b885SBenjamin Herrenschmidt 16016d03b885SBenjamin Herrenschmidt return 0; 16026d03b885SBenjamin Herrenschmidt } 16036d03b885SBenjamin Herrenschmidt __initcall(memblock_init_debugfs); 16046d03b885SBenjamin Herrenschmidt 16056d03b885SBenjamin Herrenschmidt #endif /* CONFIG_DEBUG_FS */ 1606
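
The sketch below is illustrative only and is not part of memblock.c. Assuming a hypothetical early-boot caller (setup_example_table(), a PAGE_SIZE-sized table, and a caller-supplied node id), it shows how the non-panicking virtual allocator and the early free helper documented above are typically paired; BOOTMEM_ALLOC_ACCESSIBLE is the linux/bootmem.h constant that limits the search to memblock.current_limit.

#include <linux/bootmem.h>	/* memblock_virt_alloc_try_nid_nopanic(), BOOTMEM_ALLOC_ACCESSIBLE */
#include <linux/memblock.h>	/* __memblock_free_early() */
#include <linux/errno.h>

/* Hypothetical example, not part of memblock.c. */
static int __init setup_example_table(int nid)
{
	phys_addr_t size = PAGE_SIZE;
	void *table;

	/*
	 * Zeroed, SMP_CACHE_BYTES-aligned (align == 0) boot memory, preferably
	 * on @nid; returns NULL instead of panicking if nothing fits below
	 * memblock.current_limit.
	 */
	table = memblock_virt_alloc_try_nid_nopanic(size, 0, 0,
						    BOOTMEM_ALLOC_ACCESSIBLE,
						    nid);
	if (!table)
		return -ENOMEM;

	/* ... populate the table during early boot ... */

	/*
	 * If the table turns out to be unnecessary, hand the block back to
	 * memblock: the range is dropped from memblock.reserved and is not
	 * released to the buddy allocator.
	 */
	__memblock_free_early(__pa(table), size);
	return 0;
}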