// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#include <linux/io.h>

#include "internal.h"

#define INIT_MEMBLOCK_REGIONS			128
#define INIT_PHYSMEM_REGIONS			4

#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
# define INIT_MEMBLOCK_RESERVED_REGIONS		INIT_MEMBLOCK_REGIONS
#endif

/**
 * DOC: memblock overview
 *
 * Memblock is a method of managing memory regions during the early
 * boot period when the usual kernel memory allocators are not up and
 * running.
 *
 * Memblock views the system memory as collections of contiguous
 * regions. There are several types of these collections:
 *
 * * ``memory`` - describes the physical memory available to the
 *   kernel; this may differ from the actual physical memory installed
 *   in the system, for instance when the memory is restricted with
 *   ``mem=`` command line parameter
 * * ``reserved`` - describes the regions that were allocated
 * * ``physmap`` - describes the actual physical memory regardless of
 *   the possible restrictions; the ``physmap`` type is only available
 *   on some architectures.
 *
 * Each region is represented by :c:type:`struct memblock_region` that
 * defines the region extents, its attributes and NUMA node id on NUMA
 * systems. Every memory type is described by the :c:type:`struct
 * memblock_type` which contains an array of memory regions along with
 * the allocator metadata. The memory types are nicely wrapped with
 * :c:type:`struct memblock`. This structure is statically initialized
 * at build time. The region arrays for the "memory" and "reserved"
 * types are initially sized to %INIT_MEMBLOCK_REGIONS and for the
 * "physmap" type to %INIT_PHYSMEM_REGIONS.
 * The :c:func:`memblock_allow_resize` enables automatic resizing of
 * the region arrays during addition of new regions. This feature
 * should be used with care so that memory allocated for the region
 * array will not overlap with areas that should be reserved, for
 * example initrd.
 *
 * The early architecture setup should tell memblock what the physical
 * memory layout is by using :c:func:`memblock_add` or
 * :c:func:`memblock_add_node` functions. The first function does not
 * assign the region to a NUMA node and it is appropriate for UMA
 * systems. Yet, it is possible to use it on NUMA systems as well and
 * assign the region to a NUMA node later in the setup process using
 * :c:func:`memblock_set_node`. The :c:func:`memblock_add_node`
 * performs such an assignment directly.
 *
 * Once memblock is set up the memory can be allocated using one of the
 * API variants:
 *
 * * :c:func:`memblock_phys_alloc*` - these functions return the
 *   **physical** address of the allocated memory
 * * :c:func:`memblock_alloc*` - these functions return the **virtual**
 *   address of the allocated memory.
 *
 * Note that both API variants use implicit assumptions about allowed
 * memory ranges and the fallback methods. Consult the documentation
 * of :c:func:`memblock_alloc_internal` and
 * :c:func:`memblock_alloc_range_nid` functions for a more elaborate
 * description.
 *
 * As the system boot progresses, the architecture specific
 * :c:func:`mem_init` function frees all the memory to the buddy page
 * allocator.
 *
 * Unless an architecture enables %CONFIG_ARCH_KEEP_MEMBLOCK, the
 * memblock data structures will be discarded after the system
 * initialization completes.
 */
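
/*
 * Illustrative sketch (not part of the original file): the typical
 * early-boot flow described in the DOC comment above, as an
 * architecture's setup code might use it. The function name, base
 * addresses and sizes are hypothetical placeholders; memblock_add(),
 * memblock_reserve(), memblock_allow_resize(), memblock_alloc() and
 * memblock_phys_alloc() are the real memblock interfaces.
 *
 *	void __init example_arch_mem_setup(void)
 *	{
 *		void *tbl;
 *		phys_addr_t pa;
 *
 *		// register the RAM banks reported by firmware
 *		memblock_add(0x80000000, SZ_512M);
 *		memblock_add(0xa0000000, SZ_256M);
 *
 *		// keep memblock away from firmware/initrd areas
 *		memblock_reserve(0x80000000, SZ_1M);
 *
 *		// let the region arrays grow once the critical
 *		// reservations are in place
 *		memblock_allow_resize();
 *
 *		// early allocation returning a zeroed virtual address ...
 *		tbl = memblock_alloc(SZ_64K, SZ_4K);
 *
 *		// ... or a physical address
 *		pa = memblock_phys_alloc(SZ_16K, SZ_4K);
 *	}
 */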

#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,
	.memory.name		= "memory",

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_RESERVED_REGIONS,
	.reserved.name		= "reserved",

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	.physmem.regions	= memblock_physmem_init_regions,
	.physmem.cnt		= 1,	/* empty dummy entry */
	.physmem.max		= INIT_PHYSMEM_REGIONS,
	.physmem.name		= "physmem",
#endif

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

int memblock_debug __initdata_memblock;
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

static enum memblock_flags __init_memblock choose_memblock_flags(void)
{
	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, PHYS_ADDR_MAX - base);
}

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++)
		if (memblock_addrs_overlap(base, size, type->regions[i].base,
					   type->regions[i].size))
			break;
	return i < type->cnt;
}

/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * When allocation direction is bottom-up, the @start should be greater
 * than the end of the kernel image. Otherwise, it will be trimmed. The
 * reason is that we want the bottom-up allocation just near the kernel
 * image so it is highly likely that the allocated memory and the kernel
 * will reside in the same node.
 *
 * If bottom-up allocation fails, it will try to allocate memory top-down.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					enum memblock_flags flags)
{
	phys_addr_t kernel_end, ret;

	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
	    end == MEMBLOCK_ALLOC_KASAN)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);
	kernel_end = __pa_symbol(_end);

	/*
	 * try bottom-up allocation only when bottom-up mode
	 * is set and @end is above the kernel image.
	 */
	if (memblock_bottom_up() && end > kernel_end) {
		phys_addr_t bottom_up_start;

		/* make sure we will allocate above the kernel */
		bottom_up_start = max(start, kernel_end);

		/* ok, try bottom-up allocation first */
		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
						      size, align, nid, flags);
		if (ret)
			return ret;

		/*
		 * we always limit bottom-up allocation above the kernel,
		 * but top-down allocation doesn't have the limit, so
		 * retrying top-down allocation may succeed when bottom-up
		 * allocation failed.
		 *
		 * bottom-up allocation is expected to fail very rarely,
		 * so we use WARN_ONCE() here to see the stack trace if
		 * failure happens.
		 */
		WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE),
			  "memblock: bottom-up allocation failed, memory hotremove may be affected\n");
	}

	return __memblock_find_range_top_down(start, end, size, align, nid,
					      flags);
}

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	enum memblock_flags flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					  NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
/**
 * memblock_discard - discard memory and reserved arrays if they were allocated
 */
void __init memblock_discard(void)
{
	phys_addr_t addr, size;

	if (memblock.reserved.regions != memblock_reserved_init_regions) {
		addr = __pa(memblock.reserved.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.reserved.max);
		__memblock_free_late(addr, size);
	}

	if (memblock.memory.regions != memblock_memory_init_regions) {
		addr = __pa(memblock.memory.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.memory.max);
		__memblock_free_late(addr, size);
	}
}
#endif
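
/*
 * Illustrative sketch (not part of the original file): callers of
 * memblock_find_in_range() must remember that it only *finds* a free
 * range - nothing is reserved until memblock_reserve() is called, which
 * is exactly the pattern memblock_double_array() below relies on. The
 * function name and the size/alignment values here are made up.
 *
 *	static phys_addr_t __init example_grab_low_buffer(void)
 *	{
 *		phys_addr_t addr;
 *
 *		// look for 1MiB, page aligned, below 4GiB
 *		addr = memblock_find_in_range(0, SZ_4G, SZ_1M, PAGE_SIZE);
 *		if (!addr)
 *			return 0;
 *
 *		// claim it so later allocations cannot hand it out again
 *		memblock_reserve(addr, SZ_1M);
 *		return addr;
 *	}
 */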

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * Return:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr, new_end;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new array aligned to PAGE_SIZE,
	 * so we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       type->name, type->max, type->max * 2);
		return -1;
	}

	new_end = addr + new_size - 1;
	memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]",
			type->name, type->max * 2, &addr, &new_end);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	/*
	 * Reserve the new array if that comes from the memblock.  Otherwise, we
	 * needn't do it
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}

/**
 * memblock_insert_region - insert new memblock region
 * @type:	memblock type to insert into
 * @idx:	index for the insertion point
 * @base:	base address of the new region
 * @size:	size of the new region
 * @nid:	node id of the new region
 * @flags:	flags of the new region
 *
 * Insert new memblock region [@base, @base + @size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid,
						   enum memblock_flags flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, enum memblock_flags flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx, nr_new;
	struct memblock_region *rgn;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
			WARN_ON(nid != memblock_get_region_node(rgn));
#endif
			WARN_ON(flags != rgn->flags);
			nr_new++;
			if (insert)
				memblock_insert_region(type, idx++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, idx, base, end - base,
					       nid, flags);
	}

	if (!nr_new)
		return 0;

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}

/**
 * memblock_add_node - add new memblock region within a NUMA node
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				       int nid)
{
	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}
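
/*
 * Illustrative sketch (not part of the original file): on a NUMA
 * machine the same range can either be added with its node id right
 * away, or added node-less and assigned to a node later with
 * memblock_set_node(), as the DOC comment at the top of this file
 * describes. The addresses, sizes and node ids below are hypothetical.
 *
 *	// variant 1: node id known at add time
 *	memblock_add_node(0x100000000ULL, SZ_1G, 1);
 *
 *	// variant 2: add first, attach the node id later
 *	memblock_add(0x100000000ULL, SZ_1G);
 *	memblock_set_node(0x100000000ULL, SZ_1G, &memblock.memory, 1);
 */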

/**
 * memblock_add - add new memblock region
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("memblock_add: [%pa-%pa] %pS\n",
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base, @base + @size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx;
	struct memblock_region *rgn;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, idx, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, idx--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = idx;
			*end_rgn = idx + 1;
		}
	}

	return 0;
}

static int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("memblock_remove: [%pa-%pa] %pS\n",
		     &base, &end, (void *)_RET_IP_);

	return memblock_remove_range(&memblock.memory, base, size);
}
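
/*
 * Illustrative note (not part of the original file): memblock_remove()
 * above drops a range from the "memory" type, so the kernel will not
 * use it at all, while memblock_free() below only drops a range from
 * the "reserved" type, returning it to the pool memblock may allocate
 * from again. A hypothetical example, where scratch_pa stands for the
 * physical address of a previously reserved scratch buffer:
 *
 *	// firmware-owned window: take it out of usable memory entirely
 *	memblock_remove(0xfe000000, SZ_16M);
 *
 *	// a scratch buffer that is no longer needed: un-reserve it
 *	memblock_free(scratch_pa, SZ_64K);
 */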

/**
 * memblock_free - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("   memblock_free: [%pa-%pa] %pS\n",
		     &base, &end, (void *)_RET_IP_);

	kmemleak_free_part_phys(base, size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("memblock_reserve: [%pa-%pa] %pS\n",
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_setclr_flag - set or clear flag for a memory region
 * @base: base address of the region
 * @size: size of the region
 * @set: set or clear the flag
 * @flag: the flag to update
 *
 * This function isolates region [@base, @base + @size), and sets/clears flag
 *
 * Return: 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
				phys_addr_t size, int set, int flag)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++) {
		struct memblock_region *r = &type->regions[i];

		if (set)
			r->flags |= flag;
		else
			r->flags &= ~flag;
	}

	memblock_merge_regions(type);
	return 0;
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}
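
/*
 * Illustrative sketch (not part of the original file): the flag helpers
 * below (and memblock_mark_hotplug() above) are typically called while
 * parsing firmware tables, before the allocator is used in earnest.
 * The ranges here are hypothetical.
 *
 *	// memory that may be unplugged later: keep kernel data out of it
 *	// when "movable_node" is enabled
 *	memblock_mark_hotplug(0x200000000ULL, SZ_4G);
 *
 *	// memory that must not get a linear mapping
 *	memblock_mark_nomap(0x180000000ULL, SZ_64M);
 *
 *	// mirrored (more reliable) memory, preferred for kernel allocations
 *	memblock_mark_mirror(0x80000000, SZ_256M);
 */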

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
	system_has_some_mirror = true;

	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}

/**
 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
}

/**
 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
}

/**
 * __next_reserved_mem_region - next function for for_each_reserved_region()
 * @idx: pointer to u64 loop variable
 * @out_start: ptr to phys_addr_t for start address of the region, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the region, can be %NULL
 *
 * Iterate over all reserved memory regions.
 */
void __init_memblock __next_reserved_mem_region(u64 *idx,
					   phys_addr_t *out_start,
					   phys_addr_t *out_end)
{
	struct memblock_type *type = &memblock.reserved;

	if (*idx < type->cnt) {
		struct memblock_region *r = &type->regions[*idx];
		phys_addr_t base = r->base;
		phys_addr_t size = r->size;

		if (out_start)
			*out_start = base;
		if (out_end)
			*out_end = base + size - 1;

		*idx += 1;
		return;
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

static bool should_skip_region(struct memblock_region *m, int nid, int flags)
{
	int m_nid = memblock_get_region_node(m);

	/* only memory regions are associated with nodes, check it */
	if (nid != NUMA_NO_NODE && nid != m_nid)
		return true;

	/* skip hotpluggable memory regions if needed */
	if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
		return true;

	/* if we want mirror memory skip non-mirror memory regions */
	if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
		return true;

	/* skip nomap memory unless we were asked for it explicitly */
	if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
		return true;

	return false;
}
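
/*
 * Illustrative sketch (not part of the original file): the iterator
 * built on __next_reserved_mem_region() above, and the free range
 * iterators built on __next_mem_range() below, are used like this
 * (the printing is only for demonstration):
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	// every reserved region
 *	for_each_reserved_mem_region(i, &start, &end)
 *		pr_info("reserved: [%pa-%pa]\n", &start, &end);
 *
 *	// every free (memory && !reserved) region on any node
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		pr_info("free: [%pa-%pa]\n", &start, &end);
 */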

/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.  For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_mem_range(u64 *idx, int nid,
				      enum memblock_flags flags,
				      struct memblock_type *type_a,
				      struct memblock_type *type_b,
				      phys_addr_t *out_start,
				      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	"Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int	    m_nid = memblock_get_region_node(m);

		if (should_skip_region(m, nid, flags))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
					  enum memblock_flags flags,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;
Use NUMA_NO_NODE instead\n")) 1122560dca27SGrygorii Strashko nid = NUMA_NO_NODE; 11237bd0b0f0STejun Heo 11247bd0b0f0STejun Heo if (*idx == (u64)ULLONG_MAX) { 1125f1af9d3aSPhilipp Hachtmann idx_a = type_a->cnt - 1; 1126e47608abSzijun_hu if (type_b != NULL) 1127f1af9d3aSPhilipp Hachtmann idx_b = type_b->cnt; 1128e47608abSzijun_hu else 1129e47608abSzijun_hu idx_b = 0; 11307bd0b0f0STejun Heo } 11317bd0b0f0STejun Heo 1132f1af9d3aSPhilipp Hachtmann for (; idx_a >= 0; idx_a--) { 1133f1af9d3aSPhilipp Hachtmann struct memblock_region *m = &type_a->regions[idx_a]; 1134f1af9d3aSPhilipp Hachtmann 11357bd0b0f0STejun Heo phys_addr_t m_start = m->base; 11367bd0b0f0STejun Heo phys_addr_t m_end = m->base + m->size; 1137f1af9d3aSPhilipp Hachtmann int m_nid = memblock_get_region_node(m); 11387bd0b0f0STejun Heo 1139c9a688a3SMike Rapoport if (should_skip_region(m, nid, flags)) 1140bf3d3cc5SArd Biesheuvel continue; 1141bf3d3cc5SArd Biesheuvel 1142f1af9d3aSPhilipp Hachtmann if (!type_b) { 1143f1af9d3aSPhilipp Hachtmann if (out_start) 1144f1af9d3aSPhilipp Hachtmann *out_start = m_start; 1145f1af9d3aSPhilipp Hachtmann if (out_end) 1146f1af9d3aSPhilipp Hachtmann *out_end = m_end; 1147f1af9d3aSPhilipp Hachtmann if (out_nid) 1148f1af9d3aSPhilipp Hachtmann *out_nid = m_nid; 1149fb399b48Szijun_hu idx_a--; 1150f1af9d3aSPhilipp Hachtmann *idx = (u32)idx_a | (u64)idx_b << 32; 1151f1af9d3aSPhilipp Hachtmann return; 1152f1af9d3aSPhilipp Hachtmann } 11537bd0b0f0STejun Heo 1154f1af9d3aSPhilipp Hachtmann /* scan areas before each reservation */ 1155f1af9d3aSPhilipp Hachtmann for (; idx_b >= 0; idx_b--) { 1156f1af9d3aSPhilipp Hachtmann struct memblock_region *r; 1157f1af9d3aSPhilipp Hachtmann phys_addr_t r_start; 1158f1af9d3aSPhilipp Hachtmann phys_addr_t r_end; 1159f1af9d3aSPhilipp Hachtmann 1160f1af9d3aSPhilipp Hachtmann r = &type_b->regions[idx_b]; 1161f1af9d3aSPhilipp Hachtmann r_start = idx_b ? r[-1].base + r[-1].size : 0; 1162f1af9d3aSPhilipp Hachtmann r_end = idx_b < type_b->cnt ? 11631c4bc43dSStefan Agner r->base : PHYS_ADDR_MAX; 1164f1af9d3aSPhilipp Hachtmann /* 1165f1af9d3aSPhilipp Hachtmann * if idx_b advanced past idx_a, 1166f1af9d3aSPhilipp Hachtmann * break out to advance idx_a 1167f1af9d3aSPhilipp Hachtmann */ 1168f1af9d3aSPhilipp Hachtmann 11697bd0b0f0STejun Heo if (r_end <= m_start) 11707bd0b0f0STejun Heo break; 11717bd0b0f0STejun Heo /* if the two regions intersect, we're done */ 11727bd0b0f0STejun Heo if (m_end > r_start) { 11737bd0b0f0STejun Heo if (out_start) 11747bd0b0f0STejun Heo *out_start = max(m_start, r_start); 11757bd0b0f0STejun Heo if (out_end) 11767bd0b0f0STejun Heo *out_end = min(m_end, r_end); 11777bd0b0f0STejun Heo if (out_nid) 1178f1af9d3aSPhilipp Hachtmann *out_nid = m_nid; 11797bd0b0f0STejun Heo if (m_start >= r_start) 1180f1af9d3aSPhilipp Hachtmann idx_a--; 11817bd0b0f0STejun Heo else 1182f1af9d3aSPhilipp Hachtmann idx_b--; 1183f1af9d3aSPhilipp Hachtmann *idx = (u32)idx_a | (u64)idx_b << 32; 11847bd0b0f0STejun Heo return; 11857bd0b0f0STejun Heo } 11867bd0b0f0STejun Heo } 11877bd0b0f0STejun Heo } 1188f1af9d3aSPhilipp Hachtmann /* signal end of iteration */ 11897bd0b0f0STejun Heo *idx = ULLONG_MAX; 11907bd0b0f0STejun Heo } 11917bd0b0f0STejun Heo 11927c0caeb8STejun Heo #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 11937c0caeb8STejun Heo /* 119445e79815SChen Chang * Common iterator interface used to define for_each_mem_pfn_range(). 
11957c0caeb8STejun Heo */ 11967c0caeb8STejun Heo void __init_memblock __next_mem_pfn_range(int *idx, int nid, 11977c0caeb8STejun Heo unsigned long *out_start_pfn, 11987c0caeb8STejun Heo unsigned long *out_end_pfn, int *out_nid) 11997c0caeb8STejun Heo { 12007c0caeb8STejun Heo struct memblock_type *type = &memblock.memory; 12017c0caeb8STejun Heo struct memblock_region *r; 12027c0caeb8STejun Heo 12037c0caeb8STejun Heo while (++*idx < type->cnt) { 12047c0caeb8STejun Heo r = &type->regions[*idx]; 12057c0caeb8STejun Heo 12067c0caeb8STejun Heo if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size)) 12077c0caeb8STejun Heo continue; 12087c0caeb8STejun Heo if (nid == MAX_NUMNODES || nid == r->nid) 12097c0caeb8STejun Heo break; 12107c0caeb8STejun Heo } 12117c0caeb8STejun Heo if (*idx >= type->cnt) { 12127c0caeb8STejun Heo *idx = -1; 12137c0caeb8STejun Heo return; 12147c0caeb8STejun Heo } 12157c0caeb8STejun Heo 12167c0caeb8STejun Heo if (out_start_pfn) 12177c0caeb8STejun Heo *out_start_pfn = PFN_UP(r->base); 12187c0caeb8STejun Heo if (out_end_pfn) 12197c0caeb8STejun Heo *out_end_pfn = PFN_DOWN(r->base + r->size); 12207c0caeb8STejun Heo if (out_nid) 12217c0caeb8STejun Heo *out_nid = r->nid; 12227c0caeb8STejun Heo } 12237c0caeb8STejun Heo 12247c0caeb8STejun Heo /** 12257c0caeb8STejun Heo * memblock_set_node - set node ID on memblock regions 12267c0caeb8STejun Heo * @base: base of area to set node ID for 12277c0caeb8STejun Heo * @size: size of area to set node ID for 1228e7e8de59STang Chen * @type: memblock type to set node ID for 12297c0caeb8STejun Heo * @nid: node ID to set 12307c0caeb8STejun Heo * 1231e7e8de59STang Chen * Set the nid of memblock @type regions in [@base, @base + @size) to @nid. 12327c0caeb8STejun Heo * Regions which cross the area boundaries are split as necessary. 12337c0caeb8STejun Heo * 123447cec443SMike Rapoport * Return: 12357c0caeb8STejun Heo * 0 on success, -errno on failure. 12367c0caeb8STejun Heo */ 12377c0caeb8STejun Heo int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size, 1238e7e8de59STang Chen struct memblock_type *type, int nid) 12397c0caeb8STejun Heo { 12406a9ceb31STejun Heo int start_rgn, end_rgn; 12416a9ceb31STejun Heo int i, ret; 12427c0caeb8STejun Heo 12436a9ceb31STejun Heo ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn); 12446a9ceb31STejun Heo if (ret) 12456a9ceb31STejun Heo return ret; 12467c0caeb8STejun Heo 12476a9ceb31STejun Heo for (i = start_rgn; i < end_rgn; i++) 1248e9d24ad3SWanpeng Li memblock_set_region_node(&type->regions[i], nid); 12497c0caeb8STejun Heo 12507c0caeb8STejun Heo memblock_merge_regions(type); 12517c0caeb8STejun Heo return 0; 12527c0caeb8STejun Heo } 12537c0caeb8STejun Heo #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ 1254837566e7SAlexander Duyck #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 1255837566e7SAlexander Duyck /** 1256837566e7SAlexander Duyck * __next_mem_pfn_range_in_zone - iterator for for_each_*_range_in_zone() 1257837566e7SAlexander Duyck * 1258837566e7SAlexander Duyck * @idx: pointer to u64 loop variable 1259837566e7SAlexander Duyck * @zone: zone in which all of the memory blocks reside 1260837566e7SAlexander Duyck * @out_spfn: ptr to ulong for start pfn of the range, can be %NULL 1261837566e7SAlexander Duyck * @out_epfn: ptr to ulong for end pfn of the range, can be %NULL 1262837566e7SAlexander Duyck * 1263837566e7SAlexander Duyck * This function is meant to be a zone/pfn specific wrapper for the 1264837566e7SAlexander Duyck * for_each_mem_range type iterators. 
Specifically they are used in the 1265837566e7SAlexander Duyck * deferred memory init routines and as such we were duplicating much of 1266837566e7SAlexander Duyck * this logic throughout the code. So instead of having it in multiple 1267837566e7SAlexander Duyck * locations it seemed like it would make more sense to centralize this to 1268837566e7SAlexander Duyck * one new iterator that does everything they need. 1269837566e7SAlexander Duyck */ 1270837566e7SAlexander Duyck void __init_memblock 1271837566e7SAlexander Duyck __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone, 1272837566e7SAlexander Duyck unsigned long *out_spfn, unsigned long *out_epfn) 1273837566e7SAlexander Duyck { 1274837566e7SAlexander Duyck int zone_nid = zone_to_nid(zone); 1275837566e7SAlexander Duyck phys_addr_t spa, epa; 1276837566e7SAlexander Duyck int nid; 1277837566e7SAlexander Duyck 1278837566e7SAlexander Duyck __next_mem_range(idx, zone_nid, MEMBLOCK_NONE, 1279837566e7SAlexander Duyck &memblock.memory, &memblock.reserved, 1280837566e7SAlexander Duyck &spa, &epa, &nid); 1281837566e7SAlexander Duyck 1282837566e7SAlexander Duyck while (*idx != U64_MAX) { 1283837566e7SAlexander Duyck unsigned long epfn = PFN_DOWN(epa); 1284837566e7SAlexander Duyck unsigned long spfn = PFN_UP(spa); 1285837566e7SAlexander Duyck 1286837566e7SAlexander Duyck /* 1287837566e7SAlexander Duyck * Verify the end is at least past the start of the zone and 1288837566e7SAlexander Duyck * that we have at least one PFN to initialize. 1289837566e7SAlexander Duyck */ 1290837566e7SAlexander Duyck if (zone->zone_start_pfn < epfn && spfn < epfn) { 1291837566e7SAlexander Duyck /* if we went too far just stop searching */ 1292837566e7SAlexander Duyck if (zone_end_pfn(zone) <= spfn) { 1293837566e7SAlexander Duyck *idx = U64_MAX; 1294837566e7SAlexander Duyck break; 1295837566e7SAlexander Duyck } 1296837566e7SAlexander Duyck 1297837566e7SAlexander Duyck if (out_spfn) 1298837566e7SAlexander Duyck *out_spfn = max(zone->zone_start_pfn, spfn); 1299837566e7SAlexander Duyck if (out_epfn) 1300837566e7SAlexander Duyck *out_epfn = min(zone_end_pfn(zone), epfn); 1301837566e7SAlexander Duyck 1302837566e7SAlexander Duyck return; 1303837566e7SAlexander Duyck } 1304837566e7SAlexander Duyck 1305837566e7SAlexander Duyck __next_mem_range(idx, zone_nid, MEMBLOCK_NONE, 1306837566e7SAlexander Duyck &memblock.memory, &memblock.reserved, 1307837566e7SAlexander Duyck &spa, &epa, &nid); 1308837566e7SAlexander Duyck } 1309837566e7SAlexander Duyck 1310837566e7SAlexander Duyck /* signal end of iteration */ 1311837566e7SAlexander Duyck if (out_spfn) 1312837566e7SAlexander Duyck *out_spfn = ULONG_MAX; 1313837566e7SAlexander Duyck if (out_epfn) 1314837566e7SAlexander Duyck *out_epfn = 0; 1315837566e7SAlexander Duyck } 1316837566e7SAlexander Duyck 1317837566e7SAlexander Duyck #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ 13187c0caeb8STejun Heo 131992d12f95SMike Rapoport /** 132092d12f95SMike Rapoport * memblock_alloc_range_nid - allocate boot memory block 132192d12f95SMike Rapoport * @size: size of memory block to be allocated in bytes 132292d12f95SMike Rapoport * @align: alignment of the region and block's size 132392d12f95SMike Rapoport * @start: the lower bound of the memory region to allocate (phys address) 132492d12f95SMike Rapoport * @end: the upper bound of the memory region to allocate (phys address) 132592d12f95SMike Rapoport * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 132692d12f95SMike Rapoport * 132792d12f95SMike Rapoport * The allocation is performed 
from memory region limited by
132892d12f95SMike Rapoport * memblock.current_limit if @end == %MEMBLOCK_ALLOC_ACCESSIBLE.
132992d12f95SMike Rapoport *
133092d12f95SMike Rapoport * If the specified node cannot hold the requested memory, the
133192d12f95SMike Rapoport * allocation falls back to any node in the system.
133292d12f95SMike Rapoport *
133392d12f95SMike Rapoport * For systems with memory mirroring, the allocation is attempted first
133492d12f95SMike Rapoport * from the regions with mirroring enabled and then retried from any
133592d12f95SMike Rapoport * memory region.
133692d12f95SMike Rapoport *
133792d12f95SMike Rapoport * In addition, the function sets the min_count to 0 using kmemleak_alloc_phys
133892d12f95SMike Rapoport * for the allocated boot memory block, so that it is never reported as a leak.
133992d12f95SMike Rapoport *
134092d12f95SMike Rapoport * Return:
134192d12f95SMike Rapoport * Physical address of allocated memory block on success, %0 on failure.
134292d12f95SMike Rapoport */
13432bfc2862SAkinobu Mita static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
13442bfc2862SAkinobu Mita phys_addr_t align, phys_addr_t start,
134592d12f95SMike Rapoport phys_addr_t end, int nid)
134695f72d1eSYinghai Lu {
134792d12f95SMike Rapoport enum memblock_flags flags = choose_memblock_flags();
13486ed311b2SBenjamin Herrenschmidt phys_addr_t found;
134995f72d1eSYinghai Lu 
135092d12f95SMike Rapoport if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
135192d12f95SMike Rapoport nid = NUMA_NO_NODE;
135292d12f95SMike Rapoport 
13532f770806SMike Rapoport if (!align) {
13542f770806SMike Rapoport /* Can't use WARNs this early in boot on powerpc */
13552f770806SMike Rapoport dump_stack();
13562f770806SMike Rapoport align = SMP_CACHE_BYTES;
13572f770806SMike Rapoport }
13582f770806SMike Rapoport 
135992d12f95SMike Rapoport again:
1360fc6daaf9STony Luck found = memblock_find_in_range_node(size, align, start, end, nid,
1361fc6daaf9STony Luck flags);
136292d12f95SMike Rapoport if (found && !memblock_reserve(found, size))
136392d12f95SMike Rapoport goto done;
136492d12f95SMike Rapoport 
136592d12f95SMike Rapoport if (nid != NUMA_NO_NODE) {
136692d12f95SMike Rapoport found = memblock_find_in_range_node(size, align, start,
136792d12f95SMike Rapoport end, NUMA_NO_NODE,
136892d12f95SMike Rapoport flags);
136992d12f95SMike Rapoport if (found && !memblock_reserve(found, size))
137092d12f95SMike Rapoport goto done;
137192d12f95SMike Rapoport }
137292d12f95SMike Rapoport 
137392d12f95SMike Rapoport if (flags & MEMBLOCK_MIRROR) {
137492d12f95SMike Rapoport flags &= ~MEMBLOCK_MIRROR;
137592d12f95SMike Rapoport pr_warn("Could not allocate %pap bytes of mirrored memory\n",
137692d12f95SMike Rapoport &size);
137792d12f95SMike Rapoport goto again;
137892d12f95SMike Rapoport }
137992d12f95SMike Rapoport 
138092d12f95SMike Rapoport return 0;
138192d12f95SMike Rapoport 
138292d12f95SMike Rapoport done:
138392d12f95SMike Rapoport /* Skip kmemleak for kasan_init() due to high volume. */
138492d12f95SMike Rapoport if (end != MEMBLOCK_ALLOC_KASAN)
1385aedf95eaSCatalin Marinas /*
138692d12f95SMike Rapoport * The min_count is set to 0 so that memblock allocated
138792d12f95SMike Rapoport * blocks are never reported as leaks. This is because many
138892d12f95SMike Rapoport * of these blocks are only referred via the physical
138992d12f95SMike Rapoport * address which is not looked up by kmemleak.
1390aedf95eaSCatalin Marinas */
13919099daedSCatalin Marinas kmemleak_alloc_phys(found, size, 0, 0);
139292d12f95SMike Rapoport 
13936ed311b2SBenjamin Herrenschmidt return found;
1394aedf95eaSCatalin Marinas }
139595f72d1eSYinghai Lu 
1396a2974133SMike Rapoport /**
1397a2974133SMike Rapoport * memblock_phys_alloc_range - allocate a memory block inside specified range
1398a2974133SMike Rapoport * @size: size of memory block to be allocated in bytes
1399a2974133SMike Rapoport * @align: alignment of the region and block's size
1400a2974133SMike Rapoport * @start: the lower bound of the memory region to allocate (physical address)
1401a2974133SMike Rapoport * @end: the upper bound of the memory region to allocate (physical address)
1402a2974133SMike Rapoport *
1403a2974133SMike Rapoport * Allocate @size bytes between @start and @end.
1404a2974133SMike Rapoport *
1405a2974133SMike Rapoport * Return: physical address of the allocated memory block on success,
1406a2974133SMike Rapoport * %0 on failure.
1407a2974133SMike Rapoport */
14088a770c2aSMike Rapoport phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
14098a770c2aSMike Rapoport phys_addr_t align,
14108a770c2aSMike Rapoport phys_addr_t start,
14118a770c2aSMike Rapoport phys_addr_t end)
14122bfc2862SAkinobu Mita {
141392d12f95SMike Rapoport return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE);
14147bd0b0f0STejun Heo }
14157bd0b0f0STejun Heo 
1416a2974133SMike Rapoport /**
1417a2974133SMike Rapoport * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node
1418a2974133SMike Rapoport * @size: size of memory block to be allocated in bytes
1419a2974133SMike Rapoport * @align: alignment of the region and block's size
1420a2974133SMike Rapoport * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1421a2974133SMike Rapoport *
1422a2974133SMike Rapoport * Allocates memory block from the specified NUMA node. If the node
1423a2974133SMike Rapoport * has no available memory, attempts to allocate from any node in the
1424a2974133SMike Rapoport * system.
1425a2974133SMike Rapoport *
1426a2974133SMike Rapoport * Return: physical address of the allocated memory block on success,
1427a2974133SMike Rapoport * %0 on failure.
1428a2974133SMike Rapoport */
14299a8dd708SMike Rapoport phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
14309d1e2492SBenjamin Herrenschmidt {
143133755574SMike Rapoport return memblock_alloc_range_nid(size, align, 0,
143292d12f95SMike Rapoport MEMBLOCK_ALLOC_ACCESSIBLE, nid);
143395f72d1eSYinghai Lu }
143495f72d1eSYinghai Lu 
143526f09e9bSSantosh Shilimkar /**
1436eb31d559SMike Rapoport * memblock_alloc_internal - allocate boot memory block
143726f09e9bSSantosh Shilimkar * @size: size of memory block to be allocated in bytes
143826f09e9bSSantosh Shilimkar * @align: alignment of the region and block's size
143926f09e9bSSantosh Shilimkar * @min_addr: the lower bound of the memory region to allocate (phys address)
144026f09e9bSSantosh Shilimkar * @max_addr: the upper bound of the memory region to allocate (phys address)
144126f09e9bSSantosh Shilimkar * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
144226f09e9bSSantosh Shilimkar *
144392d12f95SMike Rapoport * Allocates memory block using memblock_alloc_range_nid() and
144492d12f95SMike Rapoport * converts the returned physical address to virtual.
144592d12f95SMike Rapoport * 144626f09e9bSSantosh Shilimkar * The @min_addr limit is dropped if it can not be satisfied and the allocation 144792d12f95SMike Rapoport * will fall back to memory below @min_addr. Other constraints, such 144892d12f95SMike Rapoport * as node and mirrored memory will be handled again in 144992d12f95SMike Rapoport * memblock_alloc_range_nid(). 145026f09e9bSSantosh Shilimkar * 145147cec443SMike Rapoport * Return: 145226f09e9bSSantosh Shilimkar * Virtual address of allocated memory block on success, NULL on failure. 145326f09e9bSSantosh Shilimkar */ 1454eb31d559SMike Rapoport static void * __init memblock_alloc_internal( 145526f09e9bSSantosh Shilimkar phys_addr_t size, phys_addr_t align, 145626f09e9bSSantosh Shilimkar phys_addr_t min_addr, phys_addr_t max_addr, 145726f09e9bSSantosh Shilimkar int nid) 145826f09e9bSSantosh Shilimkar { 145926f09e9bSSantosh Shilimkar phys_addr_t alloc; 146026f09e9bSSantosh Shilimkar 146126f09e9bSSantosh Shilimkar /* 146226f09e9bSSantosh Shilimkar * Detect any accidental use of these APIs after slab is ready, as at 146326f09e9bSSantosh Shilimkar * this moment memblock may be deinitialized already and its 1464c6ffc5caSMike Rapoport * internal data may be destroyed (after execution of memblock_free_all) 146526f09e9bSSantosh Shilimkar */ 146626f09e9bSSantosh Shilimkar if (WARN_ON_ONCE(slab_is_available())) 146726f09e9bSSantosh Shilimkar return kzalloc_node(size, GFP_NOWAIT, nid); 146826f09e9bSSantosh Shilimkar 1469*f3057ad7SMike Rapoport if (max_addr > memblock.current_limit) 1470*f3057ad7SMike Rapoport max_addr = memblock.current_limit; 1471*f3057ad7SMike Rapoport 147292d12f95SMike Rapoport alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid); 14732f770806SMike Rapoport 147492d12f95SMike Rapoport /* retry allocation without lower limit */ 147592d12f95SMike Rapoport if (!alloc && min_addr) 147692d12f95SMike Rapoport alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid); 147726f09e9bSSantosh Shilimkar 147892d12f95SMike Rapoport if (!alloc) 1479a3f5bafcSTony Luck return NULL; 148026f09e9bSSantosh Shilimkar 148192d12f95SMike Rapoport return phys_to_virt(alloc); 148226f09e9bSSantosh Shilimkar } 148326f09e9bSSantosh Shilimkar 148426f09e9bSSantosh Shilimkar /** 1485eb31d559SMike Rapoport * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing 1486ea1f5f37SPavel Tatashin * memory and without panicking 1487ea1f5f37SPavel Tatashin * @size: size of memory block to be allocated in bytes 1488ea1f5f37SPavel Tatashin * @align: alignment of the region and block's size 1489ea1f5f37SPavel Tatashin * @min_addr: the lower bound of the memory region from where the allocation 1490ea1f5f37SPavel Tatashin * is preferred (phys address) 1491ea1f5f37SPavel Tatashin * @max_addr: the upper bound of the memory region from where the allocation 149297ad1087SMike Rapoport * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to 1493ea1f5f37SPavel Tatashin * allocate only from memory limited by memblock.current_limit value 1494ea1f5f37SPavel Tatashin * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 1495ea1f5f37SPavel Tatashin * 1496ea1f5f37SPavel Tatashin * Public function, provides additional debug information (including caller 1497ea1f5f37SPavel Tatashin * info), if enabled. Does not zero allocated memory, does not panic if request 1498ea1f5f37SPavel Tatashin * cannot be satisfied. 
1499ea1f5f37SPavel Tatashin * 150047cec443SMike Rapoport * Return: 1501ea1f5f37SPavel Tatashin * Virtual address of allocated memory block on success, NULL on failure. 1502ea1f5f37SPavel Tatashin */ 1503eb31d559SMike Rapoport void * __init memblock_alloc_try_nid_raw( 1504ea1f5f37SPavel Tatashin phys_addr_t size, phys_addr_t align, 1505ea1f5f37SPavel Tatashin phys_addr_t min_addr, phys_addr_t max_addr, 1506ea1f5f37SPavel Tatashin int nid) 1507ea1f5f37SPavel Tatashin { 1508ea1f5f37SPavel Tatashin void *ptr; 1509ea1f5f37SPavel Tatashin 1510d75f773cSSakari Ailus memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n", 1511a36aab89SMike Rapoport __func__, (u64)size, (u64)align, nid, &min_addr, 1512a36aab89SMike Rapoport &max_addr, (void *)_RET_IP_); 1513ea1f5f37SPavel Tatashin 1514eb31d559SMike Rapoport ptr = memblock_alloc_internal(size, align, 1515ea1f5f37SPavel Tatashin min_addr, max_addr, nid); 1516ea1f5f37SPavel Tatashin if (ptr && size > 0) 1517f682a97aSAlexander Duyck page_init_poison(ptr, size); 1518f682a97aSAlexander Duyck 1519ea1f5f37SPavel Tatashin return ptr; 1520ea1f5f37SPavel Tatashin } 1521ea1f5f37SPavel Tatashin 1522ea1f5f37SPavel Tatashin /** 1523c0dbe825SMike Rapoport * memblock_alloc_try_nid - allocate boot memory block 152426f09e9bSSantosh Shilimkar * @size: size of memory block to be allocated in bytes 152526f09e9bSSantosh Shilimkar * @align: alignment of the region and block's size 152626f09e9bSSantosh Shilimkar * @min_addr: the lower bound of the memory region from where the allocation 152726f09e9bSSantosh Shilimkar * is preferred (phys address) 152826f09e9bSSantosh Shilimkar * @max_addr: the upper bound of the memory region from where the allocation 152997ad1087SMike Rapoport * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to 153026f09e9bSSantosh Shilimkar * allocate only from memory limited by memblock.current_limit value 153126f09e9bSSantosh Shilimkar * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 153226f09e9bSSantosh Shilimkar * 1533c0dbe825SMike Rapoport * Public function, provides additional debug information (including caller 1534c0dbe825SMike Rapoport * info), if enabled. This function zeroes the allocated memory. 153526f09e9bSSantosh Shilimkar * 153647cec443SMike Rapoport * Return: 153726f09e9bSSantosh Shilimkar * Virtual address of allocated memory block on success, NULL on failure. 
153826f09e9bSSantosh Shilimkar */ 1539eb31d559SMike Rapoport void * __init memblock_alloc_try_nid( 154026f09e9bSSantosh Shilimkar phys_addr_t size, phys_addr_t align, 154126f09e9bSSantosh Shilimkar phys_addr_t min_addr, phys_addr_t max_addr, 154226f09e9bSSantosh Shilimkar int nid) 154326f09e9bSSantosh Shilimkar { 154426f09e9bSSantosh Shilimkar void *ptr; 154526f09e9bSSantosh Shilimkar 1546d75f773cSSakari Ailus memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n", 1547a36aab89SMike Rapoport __func__, (u64)size, (u64)align, nid, &min_addr, 1548a36aab89SMike Rapoport &max_addr, (void *)_RET_IP_); 1549eb31d559SMike Rapoport ptr = memblock_alloc_internal(size, align, 155026f09e9bSSantosh Shilimkar min_addr, max_addr, nid); 1551c0dbe825SMike Rapoport if (ptr) 1552ea1f5f37SPavel Tatashin memset(ptr, 0, size); 155326f09e9bSSantosh Shilimkar 1554c0dbe825SMike Rapoport return ptr; 155526f09e9bSSantosh Shilimkar } 155626f09e9bSSantosh Shilimkar 155726f09e9bSSantosh Shilimkar /** 1558a2974133SMike Rapoport * __memblock_free_late - free pages directly to buddy allocator 155948a833ccSMike Rapoport * @base: phys starting address of the boot memory block 156026f09e9bSSantosh Shilimkar * @size: size of the boot memory block in bytes 156126f09e9bSSantosh Shilimkar * 1562a2974133SMike Rapoport * This is only useful when the memblock allocator has already been torn 156326f09e9bSSantosh Shilimkar * down, but we are still initializing the system. Pages are released directly 1564a2974133SMike Rapoport * to the buddy allocator. 156526f09e9bSSantosh Shilimkar */ 156626f09e9bSSantosh Shilimkar void __init __memblock_free_late(phys_addr_t base, phys_addr_t size) 156726f09e9bSSantosh Shilimkar { 1568a36aab89SMike Rapoport phys_addr_t cursor, end; 156926f09e9bSSantosh Shilimkar 1570a36aab89SMike Rapoport end = base + size - 1; 1571d75f773cSSakari Ailus memblock_dbg("%s: [%pa-%pa] %pS\n", 1572a36aab89SMike Rapoport __func__, &base, &end, (void *)_RET_IP_); 15739099daedSCatalin Marinas kmemleak_free_part_phys(base, size); 157426f09e9bSSantosh Shilimkar cursor = PFN_UP(base); 157526f09e9bSSantosh Shilimkar end = PFN_DOWN(base + size); 157626f09e9bSSantosh Shilimkar 157726f09e9bSSantosh Shilimkar for (; cursor < end; cursor++) { 15787c2ee349SMike Rapoport memblock_free_pages(pfn_to_page(cursor), cursor, 0); 1579ca79b0c2SArun KS totalram_pages_inc(); 158026f09e9bSSantosh Shilimkar } 158126f09e9bSSantosh Shilimkar } 15829d1e2492SBenjamin Herrenschmidt 15839d1e2492SBenjamin Herrenschmidt /* 15849d1e2492SBenjamin Herrenschmidt * Remaining API functions 15859d1e2492SBenjamin Herrenschmidt */ 15869d1e2492SBenjamin Herrenschmidt 15871f1ffb8aSDavid Gibson phys_addr_t __init_memblock memblock_phys_mem_size(void) 158895f72d1eSYinghai Lu { 15891440c4e2STejun Heo return memblock.memory.total_size; 159095f72d1eSYinghai Lu } 159195f72d1eSYinghai Lu 15928907de5dSSrikar Dronamraju phys_addr_t __init_memblock memblock_reserved_size(void) 15938907de5dSSrikar Dronamraju { 15948907de5dSSrikar Dronamraju return memblock.reserved.total_size; 15958907de5dSSrikar Dronamraju } 15968907de5dSSrikar Dronamraju 1597595ad9afSYinghai Lu phys_addr_t __init memblock_mem_size(unsigned long limit_pfn) 1598595ad9afSYinghai Lu { 1599595ad9afSYinghai Lu unsigned long pages = 0; 1600595ad9afSYinghai Lu struct memblock_region *r; 1601595ad9afSYinghai Lu unsigned long start_pfn, end_pfn; 1602595ad9afSYinghai Lu 1603595ad9afSYinghai Lu for_each_memblock(memory, r) { 1604595ad9afSYinghai Lu start_pfn = memblock_region_memory_base_pfn(r); 
1605595ad9afSYinghai Lu end_pfn = memblock_region_memory_end_pfn(r); 1606595ad9afSYinghai Lu start_pfn = min_t(unsigned long, start_pfn, limit_pfn); 1607595ad9afSYinghai Lu end_pfn = min_t(unsigned long, end_pfn, limit_pfn); 1608595ad9afSYinghai Lu pages += end_pfn - start_pfn; 1609595ad9afSYinghai Lu } 1610595ad9afSYinghai Lu 161116763230SFabian Frederick return PFN_PHYS(pages); 1612595ad9afSYinghai Lu } 1613595ad9afSYinghai Lu 16140a93ebefSSam Ravnborg /* lowest address */ 16150a93ebefSSam Ravnborg phys_addr_t __init_memblock memblock_start_of_DRAM(void) 16160a93ebefSSam Ravnborg { 16170a93ebefSSam Ravnborg return memblock.memory.regions[0].base; 16180a93ebefSSam Ravnborg } 16190a93ebefSSam Ravnborg 162010d06439SYinghai Lu phys_addr_t __init_memblock memblock_end_of_DRAM(void) 162195f72d1eSYinghai Lu { 162295f72d1eSYinghai Lu int idx = memblock.memory.cnt - 1; 162395f72d1eSYinghai Lu 1624e3239ff9SBenjamin Herrenschmidt return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size); 162595f72d1eSYinghai Lu } 162695f72d1eSYinghai Lu 1627a571d4ebSDennis Chen static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit) 162895f72d1eSYinghai Lu { 16291c4bc43dSStefan Agner phys_addr_t max_addr = PHYS_ADDR_MAX; 1630136199f0SEmil Medve struct memblock_region *r; 163195f72d1eSYinghai Lu 1632a571d4ebSDennis Chen /* 1633a571d4ebSDennis Chen * translate the memory @limit size into the max address within one of 1634a571d4ebSDennis Chen * the memory memblock regions, if the @limit exceeds the total size 16351c4bc43dSStefan Agner * of those regions, max_addr will keep original value PHYS_ADDR_MAX 1636a571d4ebSDennis Chen */ 1637136199f0SEmil Medve for_each_memblock(memory, r) { 1638c0ce8fefSTejun Heo if (limit <= r->size) { 1639c0ce8fefSTejun Heo max_addr = r->base + limit; 164095f72d1eSYinghai Lu break; 164195f72d1eSYinghai Lu } 1642c0ce8fefSTejun Heo limit -= r->size; 164395f72d1eSYinghai Lu } 1644c0ce8fefSTejun Heo 1645a571d4ebSDennis Chen return max_addr; 1646a571d4ebSDennis Chen } 1647a571d4ebSDennis Chen 1648a571d4ebSDennis Chen void __init memblock_enforce_memory_limit(phys_addr_t limit) 1649a571d4ebSDennis Chen { 16501c4bc43dSStefan Agner phys_addr_t max_addr = PHYS_ADDR_MAX; 1651a571d4ebSDennis Chen 1652a571d4ebSDennis Chen if (!limit) 1653a571d4ebSDennis Chen return; 1654a571d4ebSDennis Chen 1655a571d4ebSDennis Chen max_addr = __find_max_addr(limit); 1656a571d4ebSDennis Chen 1657a571d4ebSDennis Chen /* @limit exceeds the total size of the memory, do nothing */ 16581c4bc43dSStefan Agner if (max_addr == PHYS_ADDR_MAX) 1659a571d4ebSDennis Chen return; 1660a571d4ebSDennis Chen 1661c0ce8fefSTejun Heo /* truncate both memory and reserved regions */ 1662f1af9d3aSPhilipp Hachtmann memblock_remove_range(&memblock.memory, max_addr, 16631c4bc43dSStefan Agner PHYS_ADDR_MAX); 1664f1af9d3aSPhilipp Hachtmann memblock_remove_range(&memblock.reserved, max_addr, 16651c4bc43dSStefan Agner PHYS_ADDR_MAX); 166695f72d1eSYinghai Lu } 166795f72d1eSYinghai Lu 1668c9ca9b4eSAKASHI Takahiro void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size) 1669c9ca9b4eSAKASHI Takahiro { 1670c9ca9b4eSAKASHI Takahiro int start_rgn, end_rgn; 1671c9ca9b4eSAKASHI Takahiro int i, ret; 1672c9ca9b4eSAKASHI Takahiro 1673c9ca9b4eSAKASHI Takahiro if (!size) 1674c9ca9b4eSAKASHI Takahiro return; 1675c9ca9b4eSAKASHI Takahiro 1676c9ca9b4eSAKASHI Takahiro ret = memblock_isolate_range(&memblock.memory, base, size, 1677c9ca9b4eSAKASHI Takahiro &start_rgn, &end_rgn); 1678c9ca9b4eSAKASHI Takahiro if (ret) 
1679c9ca9b4eSAKASHI Takahiro return; 1680c9ca9b4eSAKASHI Takahiro 1681c9ca9b4eSAKASHI Takahiro /* remove all the MAP regions */ 1682c9ca9b4eSAKASHI Takahiro for (i = memblock.memory.cnt - 1; i >= end_rgn; i--) 1683c9ca9b4eSAKASHI Takahiro if (!memblock_is_nomap(&memblock.memory.regions[i])) 1684c9ca9b4eSAKASHI Takahiro memblock_remove_region(&memblock.memory, i); 1685c9ca9b4eSAKASHI Takahiro 1686c9ca9b4eSAKASHI Takahiro for (i = start_rgn - 1; i >= 0; i--) 1687c9ca9b4eSAKASHI Takahiro if (!memblock_is_nomap(&memblock.memory.regions[i])) 1688c9ca9b4eSAKASHI Takahiro memblock_remove_region(&memblock.memory, i); 1689c9ca9b4eSAKASHI Takahiro 1690c9ca9b4eSAKASHI Takahiro /* truncate the reserved regions */ 1691c9ca9b4eSAKASHI Takahiro memblock_remove_range(&memblock.reserved, 0, base); 1692c9ca9b4eSAKASHI Takahiro memblock_remove_range(&memblock.reserved, 16931c4bc43dSStefan Agner base + size, PHYS_ADDR_MAX); 1694c9ca9b4eSAKASHI Takahiro } 1695c9ca9b4eSAKASHI Takahiro 1696a571d4ebSDennis Chen void __init memblock_mem_limit_remove_map(phys_addr_t limit) 1697a571d4ebSDennis Chen { 1698a571d4ebSDennis Chen phys_addr_t max_addr; 1699a571d4ebSDennis Chen 1700a571d4ebSDennis Chen if (!limit) 1701a571d4ebSDennis Chen return; 1702a571d4ebSDennis Chen 1703a571d4ebSDennis Chen max_addr = __find_max_addr(limit); 1704a571d4ebSDennis Chen 1705a571d4ebSDennis Chen /* @limit exceeds the total size of the memory, do nothing */ 17061c4bc43dSStefan Agner if (max_addr == PHYS_ADDR_MAX) 1707a571d4ebSDennis Chen return; 1708a571d4ebSDennis Chen 1709c9ca9b4eSAKASHI Takahiro memblock_cap_memory_range(0, max_addr); 1710a571d4ebSDennis Chen } 1711a571d4ebSDennis Chen 1712cd79481dSYinghai Lu static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr) 171372d4b0b4SBenjamin Herrenschmidt { 171472d4b0b4SBenjamin Herrenschmidt unsigned int left = 0, right = type->cnt; 171572d4b0b4SBenjamin Herrenschmidt 171672d4b0b4SBenjamin Herrenschmidt do { 171772d4b0b4SBenjamin Herrenschmidt unsigned int mid = (right + left) / 2; 171872d4b0b4SBenjamin Herrenschmidt 171972d4b0b4SBenjamin Herrenschmidt if (addr < type->regions[mid].base) 172072d4b0b4SBenjamin Herrenschmidt right = mid; 172172d4b0b4SBenjamin Herrenschmidt else if (addr >= (type->regions[mid].base + 172272d4b0b4SBenjamin Herrenschmidt type->regions[mid].size)) 172372d4b0b4SBenjamin Herrenschmidt left = mid + 1; 172472d4b0b4SBenjamin Herrenschmidt else 172572d4b0b4SBenjamin Herrenschmidt return mid; 172672d4b0b4SBenjamin Herrenschmidt } while (left < right); 172772d4b0b4SBenjamin Herrenschmidt return -1; 172872d4b0b4SBenjamin Herrenschmidt } 172972d4b0b4SBenjamin Herrenschmidt 1730f5a222dcSYueyi Li bool __init_memblock memblock_is_reserved(phys_addr_t addr) 173195f72d1eSYinghai Lu { 173272d4b0b4SBenjamin Herrenschmidt return memblock_search(&memblock.reserved, addr) != -1; 173395f72d1eSYinghai Lu } 173472d4b0b4SBenjamin Herrenschmidt 1735b4ad0c7eSYaowei Bai bool __init_memblock memblock_is_memory(phys_addr_t addr) 173672d4b0b4SBenjamin Herrenschmidt { 173772d4b0b4SBenjamin Herrenschmidt return memblock_search(&memblock.memory, addr) != -1; 173872d4b0b4SBenjamin Herrenschmidt } 173972d4b0b4SBenjamin Herrenschmidt 1740937f0c26SYaowei Bai bool __init_memblock memblock_is_map_memory(phys_addr_t addr) 1741bf3d3cc5SArd Biesheuvel { 1742bf3d3cc5SArd Biesheuvel int i = memblock_search(&memblock.memory, addr); 1743bf3d3cc5SArd Biesheuvel 1744bf3d3cc5SArd Biesheuvel if (i == -1) 1745bf3d3cc5SArd Biesheuvel return false; 1746bf3d3cc5SArd Biesheuvel return 
!memblock_is_nomap(&memblock.memory.regions[i]); 1747bf3d3cc5SArd Biesheuvel } 1748bf3d3cc5SArd Biesheuvel 1749e76b63f8SYinghai Lu #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 1750e76b63f8SYinghai Lu int __init_memblock memblock_search_pfn_nid(unsigned long pfn, 1751e76b63f8SYinghai Lu unsigned long *start_pfn, unsigned long *end_pfn) 1752e76b63f8SYinghai Lu { 1753e76b63f8SYinghai Lu struct memblock_type *type = &memblock.memory; 175416763230SFabian Frederick int mid = memblock_search(type, PFN_PHYS(pfn)); 1755e76b63f8SYinghai Lu 1756e76b63f8SYinghai Lu if (mid == -1) 1757e76b63f8SYinghai Lu return -1; 1758e76b63f8SYinghai Lu 1759f7e2f7e8SFabian Frederick *start_pfn = PFN_DOWN(type->regions[mid].base); 1760f7e2f7e8SFabian Frederick *end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size); 1761e76b63f8SYinghai Lu 1762e76b63f8SYinghai Lu return type->regions[mid].nid; 1763e76b63f8SYinghai Lu } 1764e76b63f8SYinghai Lu #endif 1765e76b63f8SYinghai Lu 1766eab30949SStephen Boyd /** 1767eab30949SStephen Boyd * memblock_is_region_memory - check if a region is a subset of memory 1768eab30949SStephen Boyd * @base: base of region to check 1769eab30949SStephen Boyd * @size: size of region to check 1770eab30949SStephen Boyd * 1771eab30949SStephen Boyd * Check if the region [@base, @base + @size) is a subset of a memory block. 1772eab30949SStephen Boyd * 177347cec443SMike Rapoport * Return: 1774eab30949SStephen Boyd * 0 if false, non-zero if true 1775eab30949SStephen Boyd */ 1776937f0c26SYaowei Bai bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size) 177772d4b0b4SBenjamin Herrenschmidt { 1778abb65272STomi Valkeinen int idx = memblock_search(&memblock.memory, base); 1779eb18f1b5STejun Heo phys_addr_t end = base + memblock_cap_size(base, &size); 178072d4b0b4SBenjamin Herrenschmidt 178172d4b0b4SBenjamin Herrenschmidt if (idx == -1) 1782937f0c26SYaowei Bai return false; 1783ef415ef4SWei Yang return (memblock.memory.regions[idx].base + 1784eb18f1b5STejun Heo memblock.memory.regions[idx].size) >= end; 178595f72d1eSYinghai Lu } 178695f72d1eSYinghai Lu 1787eab30949SStephen Boyd /** 1788eab30949SStephen Boyd * memblock_is_region_reserved - check if a region intersects reserved memory 1789eab30949SStephen Boyd * @base: base of region to check 1790eab30949SStephen Boyd * @size: size of region to check 1791eab30949SStephen Boyd * 179247cec443SMike Rapoport * Check if the region [@base, @base + @size) intersects a reserved 179347cec443SMike Rapoport * memory block. 1794eab30949SStephen Boyd * 179547cec443SMike Rapoport * Return: 1796c5c5c9d1STang Chen * True if they intersect, false if not. 
1797eab30949SStephen Boyd */ 1798c5c5c9d1STang Chen bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size) 179995f72d1eSYinghai Lu { 1800eb18f1b5STejun Heo memblock_cap_size(base, &size); 1801c5c5c9d1STang Chen return memblock_overlaps_region(&memblock.reserved, base, size); 180295f72d1eSYinghai Lu } 180395f72d1eSYinghai Lu 18046ede1fd3SYinghai Lu void __init_memblock memblock_trim_memory(phys_addr_t align) 18056ede1fd3SYinghai Lu { 18066ede1fd3SYinghai Lu phys_addr_t start, end, orig_start, orig_end; 1807136199f0SEmil Medve struct memblock_region *r; 18086ede1fd3SYinghai Lu 1809136199f0SEmil Medve for_each_memblock(memory, r) { 1810136199f0SEmil Medve orig_start = r->base; 1811136199f0SEmil Medve orig_end = r->base + r->size; 18126ede1fd3SYinghai Lu start = round_up(orig_start, align); 18136ede1fd3SYinghai Lu end = round_down(orig_end, align); 18146ede1fd3SYinghai Lu 18156ede1fd3SYinghai Lu if (start == orig_start && end == orig_end) 18166ede1fd3SYinghai Lu continue; 18176ede1fd3SYinghai Lu 18186ede1fd3SYinghai Lu if (start < end) { 1819136199f0SEmil Medve r->base = start; 1820136199f0SEmil Medve r->size = end - start; 18216ede1fd3SYinghai Lu } else { 1822136199f0SEmil Medve memblock_remove_region(&memblock.memory, 1823136199f0SEmil Medve r - memblock.memory.regions); 1824136199f0SEmil Medve r--; 18256ede1fd3SYinghai Lu } 18266ede1fd3SYinghai Lu } 18276ede1fd3SYinghai Lu } 1828e63075a3SBenjamin Herrenschmidt 18293661ca66SYinghai Lu void __init_memblock memblock_set_current_limit(phys_addr_t limit) 1830e63075a3SBenjamin Herrenschmidt { 1831e63075a3SBenjamin Herrenschmidt memblock.current_limit = limit; 1832e63075a3SBenjamin Herrenschmidt } 1833e63075a3SBenjamin Herrenschmidt 1834fec51014SLaura Abbott phys_addr_t __init_memblock memblock_get_current_limit(void) 1835fec51014SLaura Abbott { 1836fec51014SLaura Abbott return memblock.current_limit; 1837fec51014SLaura Abbott } 1838fec51014SLaura Abbott 18390262d9c8SHeiko Carstens static void __init_memblock memblock_dump(struct memblock_type *type) 18406ed311b2SBenjamin Herrenschmidt { 18415d63f81cSMiles Chen phys_addr_t base, end, size; 1842e1720feeSMike Rapoport enum memblock_flags flags; 18438c9c1701SAlexander Kuleshov int idx; 18448c9c1701SAlexander Kuleshov struct memblock_region *rgn; 18456ed311b2SBenjamin Herrenschmidt 18460262d9c8SHeiko Carstens pr_info(" %s.cnt = 0x%lx\n", type->name, type->cnt); 18476ed311b2SBenjamin Herrenschmidt 184866e8b438SGioh Kim for_each_memblock_type(idx, type, rgn) { 18497c0caeb8STejun Heo char nid_buf[32] = ""; 18506ed311b2SBenjamin Herrenschmidt 18517c0caeb8STejun Heo base = rgn->base; 18527c0caeb8STejun Heo size = rgn->size; 18535d63f81cSMiles Chen end = base + size - 1; 185466a20757STang Chen flags = rgn->flags; 18557c0caeb8STejun Heo #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 18567c0caeb8STejun Heo if (memblock_get_region_node(rgn) != MAX_NUMNODES) 18577c0caeb8STejun Heo snprintf(nid_buf, sizeof(nid_buf), " on node %d", 18587c0caeb8STejun Heo memblock_get_region_node(rgn)); 18597c0caeb8STejun Heo #endif 1860e1720feeSMike Rapoport pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n", 18610262d9c8SHeiko Carstens type->name, idx, &base, &end, &size, nid_buf, flags); 18626ed311b2SBenjamin Herrenschmidt } 18636ed311b2SBenjamin Herrenschmidt } 18646ed311b2SBenjamin Herrenschmidt 18654ff7b82fSTejun Heo void __init_memblock __memblock_dump_all(void) 18666ed311b2SBenjamin Herrenschmidt { 18676ed311b2SBenjamin Herrenschmidt pr_info("MEMBLOCK configuration:\n"); 18685d63f81cSMiles Chen 
pr_info(" memory size = %pa reserved size = %pa\n",
18695d63f81cSMiles Chen &memblock.memory.total_size,
18705d63f81cSMiles Chen &memblock.reserved.total_size);
18716ed311b2SBenjamin Herrenschmidt 
18720262d9c8SHeiko Carstens memblock_dump(&memblock.memory);
18730262d9c8SHeiko Carstens memblock_dump(&memblock.reserved);
1874409efd4cSHeiko Carstens #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
18750262d9c8SHeiko Carstens memblock_dump(&memblock.physmem);
1876409efd4cSHeiko Carstens #endif
18776ed311b2SBenjamin Herrenschmidt }
18786ed311b2SBenjamin Herrenschmidt 
18791aadc056STejun Heo void __init memblock_allow_resize(void)
18806ed311b2SBenjamin Herrenschmidt {
1881142b45a7SBenjamin Herrenschmidt memblock_can_resize = 1;
18826ed311b2SBenjamin Herrenschmidt }
18836ed311b2SBenjamin Herrenschmidt 
18846ed311b2SBenjamin Herrenschmidt static int __init early_memblock(char *p)
18856ed311b2SBenjamin Herrenschmidt {
18866ed311b2SBenjamin Herrenschmidt if (p && strstr(p, "debug"))
18876ed311b2SBenjamin Herrenschmidt memblock_debug = 1;
18886ed311b2SBenjamin Herrenschmidt return 0;
18896ed311b2SBenjamin Herrenschmidt }
18906ed311b2SBenjamin Herrenschmidt early_param("memblock", early_memblock);
18916ed311b2SBenjamin Herrenschmidt 
1892bda49a81SMike Rapoport static void __init __free_pages_memory(unsigned long start, unsigned long end)
1893bda49a81SMike Rapoport {
1894bda49a81SMike Rapoport int order;
1895bda49a81SMike Rapoport 
1896bda49a81SMike Rapoport while (start < end) {
1897bda49a81SMike Rapoport order = min(MAX_ORDER - 1UL, __ffs(start));
1898bda49a81SMike Rapoport 
1899bda49a81SMike Rapoport while (start + (1UL << order) > end)
1900bda49a81SMike Rapoport order--;
1901bda49a81SMike Rapoport 
1902bda49a81SMike Rapoport memblock_free_pages(pfn_to_page(start), start, order);
1903bda49a81SMike Rapoport 
1904bda49a81SMike Rapoport start += (1UL << order);
1905bda49a81SMike Rapoport }
1906bda49a81SMike Rapoport }
1907bda49a81SMike Rapoport 
1908bda49a81SMike Rapoport static unsigned long __init __free_memory_core(phys_addr_t start,
1909bda49a81SMike Rapoport phys_addr_t end)
1910bda49a81SMike Rapoport {
1911bda49a81SMike Rapoport unsigned long start_pfn = PFN_UP(start);
1912bda49a81SMike Rapoport unsigned long end_pfn = min_t(unsigned long,
1913bda49a81SMike Rapoport PFN_DOWN(end), max_low_pfn);
1914bda49a81SMike Rapoport 
1915bda49a81SMike Rapoport if (start_pfn >= end_pfn)
1916bda49a81SMike Rapoport return 0;
1917bda49a81SMike Rapoport 
1918bda49a81SMike Rapoport __free_pages_memory(start_pfn, end_pfn);
1919bda49a81SMike Rapoport 
1920bda49a81SMike Rapoport return end_pfn - start_pfn;
1921bda49a81SMike Rapoport }
1922bda49a81SMike Rapoport 
1923bda49a81SMike Rapoport static unsigned long __init free_low_memory_core_early(void)
1924bda49a81SMike Rapoport {
1925bda49a81SMike Rapoport unsigned long count = 0;
1926bda49a81SMike Rapoport phys_addr_t start, end;
1927bda49a81SMike Rapoport u64 i;
1928bda49a81SMike Rapoport 
1929bda49a81SMike Rapoport memblock_clear_hotplug(0, -1);
1930bda49a81SMike Rapoport 
1931bda49a81SMike Rapoport for_each_reserved_mem_region(i, &start, &end)
1932bda49a81SMike Rapoport reserve_bootmem_region(start, end);
1933bda49a81SMike Rapoport 
1934bda49a81SMike Rapoport /*
1935bda49a81SMike Rapoport * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
1936bda49a81SMike Rapoport * because in some cases, such as when Node0 doesn't have RAM installed,
1937bda49a81SMike Rapoport * low RAM will be on Node1.
1938bda49a81SMike Rapoport */
1939bda49a81SMike Rapoport for_each_free_mem_range(i, NUMA_NO_NODE,
MEMBLOCK_NONE, &start, &end,
1940bda49a81SMike Rapoport NULL)
1941bda49a81SMike Rapoport count += __free_memory_core(start, end);
1942bda49a81SMike Rapoport 
1943bda49a81SMike Rapoport return count;
1944bda49a81SMike Rapoport }
1945bda49a81SMike Rapoport 
1946bda49a81SMike Rapoport static int reset_managed_pages_done __initdata;
1947bda49a81SMike Rapoport 
1948bda49a81SMike Rapoport void reset_node_managed_pages(pg_data_t *pgdat)
1949bda49a81SMike Rapoport {
1950bda49a81SMike Rapoport struct zone *z;
1951bda49a81SMike Rapoport 
1952bda49a81SMike Rapoport for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
19539705bea5SArun KS atomic_long_set(&z->managed_pages, 0);
1954bda49a81SMike Rapoport }
1955bda49a81SMike Rapoport 
1956bda49a81SMike Rapoport void __init reset_all_zones_managed_pages(void)
1957bda49a81SMike Rapoport {
1958bda49a81SMike Rapoport struct pglist_data *pgdat;
1959bda49a81SMike Rapoport 
1960bda49a81SMike Rapoport if (reset_managed_pages_done)
1961bda49a81SMike Rapoport return;
1962bda49a81SMike Rapoport 
1963bda49a81SMike Rapoport for_each_online_pgdat(pgdat)
1964bda49a81SMike Rapoport reset_node_managed_pages(pgdat);
1965bda49a81SMike Rapoport 
1966bda49a81SMike Rapoport reset_managed_pages_done = 1;
1967bda49a81SMike Rapoport }
1968bda49a81SMike Rapoport 
1969bda49a81SMike Rapoport /**
1970bda49a81SMike Rapoport * memblock_free_all - release free pages to the buddy allocator
1971bda49a81SMike Rapoport *
1972bda49a81SMike Rapoport * Return: the number of pages actually released.
1973bda49a81SMike Rapoport */
1974bda49a81SMike Rapoport unsigned long __init memblock_free_all(void)
1975bda49a81SMike Rapoport {
1976bda49a81SMike Rapoport unsigned long pages;
1977bda49a81SMike Rapoport 
1978bda49a81SMike Rapoport reset_all_zones_managed_pages();
1979bda49a81SMike Rapoport 
1980bda49a81SMike Rapoport pages = free_low_memory_core_early();
1981ca79b0c2SArun KS totalram_pages_add(pages);
1982bda49a81SMike Rapoport 
1983bda49a81SMike Rapoport return pages;
1984bda49a81SMike Rapoport }
1985bda49a81SMike Rapoport 
1986350e88baSMike Rapoport #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK)
19876d03b885SBenjamin Herrenschmidt 
19886d03b885SBenjamin Herrenschmidt static int memblock_debug_show(struct seq_file *m, void *private)
19896d03b885SBenjamin Herrenschmidt {
19906d03b885SBenjamin Herrenschmidt struct memblock_type *type = m->private;
19916d03b885SBenjamin Herrenschmidt struct memblock_region *reg;
19926d03b885SBenjamin Herrenschmidt int i;
19935d63f81cSMiles Chen phys_addr_t end;
19946d03b885SBenjamin Herrenschmidt 
19956d03b885SBenjamin Herrenschmidt for (i = 0; i < type->cnt; i++) {
19966d03b885SBenjamin Herrenschmidt reg = &type->regions[i];
19975d63f81cSMiles Chen end = reg->base + reg->size - 1;
19986d03b885SBenjamin Herrenschmidt 
19995d63f81cSMiles Chen seq_printf(m, "%4d: ", i);
20005d63f81cSMiles Chen seq_printf(m, "%pa..%pa\n", &reg->base, &end);
20016d03b885SBenjamin Herrenschmidt }
20026d03b885SBenjamin Herrenschmidt return 0;
20036d03b885SBenjamin Herrenschmidt }
20045ad35093SAndy Shevchenko DEFINE_SHOW_ATTRIBUTE(memblock_debug);
20056d03b885SBenjamin Herrenschmidt 
20066d03b885SBenjamin Herrenschmidt static int __init memblock_init_debugfs(void)
20076d03b885SBenjamin Herrenschmidt {
20086d03b885SBenjamin Herrenschmidt struct dentry *root = debugfs_create_dir("memblock", NULL);
2009d9f7979cSGreg Kroah-Hartman 
20100825a6f9SJoe Perches debugfs_create_file("memory", 0444, root,
20110825a6f9SJoe Perches &memblock.memory, &memblock_debug_fops);
20120825a6f9SJoe Perches debugfs_create_file("reserved", 0444, root, 20130825a6f9SJoe Perches &memblock.reserved, &memblock_debug_fops); 201470210ed9SPhilipp Hachtmann #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP 20150825a6f9SJoe Perches debugfs_create_file("physmem", 0444, root, 20160825a6f9SJoe Perches &memblock.physmem, &memblock_debug_fops); 201770210ed9SPhilipp Hachtmann #endif 20186d03b885SBenjamin Herrenschmidt 20196d03b885SBenjamin Herrenschmidt return 0; 20206d03b885SBenjamin Herrenschmidt } 20216d03b885SBenjamin Herrenschmidt __initcall(memblock_init_debugfs); 20226d03b885SBenjamin Herrenschmidt 20236d03b885SBenjamin Herrenschmidt #endif /* CONFIG_DEBUG_FS */ 2024
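/*
 * A minimal usage sketch of the allocation API implemented above, as it might
 * appear in an architecture's early setup code. The function and constants
 * local to the sketch (early_alloc_example(), EXAMPLE_TABLE_SIZE,
 * EXAMPLE_LIMIT) are hypothetical and assume the SZ_* macros from
 * <linux/sizes.h>; only the memblock_* calls are the ones defined in this
 * file.
 */
#define EXAMPLE_TABLE_SIZE	SZ_16K
#define EXAMPLE_LIMIT		SZ_4G

static void __init early_alloc_example(void)
{
	phys_addr_t table_phys;
	void *buf;

	/* Cap MEMBLOCK_ALLOC_ACCESSIBLE allocations to the first 4G. */
	memblock_set_current_limit(EXAMPLE_LIMIT);

	/*
	 * Physical allocation in an explicit [start, end) range; returns the
	 * physical address of the reserved block, or 0 on failure.
	 */
	table_phys = memblock_phys_alloc_range(EXAMPLE_TABLE_SIZE,
					       EXAMPLE_TABLE_SIZE, 0,
					       EXAMPLE_LIMIT);
	if (!table_phys)
		panic("%s: cannot allocate early table\n", __func__);

	/*
	 * Virtual allocation: returns zeroed memory or NULL. The @min_addr
	 * constraint is dropped on retry; node and mirroring constraints are
	 * handled by memblock_alloc_range_nid().
	 */
	buf = memblock_alloc_try_nid(SZ_64K, SMP_CACHE_BYTES, 0,
				     MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
	if (!buf)
		panic("%s: cannot allocate early buffer\n", __func__);
}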
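/*
 * A sketch of how the range iterators above are typically consumed. The
 * function name example_report_free_ranges() is hypothetical;
 * for_each_free_mem_range() is the wrapper around __next_mem_range() that
 * free_low_memory_core_early() also uses, walking "memory" regions that are
 * not covered by "reserved" ones.
 */
static void __init example_report_free_ranges(void)
{
	phys_addr_t start, end;
	u64 i;

	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
				NULL)
		pr_info("free range: [%pa-%pa)\n", &start, &end);

	/*
	 * Once the zones are initialized, the architecture's mem_init() would
	 * call memblock_free_all() to hand everything still free to the buddy
	 * allocator.
	 */
}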