// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#include <linux/io.h>

#include "internal.h"

#define INIT_MEMBLOCK_REGIONS			128
#define INIT_PHYSMEM_REGIONS			4

#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
# define INIT_MEMBLOCK_RESERVED_REGIONS		INIT_MEMBLOCK_REGIONS
#endif

/**
 * DOC: memblock overview
 *
 * Memblock is a method of managing memory regions during the early
 * boot period when the usual kernel memory allocators are not up and
 * running.
 *
 * Memblock views the system memory as collections of contiguous
 * regions. There are several types of these collections:
 *
 * * ``memory`` - describes the physical memory available to the
 *   kernel; this may differ from the actual physical memory installed
 *   in the system, for instance when the memory is restricted with
 *   the ``mem=`` command line parameter
 * * ``reserved`` - describes the regions that were allocated
 * * ``physmem`` - describes the actual physical memory available during
 *   boot regardless of the possible restrictions and memory hot(un)plug;
 *   the ``physmem`` type is only available on some architectures.
 *
 * Each region is represented by struct memblock_region that
 * defines the region extents, its attributes and NUMA node id on NUMA
 * systems. Every memory type is described by the struct memblock_type
 * which contains an array of memory regions along with
 * the allocator metadata. The "memory" and "reserved" types are nicely
 * wrapped with struct memblock. This structure is statically
 * initialized at build time.
 * The region arrays are initially sized to %INIT_MEMBLOCK_REGIONS for
 * "memory" and %INIT_MEMBLOCK_RESERVED_REGIONS for "reserved". The
 * region array for "physmem" is initially sized to %INIT_PHYSMEM_REGIONS.
 * memblock_allow_resize() enables automatic resizing of the region
 * arrays during addition of new regions. This feature should be used
 * with care so that memory allocated for the region array will not
 * overlap with areas that should be reserved, for example initrd.
 *
 * The early architecture setup should tell memblock what the physical
 * memory layout is by using memblock_add() or memblock_add_node()
 * functions. The first function does not assign the region to a NUMA
 * node and it is appropriate for UMA systems. Yet, it is possible to
 * use it on NUMA systems as well and assign the region to a NUMA node
 * later in the setup process using memblock_set_node(). The
 * memblock_add_node() performs such an assignment directly.
 *
 * Once memblock is set up, the memory can be allocated using one of the
 * API variants:
 *
 * * memblock_phys_alloc*() - these functions return the **physical**
 *   address of the allocated memory
 * * memblock_alloc*() - these functions return the **virtual** address
 *   of the allocated memory.
 *
 * Note that both API variants use implicit assumptions about allowed
 * memory ranges and the fallback methods. Consult the documentation
 * of memblock_alloc_internal() and memblock_alloc_range_nid()
 * functions for a more elaborate description.
 *
 * As the system boot progresses, the architecture specific mem_init()
 * function frees all the memory to the buddy page allocator.
 *
 * Unless an architecture enables %CONFIG_ARCH_KEEP_MEMBLOCK, the
 * memblock data structures (except "physmem") will be discarded after the
 * system initialization completes.
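 *
 * For illustration only, a hypothetical early setup sequence following
 * the call order described above (the base/size variables here are
 * made up, not taken from any particular architecture) might look
 * like::
 *
 *	memblock_add(dram_base, dram_size);
 *	memblock_reserve(initrd_start, initrd_size);
 *	memblock_allow_resize();
 *	ptr = memblock_alloc(SZ_4K, SZ_4K);
 *
 * where memblock_alloc() returns the virtual address of zeroed memory,
 * or NULL on failure.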
 */

#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS];
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,
	.memory.name		= "memory",

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_RESERVED_REGIONS,
	.reserved.name		= "reserved",

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
struct memblock_type physmem = {
	.regions		= memblock_physmem_init_regions,
	.cnt			= 1,	/* empty dummy entry */
	.max			= INIT_PHYSMEM_REGIONS,
	.name			= "physmem",
};
#endif

/*
 * keep a pointer to &memblock.memory in the text section to use it in
 * __next_mem_range() and its helpers.
 * For architectures that do not keep memblock data after init, this
 * pointer will be reset to NULL at memblock_discard()
 */
static __refdata struct memblock_type *memblock_memory = &memblock.memory;

#define for_each_memblock_type(i, memblock_type, rgn)			\
	for (i = 0, rgn = &memblock_type->regions[0];			\
	     i < memblock_type->cnt;					\
	     i++, rgn = &memblock_type->regions[i])

#define memblock_dbg(fmt, ...)						\
	do {								\
		if (memblock_debug)					\
			pr_info(fmt, ##__VA_ARGS__);			\
	} while (0)

static int memblock_debug __initdata_memblock;
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

static enum memblock_flags __init_memblock choose_memblock_flags(void)
{
	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, PHYS_ADDR_MAX - base);
}

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++)
		if (memblock_addrs_overlap(base, size, type->regions[i].base,
					   type->regions[i].size))
			break;
	return i < type->cnt;
}

/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * When the allocation direction is bottom-up, @start should be greater
 * than the end of the kernel image. Otherwise, it will be trimmed. The
 * reason is that we want the bottom-up allocation just near the kernel
 * image so it is highly likely that the allocated memory and the kernel
 * will reside in the same node.
 *
 * If bottom-up allocation fails, memory is allocated top-down instead.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					enum memblock_flags flags)
{
	phys_addr_t kernel_end, ret;

	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
	    end == MEMBLOCK_ALLOC_KASAN)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);
	kernel_end = __pa_symbol(_end);

	/*
	 * try bottom-up allocation only when bottom-up mode
	 * is set and @end is above the kernel image.
	 */
	if (memblock_bottom_up() && end > kernel_end) {
		phys_addr_t bottom_up_start;

		/* make sure we will allocate above the kernel */
		bottom_up_start = max(start, kernel_end);

		/* ok, try bottom-up allocation first */
		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
						      size, align, nid, flags);
		if (ret)
			return ret;

		/*
		 * we always limit bottom-up allocation above the kernel,
		 * but top-down allocation doesn't have the limit, so
		 * retrying top-down allocation may succeed when bottom-up
		 * allocation failed.
		 *
		 * bottom-up allocation is expected to fail very rarely,
		 * so we use WARN_ONCE() here to see the stack trace if
		 * failure happens.
		 */
		WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE),
			  "memblock: bottom-up allocation failed, memory hotremove may be affected\n");
	}

	return __memblock_find_range_top_down(start, end, size, align, nid,
					      flags);
}

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * Return:
 * Found address on success, 0 on failure.
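 *
 * An illustrative (hypothetical) call searching the low 4G for a
 * 1M sized, 1M aligned free area::
 *
 *	phys_addr_t addr = memblock_find_in_range(0, SZ_4G, SZ_1M, SZ_1M);
 *
 * A return value of 0 means no suitable area was found. Note that this
 * only locates a candidate range; the caller still has to claim it,
 * e.g. with memblock_reserve().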
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	enum memblock_flags flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					  NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
/**
 * memblock_discard - discard memory and reserved arrays if they were allocated
 */
void __init memblock_discard(void)
{
	phys_addr_t addr, size;

	if (memblock.reserved.regions != memblock_reserved_init_regions) {
		addr = __pa(memblock.reserved.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.reserved.max);
		__memblock_free_late(addr, size);
	}

	if (memblock.memory.regions != memblock_memory_init_regions) {
		addr = __pa(memblock.memory.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.memory.max);
		__memblock_free_late(addr, size);
	}

	memblock_memory = NULL;
}
#endif

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * Return:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr, new_end;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new array aligned to PAGE_SIZE,
	 * so we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       type->name, type->max, type->max * 2);
		return -1;
	}

	new_end = addr + new_size - 1;
	memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]",
			type->name, type->max * 2, &addr, &new_end);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	/*
	 * Reserve the new array if that comes from the memblock.  Otherwise,
	 * we needn't do it
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
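 *
 * For example (addresses chosen purely for illustration), two adjacent
 * regions [0x1000-0x2000) and [0x2000-0x3000) with the same node id and
 * flags are combined into a single region [0x1000-0x3000); adjacent
 * regions that differ in node id or flags are left separate.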
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}

/**
 * memblock_insert_region - insert new memblock region
 * @type: memblock type to insert into
 * @idx: index for the insertion point
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: node id of the new region
 * @flags: flags of the new region
 *
 * Insert new memblock region [@base, @base + @size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid,
						   enum memblock_flags flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
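 *
 * For example (addresses for illustration only), adding [0x1000-0x3000)
 * to a type that already contains [0x1800-0x2000) inserts the two
 * non-overlapping fragments [0x1000-0x1800) and [0x2000-0x3000), which
 * the final merge pass then combines with the existing region into a
 * single [0x1000-0x3000) entry.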
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, enum memblock_flags flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx, nr_new;
	struct memblock_region *rgn;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
#ifdef CONFIG_NEED_MULTIPLE_NODES
			WARN_ON(nid != memblock_get_region_node(rgn));
#endif
			WARN_ON(flags != rgn->flags);
			nr_new++;
			if (insert)
				memblock_insert_region(type, idx++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, idx, base, end - base,
					       nid, flags);
	}

	if (!nr_new)
		return 0;

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}

/**
 * memblock_add_node - add new memblock region within a NUMA node
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				       int nid)
{
	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

/**
 * memblock_add - add new memblock region
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
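 *
 * An illustrative call (the address and size are made up) registering
 * 256M of RAM starting at the 2G physical boundary::
 *
 *	memblock_add(0x80000000ULL, 0x10000000ULL);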
 */
int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base, @base + @size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx;
	struct memblock_region *rgn;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, idx, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, idx--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = idx;
			*end_rgn = idx + 1;
		}
	}

	return 0;
}

static int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_remove_range(&memblock.memory, base, size);
}

/**
 * memblock_free - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
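 *
 * For illustration (hypothetical variables), a region obtained earlier
 * with memblock_phys_alloc() can be returned with::
 *
 *	memblock_free(buf_phys, buf_size);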
 */
int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	kmemleak_free_part_phys(base, size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
}

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int __init_memblock memblock_physmem_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&physmem, base, size, MAX_NUMNODES, 0);
}
#endif

/**
 * memblock_setclr_flag - set or clear flag for a memory region
 * @base: base address of the region
 * @size: size of the region
 * @set: set or clear the flag
 * @flag: the flag to update
 *
 * This function isolates region [@base, @base + @size), and sets/clears
 * the flag.
 *
 * Return: 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
				phys_addr_t size, int set, int flag)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++) {
		struct memblock_region *r = &type->regions[i];

		if (set)
			r->flags |= flag;
		else
			r->flags &= ~flag;
	}

	memblock_merge_regions(type);
	return 0;
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
	system_has_some_mirror = true;

	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}

/**
 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
}

/**
 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
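 *
 * For illustration (the region variables are made up), firmware memory
 * that must not be covered by the kernel's direct mapping can be marked
 * and later unmarked with::
 *
 *	memblock_mark_nomap(fw_base, fw_size);
 *	memblock_clear_nomap(fw_base, fw_size);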
 */
int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
}

static bool should_skip_region(struct memblock_type *type,
			       struct memblock_region *m,
			       int nid, int flags)
{
	int m_nid = memblock_get_region_node(m);

	/* we never skip regions when iterating memblock.reserved or physmem */
	if (type != memblock_memory)
		return false;

	/* only memory regions are associated with nodes, check it */
	if (nid != NUMA_NO_NODE && nid != m_nid)
		return true;

	/* skip hotpluggable memory regions if needed */
	if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
		return true;

	/* if we want mirror memory skip non-mirror memory regions */
	if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
		return true;

	/* skip nomap memory unless we were asked for it explicitly */
	if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
		return true;

	return false;
}

/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.  For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	"Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int	    m_nid = memblock_get_region_node(m);

		if (should_skip_region(type_a, m, nid, flags))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;
10631c4bc43dSStefan Agner r->base : PHYS_ADDR_MAX; 1064f1af9d3aSPhilipp Hachtmann 1065f1af9d3aSPhilipp Hachtmann /* 1066f1af9d3aSPhilipp Hachtmann * if idx_b advanced past idx_a, 1067f1af9d3aSPhilipp Hachtmann * break out to advance idx_a 1068f1af9d3aSPhilipp Hachtmann */ 106935fd0808STejun Heo if (r_start >= m_end) 107035fd0808STejun Heo break; 107135fd0808STejun Heo /* if the two regions intersect, we're done */ 107235fd0808STejun Heo if (m_start < r_end) { 107335fd0808STejun Heo if (out_start) 1074f1af9d3aSPhilipp Hachtmann *out_start = 1075f1af9d3aSPhilipp Hachtmann max(m_start, r_start); 107635fd0808STejun Heo if (out_end) 107735fd0808STejun Heo *out_end = min(m_end, r_end); 107835fd0808STejun Heo if (out_nid) 1079f1af9d3aSPhilipp Hachtmann *out_nid = m_nid; 108035fd0808STejun Heo /* 1081f1af9d3aSPhilipp Hachtmann * The region which ends first is 1082f1af9d3aSPhilipp Hachtmann * advanced for the next iteration. 108335fd0808STejun Heo */ 108435fd0808STejun Heo if (m_end <= r_end) 1085f1af9d3aSPhilipp Hachtmann idx_a++; 108635fd0808STejun Heo else 1087f1af9d3aSPhilipp Hachtmann idx_b++; 1088f1af9d3aSPhilipp Hachtmann *idx = (u32)idx_a | (u64)idx_b << 32; 108935fd0808STejun Heo return; 109035fd0808STejun Heo } 109135fd0808STejun Heo } 109235fd0808STejun Heo } 109335fd0808STejun Heo 109435fd0808STejun Heo /* signal end of iteration */ 109535fd0808STejun Heo *idx = ULLONG_MAX; 109635fd0808STejun Heo } 109735fd0808STejun Heo 10987bd0b0f0STejun Heo /** 1099f1af9d3aSPhilipp Hachtmann * __next_mem_range_rev - generic next function for for_each_*_range_rev() 1100f1af9d3aSPhilipp Hachtmann * 11017bd0b0f0STejun Heo * @idx: pointer to u64 loop variable 1102ad5ea8cdSAlexander Kuleshov * @nid: node selector, %NUMA_NO_NODE for all nodes 1103fc6daaf9STony Luck * @flags: pick from blocks based on memory attributes 1104f1af9d3aSPhilipp Hachtmann * @type_a: pointer to memblock_type from where the range is taken 1105f1af9d3aSPhilipp Hachtmann * @type_b: pointer to memblock_type which excludes memory from being taken 1106dad7557eSWanpeng Li * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL 1107dad7557eSWanpeng Li * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL 1108dad7557eSWanpeng Li * @out_nid: ptr to int for nid of the range, can be %NULL 11097bd0b0f0STejun Heo * 111047cec443SMike Rapoport * Finds the next range from type_a which is not marked as unsuitable 111147cec443SMike Rapoport * in type_b. 111247cec443SMike Rapoport * 1113f1af9d3aSPhilipp Hachtmann * Reverse of __next_mem_range(). 11147bd0b0f0STejun Heo */ 1115e1720feeSMike Rapoport void __init_memblock __next_mem_range_rev(u64 *idx, int nid, 1116e1720feeSMike Rapoport enum memblock_flags flags, 1117f1af9d3aSPhilipp Hachtmann struct memblock_type *type_a, 1118f1af9d3aSPhilipp Hachtmann struct memblock_type *type_b, 11197bd0b0f0STejun Heo phys_addr_t *out_start, 11207bd0b0f0STejun Heo phys_addr_t *out_end, int *out_nid) 11217bd0b0f0STejun Heo { 1122f1af9d3aSPhilipp Hachtmann int idx_a = *idx & 0xffffffff; 1123f1af9d3aSPhilipp Hachtmann int idx_b = *idx >> 32; 1124b1154233SGrygorii Strashko 1125560dca27SGrygorii Strashko if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. 
Use NUMA_NO_NODE instead\n")) 1126560dca27SGrygorii Strashko nid = NUMA_NO_NODE; 11277bd0b0f0STejun Heo 11287bd0b0f0STejun Heo if (*idx == (u64)ULLONG_MAX) { 1129f1af9d3aSPhilipp Hachtmann idx_a = type_a->cnt - 1; 1130e47608abSzijun_hu if (type_b != NULL) 1131f1af9d3aSPhilipp Hachtmann idx_b = type_b->cnt; 1132e47608abSzijun_hu else 1133e47608abSzijun_hu idx_b = 0; 11347bd0b0f0STejun Heo } 11357bd0b0f0STejun Heo 1136f1af9d3aSPhilipp Hachtmann for (; idx_a >= 0; idx_a--) { 1137f1af9d3aSPhilipp Hachtmann struct memblock_region *m = &type_a->regions[idx_a]; 1138f1af9d3aSPhilipp Hachtmann 11397bd0b0f0STejun Heo phys_addr_t m_start = m->base; 11407bd0b0f0STejun Heo phys_addr_t m_end = m->base + m->size; 1141f1af9d3aSPhilipp Hachtmann int m_nid = memblock_get_region_node(m); 11427bd0b0f0STejun Heo 11439f3d5eaaSMike Rapoport if (should_skip_region(type_a, m, nid, flags)) 1144bf3d3cc5SArd Biesheuvel continue; 1145bf3d3cc5SArd Biesheuvel 1146f1af9d3aSPhilipp Hachtmann if (!type_b) { 1147f1af9d3aSPhilipp Hachtmann if (out_start) 1148f1af9d3aSPhilipp Hachtmann *out_start = m_start; 1149f1af9d3aSPhilipp Hachtmann if (out_end) 1150f1af9d3aSPhilipp Hachtmann *out_end = m_end; 1151f1af9d3aSPhilipp Hachtmann if (out_nid) 1152f1af9d3aSPhilipp Hachtmann *out_nid = m_nid; 1153fb399b48Szijun_hu idx_a--; 1154f1af9d3aSPhilipp Hachtmann *idx = (u32)idx_a | (u64)idx_b << 32; 1155f1af9d3aSPhilipp Hachtmann return; 1156f1af9d3aSPhilipp Hachtmann } 11577bd0b0f0STejun Heo 1158f1af9d3aSPhilipp Hachtmann /* scan areas before each reservation */ 1159f1af9d3aSPhilipp Hachtmann for (; idx_b >= 0; idx_b--) { 1160f1af9d3aSPhilipp Hachtmann struct memblock_region *r; 1161f1af9d3aSPhilipp Hachtmann phys_addr_t r_start; 1162f1af9d3aSPhilipp Hachtmann phys_addr_t r_end; 1163f1af9d3aSPhilipp Hachtmann 1164f1af9d3aSPhilipp Hachtmann r = &type_b->regions[idx_b]; 1165f1af9d3aSPhilipp Hachtmann r_start = idx_b ? r[-1].base + r[-1].size : 0; 1166f1af9d3aSPhilipp Hachtmann r_end = idx_b < type_b->cnt ? 11671c4bc43dSStefan Agner r->base : PHYS_ADDR_MAX; 1168f1af9d3aSPhilipp Hachtmann /* 1169f1af9d3aSPhilipp Hachtmann * if idx_b advanced past idx_a, 1170f1af9d3aSPhilipp Hachtmann * break out to advance idx_a 1171f1af9d3aSPhilipp Hachtmann */ 1172f1af9d3aSPhilipp Hachtmann 11737bd0b0f0STejun Heo if (r_end <= m_start) 11747bd0b0f0STejun Heo break; 11757bd0b0f0STejun Heo /* if the two regions intersect, we're done */ 11767bd0b0f0STejun Heo if (m_end > r_start) { 11777bd0b0f0STejun Heo if (out_start) 11787bd0b0f0STejun Heo *out_start = max(m_start, r_start); 11797bd0b0f0STejun Heo if (out_end) 11807bd0b0f0STejun Heo *out_end = min(m_end, r_end); 11817bd0b0f0STejun Heo if (out_nid) 1182f1af9d3aSPhilipp Hachtmann *out_nid = m_nid; 11837bd0b0f0STejun Heo if (m_start >= r_start) 1184f1af9d3aSPhilipp Hachtmann idx_a--; 11857bd0b0f0STejun Heo else 1186f1af9d3aSPhilipp Hachtmann idx_b--; 1187f1af9d3aSPhilipp Hachtmann *idx = (u32)idx_a | (u64)idx_b << 32; 11887bd0b0f0STejun Heo return; 11897bd0b0f0STejun Heo } 11907bd0b0f0STejun Heo } 11917bd0b0f0STejun Heo } 1192f1af9d3aSPhilipp Hachtmann /* signal end of iteration */ 11937bd0b0f0STejun Heo *idx = ULLONG_MAX; 11947bd0b0f0STejun Heo } 11957bd0b0f0STejun Heo 11967c0caeb8STejun Heo /* 119745e79815SChen Chang * Common iterator interface used to define for_each_mem_pfn_range(). 
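 *
 * A minimal usage sketch (editorial, not part of the original file),
 * using the for_each_mem_pfn_range() wrapper from <linux/memblock.h>
 * that this function backs; it counts the PFNs of all memory regions
 * on any node:
 *
 *	int i;
 *	unsigned long start_pfn, end_pfn, nr_pages = 0;
 *
 *	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL)
 *		nr_pages += end_pfn - start_pfn;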
11987c0caeb8STejun Heo */ 11997c0caeb8STejun Heo void __init_memblock __next_mem_pfn_range(int *idx, int nid, 12007c0caeb8STejun Heo unsigned long *out_start_pfn, 12017c0caeb8STejun Heo unsigned long *out_end_pfn, int *out_nid) 12027c0caeb8STejun Heo { 12037c0caeb8STejun Heo struct memblock_type *type = &memblock.memory; 12047c0caeb8STejun Heo struct memblock_region *r; 1205d622abf7SMike Rapoport int r_nid; 12067c0caeb8STejun Heo 12077c0caeb8STejun Heo while (++*idx < type->cnt) { 12087c0caeb8STejun Heo r = &type->regions[*idx]; 1209d622abf7SMike Rapoport r_nid = memblock_get_region_node(r); 12107c0caeb8STejun Heo 12117c0caeb8STejun Heo if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size)) 12127c0caeb8STejun Heo continue; 1213d622abf7SMike Rapoport if (nid == MAX_NUMNODES || nid == r_nid) 12147c0caeb8STejun Heo break; 12157c0caeb8STejun Heo } 12167c0caeb8STejun Heo if (*idx >= type->cnt) { 12177c0caeb8STejun Heo *idx = -1; 12187c0caeb8STejun Heo return; 12197c0caeb8STejun Heo } 12207c0caeb8STejun Heo 12217c0caeb8STejun Heo if (out_start_pfn) 12227c0caeb8STejun Heo *out_start_pfn = PFN_UP(r->base); 12237c0caeb8STejun Heo if (out_end_pfn) 12247c0caeb8STejun Heo *out_end_pfn = PFN_DOWN(r->base + r->size); 12257c0caeb8STejun Heo if (out_nid) 1226d622abf7SMike Rapoport *out_nid = r_nid; 12277c0caeb8STejun Heo } 12287c0caeb8STejun Heo 12297c0caeb8STejun Heo /** 12307c0caeb8STejun Heo * memblock_set_node - set node ID on memblock regions 12317c0caeb8STejun Heo * @base: base of area to set node ID for 12327c0caeb8STejun Heo * @size: size of area to set node ID for 1233e7e8de59STang Chen * @type: memblock type to set node ID for 12347c0caeb8STejun Heo * @nid: node ID to set 12357c0caeb8STejun Heo * 1236e7e8de59STang Chen * Set the nid of memblock @type regions in [@base, @base + @size) to @nid. 12377c0caeb8STejun Heo * Regions which cross the area boundaries are split as necessary. 12387c0caeb8STejun Heo * 123947cec443SMike Rapoport * Return: 12407c0caeb8STejun Heo * 0 on success, -errno on failure. 
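 *
 * Example (editorial sketch; @base, @size and @nid stand for values taken
 * from the platform's memory description):
 *
 *	memblock_add(base, size);
 *	memblock_set_node(base, size, &memblock.memory, nid);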
12417c0caeb8STejun Heo */ 12427c0caeb8STejun Heo int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size, 1243e7e8de59STang Chen struct memblock_type *type, int nid) 12447c0caeb8STejun Heo { 12453f08a302SMike Rapoport #ifdef CONFIG_NEED_MULTIPLE_NODES 12466a9ceb31STejun Heo int start_rgn, end_rgn; 12476a9ceb31STejun Heo int i, ret; 12487c0caeb8STejun Heo 12496a9ceb31STejun Heo ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn); 12506a9ceb31STejun Heo if (ret) 12516a9ceb31STejun Heo return ret; 12527c0caeb8STejun Heo 12536a9ceb31STejun Heo for (i = start_rgn; i < end_rgn; i++) 1254e9d24ad3SWanpeng Li memblock_set_region_node(&type->regions[i], nid); 12557c0caeb8STejun Heo 12567c0caeb8STejun Heo memblock_merge_regions(type); 12573f08a302SMike Rapoport #endif 12587c0caeb8STejun Heo return 0; 12597c0caeb8STejun Heo } 12603f08a302SMike Rapoport 1261837566e7SAlexander Duyck #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 1262837566e7SAlexander Duyck /** 1263837566e7SAlexander Duyck * __next_mem_pfn_range_in_zone - iterator for for_each_*_range_in_zone() 1264837566e7SAlexander Duyck * 1265837566e7SAlexander Duyck * @idx: pointer to u64 loop variable 1266837566e7SAlexander Duyck * @zone: zone in which all of the memory blocks reside 1267837566e7SAlexander Duyck * @out_spfn: ptr to ulong for start pfn of the range, can be %NULL 1268837566e7SAlexander Duyck * @out_epfn: ptr to ulong for end pfn of the range, can be %NULL 1269837566e7SAlexander Duyck * 1270837566e7SAlexander Duyck * This function is meant to be a zone/pfn specific wrapper for the 1271837566e7SAlexander Duyck * for_each_mem_range type iterators. It is used by the deferred 1272837566e7SAlexander Duyck * memory init routines, which previously duplicated much of this 1273837566e7SAlexander Duyck * logic throughout the code; centralizing it in a single iterator 1274837566e7SAlexander Duyck * that does everything those callers need avoids that 1275837566e7SAlexander Duyck * duplication. 1276837566e7SAlexander Duyck */ 1277837566e7SAlexander Duyck void __init_memblock 1278837566e7SAlexander Duyck __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone, 1279837566e7SAlexander Duyck unsigned long *out_spfn, unsigned long *out_epfn) 1280837566e7SAlexander Duyck { 1281837566e7SAlexander Duyck int zone_nid = zone_to_nid(zone); 1282837566e7SAlexander Duyck phys_addr_t spa, epa; 1283837566e7SAlexander Duyck int nid; 1284837566e7SAlexander Duyck 1285837566e7SAlexander Duyck __next_mem_range(idx, zone_nid, MEMBLOCK_NONE, 1286837566e7SAlexander Duyck &memblock.memory, &memblock.reserved, 1287837566e7SAlexander Duyck &spa, &epa, &nid); 1288837566e7SAlexander Duyck 1289837566e7SAlexander Duyck while (*idx != U64_MAX) { 1290837566e7SAlexander Duyck unsigned long epfn = PFN_DOWN(epa); 1291837566e7SAlexander Duyck unsigned long spfn = PFN_UP(spa); 1292837566e7SAlexander Duyck 1293837566e7SAlexander Duyck /* 1294837566e7SAlexander Duyck * Verify the end is at least past the start of the zone and 1295837566e7SAlexander Duyck * that we have at least one PFN to initialize.
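 * (If so, the range returned below is clamped to the zone boundaries.)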
1296837566e7SAlexander Duyck */ 1297837566e7SAlexander Duyck if (zone->zone_start_pfn < epfn && spfn < epfn) { 1298837566e7SAlexander Duyck /* if we went too far just stop searching */ 1299837566e7SAlexander Duyck if (zone_end_pfn(zone) <= spfn) { 1300837566e7SAlexander Duyck *idx = U64_MAX; 1301837566e7SAlexander Duyck break; 1302837566e7SAlexander Duyck } 1303837566e7SAlexander Duyck 1304837566e7SAlexander Duyck if (out_spfn) 1305837566e7SAlexander Duyck *out_spfn = max(zone->zone_start_pfn, spfn); 1306837566e7SAlexander Duyck if (out_epfn) 1307837566e7SAlexander Duyck *out_epfn = min(zone_end_pfn(zone), epfn); 1308837566e7SAlexander Duyck 1309837566e7SAlexander Duyck return; 1310837566e7SAlexander Duyck } 1311837566e7SAlexander Duyck 1312837566e7SAlexander Duyck __next_mem_range(idx, zone_nid, MEMBLOCK_NONE, 1313837566e7SAlexander Duyck &memblock.memory, &memblock.reserved, 1314837566e7SAlexander Duyck &spa, &epa, &nid); 1315837566e7SAlexander Duyck } 1316837566e7SAlexander Duyck 1317837566e7SAlexander Duyck /* signal end of iteration */ 1318837566e7SAlexander Duyck if (out_spfn) 1319837566e7SAlexander Duyck *out_spfn = ULONG_MAX; 1320837566e7SAlexander Duyck if (out_epfn) 1321837566e7SAlexander Duyck *out_epfn = 0; 1322837566e7SAlexander Duyck } 1323837566e7SAlexander Duyck 1324837566e7SAlexander Duyck #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ 13257c0caeb8STejun Heo 132692d12f95SMike Rapoport /** 132792d12f95SMike Rapoport * memblock_alloc_range_nid - allocate boot memory block 132892d12f95SMike Rapoport * @size: size of memory block to be allocated in bytes 132992d12f95SMike Rapoport * @align: alignment of the region and block's size 133092d12f95SMike Rapoport * @start: the lower bound of the memory region to allocate (phys address) 133192d12f95SMike Rapoport * @end: the upper bound of the memory region to allocate (phys address) 133292d12f95SMike Rapoport * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 13330ac398b1SYunfeng Ye * @exact_nid: control whether the allocation can fall back to other nodes 133492d12f95SMike Rapoport * 133392d12f95SMike Rapoport * The allocation is performed from the memory region limited by 133695830666SCao jin * memblock.current_limit if @end == %MEMBLOCK_ALLOC_ACCESSIBLE. 133792d12f95SMike Rapoport * 13380ac398b1SYunfeng Ye * If the specified node cannot hold the requested memory and @exact_nid 13390ac398b1SYunfeng Ye * is false, the allocation falls back to any node in the system. 134092d12f95SMike Rapoport * 134192d12f95SMike Rapoport * For systems with memory mirroring, the allocation is attempted first 134292d12f95SMike Rapoport * from the regions with mirroring enabled and then retried from any 134392d12f95SMike Rapoport * memory region. 134492d12f95SMike Rapoport * 134592d12f95SMike Rapoport * In addition, the function sets the min_count to 0 using kmemleak_alloc_phys() 134692d12f95SMike Rapoport * for the allocated boot memory block, so that it is never reported as a leak. 134792d12f95SMike Rapoport * 134892d12f95SMike Rapoport * Return: 134992d12f95SMike Rapoport * Physical address of allocated memory block on success, %0 on failure.
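 *
 * Example (editorial sketch; SZ_1M comes from <linux/sizes.h>): a naturally
 * aligned 1MiB block from anywhere in the accessible memory on any node:
 *
 *	phys_addr_t pa = memblock_alloc_range_nid(SZ_1M, SZ_1M, 0,
 *						  MEMBLOCK_ALLOC_ACCESSIBLE,
 *						  NUMA_NO_NODE, false);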
135092d12f95SMike Rapoport */ 13518676af1fSAslan Bakirov phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size, 13522bfc2862SAkinobu Mita phys_addr_t align, phys_addr_t start, 13530ac398b1SYunfeng Ye phys_addr_t end, int nid, 13540ac398b1SYunfeng Ye bool exact_nid) 135595f72d1eSYinghai Lu { 135692d12f95SMike Rapoport enum memblock_flags flags = choose_memblock_flags(); 13576ed311b2SBenjamin Herrenschmidt phys_addr_t found; 135895f72d1eSYinghai Lu 135992d12f95SMike Rapoport if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n")) 136092d12f95SMike Rapoport nid = NUMA_NO_NODE; 136192d12f95SMike Rapoport 13622f770806SMike Rapoport if (!align) { 13632f770806SMike Rapoport /* Can't use WARNs this early in boot on powerpc */ 13642f770806SMike Rapoport dump_stack(); 13652f770806SMike Rapoport align = SMP_CACHE_BYTES; 13662f770806SMike Rapoport } 13672f770806SMike Rapoport 136892d12f95SMike Rapoport again: 1369fc6daaf9STony Luck found = memblock_find_in_range_node(size, align, start, end, nid, 1370fc6daaf9STony Luck flags); 137192d12f95SMike Rapoport if (found && !memblock_reserve(found, size)) 137292d12f95SMike Rapoport goto done; 137392d12f95SMike Rapoport 13740ac398b1SYunfeng Ye if (nid != NUMA_NO_NODE && !exact_nid) { 137592d12f95SMike Rapoport found = memblock_find_in_range_node(size, align, start, 137692d12f95SMike Rapoport end, NUMA_NO_NODE, 137792d12f95SMike Rapoport flags); 137892d12f95SMike Rapoport if (found && !memblock_reserve(found, size)) 137992d12f95SMike Rapoport goto done; 138092d12f95SMike Rapoport } 138192d12f95SMike Rapoport 138292d12f95SMike Rapoport if (flags & MEMBLOCK_MIRROR) { 138392d12f95SMike Rapoport flags &= ~MEMBLOCK_MIRROR; 138492d12f95SMike Rapoport pr_warn("Could not allocate %pap bytes of mirrored memory\n", 138592d12f95SMike Rapoport &size); 138692d12f95SMike Rapoport goto again; 138792d12f95SMike Rapoport } 138892d12f95SMike Rapoport 138992d12f95SMike Rapoport return 0; 139092d12f95SMike Rapoport 139192d12f95SMike Rapoport done: 139292d12f95SMike Rapoport /* Skip kmemleak for kasan_init() due to high volume. */ 139392d12f95SMike Rapoport if (end != MEMBLOCK_ALLOC_KASAN) 1394aedf95eaSCatalin Marinas /* 139592d12f95SMike Rapoport * The min_count is set to 0 so that memblock allocated 139692d12f95SMike Rapoport * blocks are never reported as leaks. This is because many 139792d12f95SMike Rapoport * of these blocks are only referred to via the physical 139892d12f95SMike Rapoport * address, which is not looked up by kmemleak. 1399aedf95eaSCatalin Marinas */ 14009099daedSCatalin Marinas kmemleak_alloc_phys(found, size, 0, 0); 140192d12f95SMike Rapoport 14026ed311b2SBenjamin Herrenschmidt return found; 1403aedf95eaSCatalin Marinas } 140495f72d1eSYinghai Lu 1405a2974133SMike Rapoport /** 1406a2974133SMike Rapoport * memblock_phys_alloc_range - allocate a memory block inside specified range 1407a2974133SMike Rapoport * @size: size of memory block to be allocated in bytes 1408a2974133SMike Rapoport * @align: alignment of the region and block's size 1409a2974133SMike Rapoport * @start: the lower bound of the memory region to allocate (physical address) 1410a2974133SMike Rapoport * @end: the upper bound of the memory region to allocate (physical address) 1411a2974133SMike Rapoport * 1412a2974133SMike Rapoport * Allocate @size bytes in the range between @start and @end.
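 *
 * Example (editorial sketch; the SZ_* constants come from <linux/sizes.h>):
 * a page-aligned 16KiB block somewhere in the first 1GiB:
 *
 *	phys_addr_t pa = memblock_phys_alloc_range(SZ_16K, PAGE_SIZE, 0, SZ_1G);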
1413a2974133SMike Rapoport * 1414a2974133SMike Rapoport * Return: physical address of the allocated memory block on success, 1415a2974133SMike Rapoport * %0 on failure. 1416a2974133SMike Rapoport */ 14178a770c2aSMike Rapoport phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size, 14188a770c2aSMike Rapoport phys_addr_t align, 14198a770c2aSMike Rapoport phys_addr_t start, 14208a770c2aSMike Rapoport phys_addr_t end) 14212bfc2862SAkinobu Mita { 14220ac398b1SYunfeng Ye return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE, 14230ac398b1SYunfeng Ye false); 14247bd0b0f0STejun Heo } 14257bd0b0f0STejun Heo 1426a2974133SMike Rapoport /** 1427a2974133SMike Rapoport * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node 1428a2974133SMike Rapoport * @size: size of memory block to be allocated in bytes 1429a2974133SMike Rapoport * @align: alignment of the region and block's size 1430a2974133SMike Rapoport * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 1431a2974133SMike Rapoport * 1432a2974133SMike Rapoport * Allocates a memory block from the specified NUMA node. If the node 1433a2974133SMike Rapoport * has no available memory, attempts to allocate from any node in the 1434a2974133SMike Rapoport * system. 1435a2974133SMike Rapoport * 1436a2974133SMike Rapoport * Return: physical address of the allocated memory block on success, 1437a2974133SMike Rapoport * %0 on failure. 1438a2974133SMike Rapoport */ 14399a8dd708SMike Rapoport phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid) 14409d1e2492SBenjamin Herrenschmidt { 144133755574SMike Rapoport return memblock_alloc_range_nid(size, align, 0, 14420ac398b1SYunfeng Ye MEMBLOCK_ALLOC_ACCESSIBLE, nid, false); 144395f72d1eSYinghai Lu } 144495f72d1eSYinghai Lu 144526f09e9bSSantosh Shilimkar /** 1446eb31d559SMike Rapoport * memblock_alloc_internal - allocate boot memory block 144726f09e9bSSantosh Shilimkar * @size: size of memory block to be allocated in bytes 144826f09e9bSSantosh Shilimkar * @align: alignment of the region and block's size 144926f09e9bSSantosh Shilimkar * @min_addr: the lower bound of the memory region to allocate (phys address) 145026f09e9bSSantosh Shilimkar * @max_addr: the upper bound of the memory region to allocate (phys address) 145126f09e9bSSantosh Shilimkar * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 14520ac398b1SYunfeng Ye * @exact_nid: control whether the allocation can fall back to other nodes 145326f09e9bSSantosh Shilimkar * 145492d12f95SMike Rapoport * Allocates a memory block using memblock_alloc_range_nid() and 145592d12f95SMike Rapoport * converts the returned physical address to virtual. 145692d12f95SMike Rapoport * 145726f09e9bSSantosh Shilimkar * The @min_addr limit is dropped if it cannot be satisfied and the allocation 145892d12f95SMike Rapoport * will fall back to memory below @min_addr. Other constraints, such 145992d12f95SMike Rapoport * as node and mirrored memory, will be handled again in 146092d12f95SMike Rapoport * memblock_alloc_range_nid(). 146126f09e9bSSantosh Shilimkar * 146247cec443SMike Rapoport * Return: 146326f09e9bSSantosh Shilimkar * Virtual address of allocated memory block on success, NULL on failure.
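 *
 * Callers normally reach this helper through one of the public wrappers,
 * e.g. (editorial sketch; SZ_4K comes from <linux/sizes.h>):
 *
 *	void *ptr = memblock_alloc(SZ_4K, SMP_CACHE_BYTES);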
146426f09e9bSSantosh Shilimkar */ 1465eb31d559SMike Rapoport static void * __init memblock_alloc_internal( 146626f09e9bSSantosh Shilimkar phys_addr_t size, phys_addr_t align, 146726f09e9bSSantosh Shilimkar phys_addr_t min_addr, phys_addr_t max_addr, 14680ac398b1SYunfeng Ye int nid, bool exact_nid) 146926f09e9bSSantosh Shilimkar { 147026f09e9bSSantosh Shilimkar phys_addr_t alloc; 147126f09e9bSSantosh Shilimkar 147226f09e9bSSantosh Shilimkar /* 147326f09e9bSSantosh Shilimkar * Detect any accidental use of these APIs after slab is ready, as at 147426f09e9bSSantosh Shilimkar * this moment memblock may be deinitialized already and its 1475c6ffc5caSMike Rapoport * internal data may be destroyed (after execution of memblock_free_all) 147626f09e9bSSantosh Shilimkar */ 147726f09e9bSSantosh Shilimkar if (WARN_ON_ONCE(slab_is_available())) 147826f09e9bSSantosh Shilimkar return kzalloc_node(size, GFP_NOWAIT, nid); 147926f09e9bSSantosh Shilimkar 1480f3057ad7SMike Rapoport if (max_addr > memblock.current_limit) 1481f3057ad7SMike Rapoport max_addr = memblock.current_limit; 1482f3057ad7SMike Rapoport 14830ac398b1SYunfeng Ye alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid, 14840ac398b1SYunfeng Ye exact_nid); 14852f770806SMike Rapoport 148692d12f95SMike Rapoport /* retry allocation without lower limit */ 148792d12f95SMike Rapoport if (!alloc && min_addr) 14880ac398b1SYunfeng Ye alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid, 14890ac398b1SYunfeng Ye exact_nid); 149026f09e9bSSantosh Shilimkar 149192d12f95SMike Rapoport if (!alloc) 1492a3f5bafcSTony Luck return NULL; 149326f09e9bSSantosh Shilimkar 149492d12f95SMike Rapoport return phys_to_virt(alloc); 149526f09e9bSSantosh Shilimkar } 149626f09e9bSSantosh Shilimkar 149726f09e9bSSantosh Shilimkar /** 14980ac398b1SYunfeng Ye * memblock_alloc_exact_nid_raw - allocate boot memory block on the exact node 14990ac398b1SYunfeng Ye * without zeroing memory 15000ac398b1SYunfeng Ye * @size: size of memory block to be allocated in bytes 15010ac398b1SYunfeng Ye * @align: alignment of the region and block's size 15020ac398b1SYunfeng Ye * @min_addr: the lower bound of the memory region from where the allocation 15030ac398b1SYunfeng Ye * is preferred (phys address) 15040ac398b1SYunfeng Ye * @max_addr: the upper bound of the memory region from where the allocation 15050ac398b1SYunfeng Ye * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to 15060ac398b1SYunfeng Ye * allocate only from memory limited by memblock.current_limit value 15070ac398b1SYunfeng Ye * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 15080ac398b1SYunfeng Ye * 15090ac398b1SYunfeng Ye * Public function, provides additional debug information (including caller 15100ac398b1SYunfeng Ye * info), if enabled. Does not zero allocated memory. 15110ac398b1SYunfeng Ye * 15120ac398b1SYunfeng Ye * Return: 15130ac398b1SYunfeng Ye * Virtual address of allocated memory block on success, NULL on failure. 
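 *
 * Example (editorial sketch; @size and @nid stand for caller-provided
 * values): a node-strict, unzeroed allocation whose every byte the caller
 * must initialize itself before use:
 *
 *	buf = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES, 0,
 *					   MEMBLOCK_ALLOC_ACCESSIBLE, nid);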
15140ac398b1SYunfeng Ye */ 15150ac398b1SYunfeng Ye void * __init memblock_alloc_exact_nid_raw( 15160ac398b1SYunfeng Ye phys_addr_t size, phys_addr_t align, 15170ac398b1SYunfeng Ye phys_addr_t min_addr, phys_addr_t max_addr, 15180ac398b1SYunfeng Ye int nid) 15190ac398b1SYunfeng Ye { 15200ac398b1SYunfeng Ye void *ptr; 15210ac398b1SYunfeng Ye 15220ac398b1SYunfeng Ye memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n", 15230ac398b1SYunfeng Ye __func__, (u64)size, (u64)align, nid, &min_addr, 15240ac398b1SYunfeng Ye &max_addr, (void *)_RET_IP_); 15250ac398b1SYunfeng Ye 15260ac398b1SYunfeng Ye ptr = memblock_alloc_internal(size, align, 15270ac398b1SYunfeng Ye min_addr, max_addr, nid, true); 15280ac398b1SYunfeng Ye if (ptr && size > 0) 15290ac398b1SYunfeng Ye page_init_poison(ptr, size); 15300ac398b1SYunfeng Ye 15310ac398b1SYunfeng Ye return ptr; 15320ac398b1SYunfeng Ye } 15330ac398b1SYunfeng Ye 15340ac398b1SYunfeng Ye /** 1535eb31d559SMike Rapoport * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing 1536ea1f5f37SPavel Tatashin * memory and without panicking 1537ea1f5f37SPavel Tatashin * @size: size of memory block to be allocated in bytes 1538ea1f5f37SPavel Tatashin * @align: alignment of the region and block's size 1539ea1f5f37SPavel Tatashin * @min_addr: the lower bound of the memory region from where the allocation 1540ea1f5f37SPavel Tatashin * is preferred (phys address) 1541ea1f5f37SPavel Tatashin * @max_addr: the upper bound of the memory region from where the allocation 154297ad1087SMike Rapoport * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to 1543ea1f5f37SPavel Tatashin * allocate only from memory limited by memblock.current_limit value 1544ea1f5f37SPavel Tatashin * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 1545ea1f5f37SPavel Tatashin * 1546ea1f5f37SPavel Tatashin * Public function, provides additional debug information (including caller 1547ea1f5f37SPavel Tatashin * info), if enabled. Does not zero allocated memory, does not panic if request 1548ea1f5f37SPavel Tatashin * cannot be satisfied. 1549ea1f5f37SPavel Tatashin * 155047cec443SMike Rapoport * Return: 1551ea1f5f37SPavel Tatashin * Virtual address of allocated memory block on success, NULL on failure. 
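 *
 * Example (editorial sketch; @size and @nid stand for caller-provided
 * values): a node-preferred, unzeroed allocation, e.g. for a memory map
 * whose entries will all be written during deferred init anyway:
 *
 *	map = memblock_alloc_try_nid_raw(size, PAGE_SIZE, 0,
 *					 MEMBLOCK_ALLOC_ACCESSIBLE, nid);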
1552ea1f5f37SPavel Tatashin */ 1553eb31d559SMike Rapoport void * __init memblock_alloc_try_nid_raw( 1554ea1f5f37SPavel Tatashin phys_addr_t size, phys_addr_t align, 1555ea1f5f37SPavel Tatashin phys_addr_t min_addr, phys_addr_t max_addr, 1556ea1f5f37SPavel Tatashin int nid) 1557ea1f5f37SPavel Tatashin { 1558ea1f5f37SPavel Tatashin void *ptr; 1559ea1f5f37SPavel Tatashin 1560d75f773cSSakari Ailus memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n", 1561a36aab89SMike Rapoport __func__, (u64)size, (u64)align, nid, &min_addr, 1562a36aab89SMike Rapoport &max_addr, (void *)_RET_IP_); 1563ea1f5f37SPavel Tatashin 1564eb31d559SMike Rapoport ptr = memblock_alloc_internal(size, align, 15650ac398b1SYunfeng Ye min_addr, max_addr, nid, false); 1566ea1f5f37SPavel Tatashin if (ptr && size > 0) 1567f682a97aSAlexander Duyck page_init_poison(ptr, size); 1568f682a97aSAlexander Duyck 1569ea1f5f37SPavel Tatashin return ptr; 1570ea1f5f37SPavel Tatashin } 1571ea1f5f37SPavel Tatashin 1572ea1f5f37SPavel Tatashin /** 1573c0dbe825SMike Rapoport * memblock_alloc_try_nid - allocate boot memory block 157426f09e9bSSantosh Shilimkar * @size: size of memory block to be allocated in bytes 157526f09e9bSSantosh Shilimkar * @align: alignment of the region and block's size 157626f09e9bSSantosh Shilimkar * @min_addr: the lower bound of the memory region from where the allocation 157726f09e9bSSantosh Shilimkar * is preferred (phys address) 157826f09e9bSSantosh Shilimkar * @max_addr: the upper bound of the memory region from where the allocation 157997ad1087SMike Rapoport * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to 158026f09e9bSSantosh Shilimkar * allocate only from memory limited by memblock.current_limit value 158126f09e9bSSantosh Shilimkar * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 158226f09e9bSSantosh Shilimkar * 1583c0dbe825SMike Rapoport * Public function, provides additional debug information (including caller 1584c0dbe825SMike Rapoport * info), if enabled. This function zeroes the allocated memory. 158526f09e9bSSantosh Shilimkar * 158647cec443SMike Rapoport * Return: 158726f09e9bSSantosh Shilimkar * Virtual address of allocated memory block on success, NULL on failure. 
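 *
 * Example (editorial sketch; @size and @nid stand for caller-provided
 * values): a zeroed allocation that prefers @nid but may fall back to any
 * node:
 *
 *	ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 0,
 *				     MEMBLOCK_ALLOC_ACCESSIBLE, nid);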
158826f09e9bSSantosh Shilimkar */ 1589eb31d559SMike Rapoport void * __init memblock_alloc_try_nid( 159026f09e9bSSantosh Shilimkar phys_addr_t size, phys_addr_t align, 159126f09e9bSSantosh Shilimkar phys_addr_t min_addr, phys_addr_t max_addr, 159226f09e9bSSantosh Shilimkar int nid) 159326f09e9bSSantosh Shilimkar { 159426f09e9bSSantosh Shilimkar void *ptr; 159526f09e9bSSantosh Shilimkar 1596d75f773cSSakari Ailus memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n", 1597a36aab89SMike Rapoport __func__, (u64)size, (u64)align, nid, &min_addr, 1598a36aab89SMike Rapoport &max_addr, (void *)_RET_IP_); 1599eb31d559SMike Rapoport ptr = memblock_alloc_internal(size, align, 16000ac398b1SYunfeng Ye min_addr, max_addr, nid, false); 1601c0dbe825SMike Rapoport if (ptr) 1602ea1f5f37SPavel Tatashin memset(ptr, 0, size); 160326f09e9bSSantosh Shilimkar 1604c0dbe825SMike Rapoport return ptr; 160526f09e9bSSantosh Shilimkar } 160626f09e9bSSantosh Shilimkar 160726f09e9bSSantosh Shilimkar /** 1608a2974133SMike Rapoport * __memblock_free_late - free pages directly to buddy allocator 160948a833ccSMike Rapoport * @base: phys starting address of the boot memory block 161026f09e9bSSantosh Shilimkar * @size: size of the boot memory block in bytes 161126f09e9bSSantosh Shilimkar * 1612a2974133SMike Rapoport * This is only useful when the memblock allocator has already been torn 161326f09e9bSSantosh Shilimkar * down, but we are still initializing the system. Pages are released directly 1614a2974133SMike Rapoport * to the buddy allocator. 161526f09e9bSSantosh Shilimkar */ 161626f09e9bSSantosh Shilimkar void __init __memblock_free_late(phys_addr_t base, phys_addr_t size) 161726f09e9bSSantosh Shilimkar { 1618a36aab89SMike Rapoport phys_addr_t cursor, end; 161926f09e9bSSantosh Shilimkar 1620a36aab89SMike Rapoport end = base + size - 1; 1621d75f773cSSakari Ailus memblock_dbg("%s: [%pa-%pa] %pS\n", 1622a36aab89SMike Rapoport __func__, &base, &end, (void *)_RET_IP_); 16239099daedSCatalin Marinas kmemleak_free_part_phys(base, size); 162426f09e9bSSantosh Shilimkar cursor = PFN_UP(base); 162526f09e9bSSantosh Shilimkar end = PFN_DOWN(base + size); 162626f09e9bSSantosh Shilimkar 162726f09e9bSSantosh Shilimkar for (; cursor < end; cursor++) { 16287c2ee349SMike Rapoport memblock_free_pages(pfn_to_page(cursor), cursor, 0); 1629ca79b0c2SArun KS totalram_pages_inc(); 163026f09e9bSSantosh Shilimkar } 163126f09e9bSSantosh Shilimkar } 16329d1e2492SBenjamin Herrenschmidt 16339d1e2492SBenjamin Herrenschmidt /* 16349d1e2492SBenjamin Herrenschmidt * Remaining API functions 16359d1e2492SBenjamin Herrenschmidt */ 16369d1e2492SBenjamin Herrenschmidt 16371f1ffb8aSDavid Gibson phys_addr_t __init_memblock memblock_phys_mem_size(void) 163895f72d1eSYinghai Lu { 16391440c4e2STejun Heo return memblock.memory.total_size; 164095f72d1eSYinghai Lu } 164195f72d1eSYinghai Lu 16428907de5dSSrikar Dronamraju phys_addr_t __init_memblock memblock_reserved_size(void) 16438907de5dSSrikar Dronamraju { 16448907de5dSSrikar Dronamraju return memblock.reserved.total_size; 16458907de5dSSrikar Dronamraju } 16468907de5dSSrikar Dronamraju 16470a93ebefSSam Ravnborg /* lowest address */ 16480a93ebefSSam Ravnborg phys_addr_t __init_memblock memblock_start_of_DRAM(void) 16490a93ebefSSam Ravnborg { 16500a93ebefSSam Ravnborg return memblock.memory.regions[0].base; 16510a93ebefSSam Ravnborg } 16520a93ebefSSam Ravnborg 165310d06439SYinghai Lu phys_addr_t __init_memblock memblock_end_of_DRAM(void) 165495f72d1eSYinghai Lu { 165595f72d1eSYinghai Lu int idx = 
memblock.memory.cnt - 1; 165695f72d1eSYinghai Lu 1657e3239ff9SBenjamin Herrenschmidt return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size); 165895f72d1eSYinghai Lu } 165995f72d1eSYinghai Lu 1660a571d4ebSDennis Chen static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit) 166195f72d1eSYinghai Lu { 16621c4bc43dSStefan Agner phys_addr_t max_addr = PHYS_ADDR_MAX; 1663136199f0SEmil Medve struct memblock_region *r; 166495f72d1eSYinghai Lu 1665a571d4ebSDennis Chen /* 1666a571d4ebSDennis Chen * Translate the memory @limit size into the max address within one of 1667a571d4ebSDennis Chen * the memory memblock regions. If the @limit exceeds the total size 16681c4bc43dSStefan Agner * of those regions, max_addr keeps its original value, PHYS_ADDR_MAX. 1669a571d4ebSDennis Chen */ 1670cc6de168SMike Rapoport for_each_mem_region(r) { 1671c0ce8fefSTejun Heo if (limit <= r->size) { 1672c0ce8fefSTejun Heo max_addr = r->base + limit; 167395f72d1eSYinghai Lu break; 167495f72d1eSYinghai Lu } 1675c0ce8fefSTejun Heo limit -= r->size; 167695f72d1eSYinghai Lu } 1677c0ce8fefSTejun Heo 1678a571d4ebSDennis Chen return max_addr; 1679a571d4ebSDennis Chen } 1680a571d4ebSDennis Chen 1681a571d4ebSDennis Chen void __init memblock_enforce_memory_limit(phys_addr_t limit) 1682a571d4ebSDennis Chen { 168349aef717SColin Ian King phys_addr_t max_addr; 1684a571d4ebSDennis Chen 1685a571d4ebSDennis Chen if (!limit) 1686a571d4ebSDennis Chen return; 1687a571d4ebSDennis Chen 1688a571d4ebSDennis Chen max_addr = __find_max_addr(limit); 1689a571d4ebSDennis Chen 1690a571d4ebSDennis Chen /* @limit exceeds the total size of the memory, do nothing */ 16911c4bc43dSStefan Agner if (max_addr == PHYS_ADDR_MAX) 1692a571d4ebSDennis Chen return; 1693a571d4ebSDennis Chen 1694c0ce8fefSTejun Heo /* truncate both memory and reserved regions */ 1695f1af9d3aSPhilipp Hachtmann memblock_remove_range(&memblock.memory, max_addr, 16961c4bc43dSStefan Agner PHYS_ADDR_MAX); 1697f1af9d3aSPhilipp Hachtmann memblock_remove_range(&memblock.reserved, max_addr, 16981c4bc43dSStefan Agner PHYS_ADDR_MAX); 169995f72d1eSYinghai Lu } 170095f72d1eSYinghai Lu 1701c9ca9b4eSAKASHI Takahiro void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size) 1702c9ca9b4eSAKASHI Takahiro { 1703c9ca9b4eSAKASHI Takahiro int start_rgn, end_rgn; 1704c9ca9b4eSAKASHI Takahiro int i, ret; 1705c9ca9b4eSAKASHI Takahiro 1706c9ca9b4eSAKASHI Takahiro if (!size) 1707c9ca9b4eSAKASHI Takahiro return; 1708c9ca9b4eSAKASHI Takahiro 1709c9ca9b4eSAKASHI Takahiro ret = memblock_isolate_range(&memblock.memory, base, size, 1710c9ca9b4eSAKASHI Takahiro &start_rgn, &end_rgn); 1711c9ca9b4eSAKASHI Takahiro if (ret) 1712c9ca9b4eSAKASHI Takahiro return; 1713c9ca9b4eSAKASHI Takahiro 1714c9ca9b4eSAKASHI Takahiro /* remove all the MAP regions */ 1715c9ca9b4eSAKASHI Takahiro for (i = memblock.memory.cnt - 1; i >= end_rgn; i--) 1716c9ca9b4eSAKASHI Takahiro if (!memblock_is_nomap(&memblock.memory.regions[i])) 1717c9ca9b4eSAKASHI Takahiro memblock_remove_region(&memblock.memory, i); 1718c9ca9b4eSAKASHI Takahiro 1719c9ca9b4eSAKASHI Takahiro for (i = start_rgn - 1; i >= 0; i--) 1720c9ca9b4eSAKASHI Takahiro if (!memblock_is_nomap(&memblock.memory.regions[i])) 1721c9ca9b4eSAKASHI Takahiro memblock_remove_region(&memblock.memory, i); 1722c9ca9b4eSAKASHI Takahiro 1723c9ca9b4eSAKASHI Takahiro /* truncate the reserved regions */ 1724c9ca9b4eSAKASHI Takahiro memblock_remove_range(&memblock.reserved, 0, base); 1725c9ca9b4eSAKASHI Takahiro memblock_remove_range(&memblock.reserved,
17261c4bc43dSStefan Agner base + size, PHYS_ADDR_MAX); 1727c9ca9b4eSAKASHI Takahiro } 1728c9ca9b4eSAKASHI Takahiro 1729a571d4ebSDennis Chen void __init memblock_mem_limit_remove_map(phys_addr_t limit) 1730a571d4ebSDennis Chen { 1731a571d4ebSDennis Chen phys_addr_t max_addr; 1732a571d4ebSDennis Chen 1733a571d4ebSDennis Chen if (!limit) 1734a571d4ebSDennis Chen return; 1735a571d4ebSDennis Chen 1736a571d4ebSDennis Chen max_addr = __find_max_addr(limit); 1737a571d4ebSDennis Chen 1738a571d4ebSDennis Chen /* @limit exceeds the total size of the memory, do nothing */ 17391c4bc43dSStefan Agner if (max_addr == PHYS_ADDR_MAX) 1740a571d4ebSDennis Chen return; 1741a571d4ebSDennis Chen 1742c9ca9b4eSAKASHI Takahiro memblock_cap_memory_range(0, max_addr); 1743a571d4ebSDennis Chen } 1744a571d4ebSDennis Chen 1745cd79481dSYinghai Lu static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr) 174672d4b0b4SBenjamin Herrenschmidt { 174772d4b0b4SBenjamin Herrenschmidt unsigned int left = 0, right = type->cnt; 174872d4b0b4SBenjamin Herrenschmidt 174972d4b0b4SBenjamin Herrenschmidt do { 175072d4b0b4SBenjamin Herrenschmidt unsigned int mid = (right + left) / 2; 175172d4b0b4SBenjamin Herrenschmidt 175272d4b0b4SBenjamin Herrenschmidt if (addr < type->regions[mid].base) 175372d4b0b4SBenjamin Herrenschmidt right = mid; 175472d4b0b4SBenjamin Herrenschmidt else if (addr >= (type->regions[mid].base + 175572d4b0b4SBenjamin Herrenschmidt type->regions[mid].size)) 175672d4b0b4SBenjamin Herrenschmidt left = mid + 1; 175772d4b0b4SBenjamin Herrenschmidt else 175872d4b0b4SBenjamin Herrenschmidt return mid; 175972d4b0b4SBenjamin Herrenschmidt } while (left < right); 176072d4b0b4SBenjamin Herrenschmidt return -1; 176172d4b0b4SBenjamin Herrenschmidt } 176272d4b0b4SBenjamin Herrenschmidt 1763f5a222dcSYueyi Li bool __init_memblock memblock_is_reserved(phys_addr_t addr) 176495f72d1eSYinghai Lu { 176572d4b0b4SBenjamin Herrenschmidt return memblock_search(&memblock.reserved, addr) != -1; 176695f72d1eSYinghai Lu } 176772d4b0b4SBenjamin Herrenschmidt 1768b4ad0c7eSYaowei Bai bool __init_memblock memblock_is_memory(phys_addr_t addr) 176972d4b0b4SBenjamin Herrenschmidt { 177072d4b0b4SBenjamin Herrenschmidt return memblock_search(&memblock.memory, addr) != -1; 177172d4b0b4SBenjamin Herrenschmidt } 177272d4b0b4SBenjamin Herrenschmidt 1773937f0c26SYaowei Bai bool __init_memblock memblock_is_map_memory(phys_addr_t addr) 1774bf3d3cc5SArd Biesheuvel { 1775bf3d3cc5SArd Biesheuvel int i = memblock_search(&memblock.memory, addr); 1776bf3d3cc5SArd Biesheuvel 1777bf3d3cc5SArd Biesheuvel if (i == -1) 1778bf3d3cc5SArd Biesheuvel return false; 1779bf3d3cc5SArd Biesheuvel return !memblock_is_nomap(&memblock.memory.regions[i]); 1780bf3d3cc5SArd Biesheuvel } 1781bf3d3cc5SArd Biesheuvel 1782e76b63f8SYinghai Lu int __init_memblock memblock_search_pfn_nid(unsigned long pfn, 1783e76b63f8SYinghai Lu unsigned long *start_pfn, unsigned long *end_pfn) 1784e76b63f8SYinghai Lu { 1785e76b63f8SYinghai Lu struct memblock_type *type = &memblock.memory; 178616763230SFabian Frederick int mid = memblock_search(type, PFN_PHYS(pfn)); 1787e76b63f8SYinghai Lu 1788e76b63f8SYinghai Lu if (mid == -1) 1789e76b63f8SYinghai Lu return -1; 1790e76b63f8SYinghai Lu 1791f7e2f7e8SFabian Frederick *start_pfn = PFN_DOWN(type->regions[mid].base); 1792f7e2f7e8SFabian Frederick *end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size); 1793e76b63f8SYinghai Lu 1794d622abf7SMike Rapoport return memblock_get_region_node(&type->regions[mid]); 
1795e76b63f8SYinghai Lu } 1796e76b63f8SYinghai Lu 1797eab30949SStephen Boyd /** 1798eab30949SStephen Boyd * memblock_is_region_memory - check if a region is a subset of memory 1799eab30949SStephen Boyd * @base: base of region to check 1800eab30949SStephen Boyd * @size: size of region to check 1801eab30949SStephen Boyd * 1802eab30949SStephen Boyd * Check if the region [@base, @base + @size) is a subset of a memory block. 1803eab30949SStephen Boyd * 180447cec443SMike Rapoport * Return: 1805eab30949SStephen Boyd * true if the region is a subset of a memory block, false otherwise. 1806eab30949SStephen Boyd */ 1807937f0c26SYaowei Bai bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size) 180872d4b0b4SBenjamin Herrenschmidt { 1809abb65272STomi Valkeinen int idx = memblock_search(&memblock.memory, base); 1810eb18f1b5STejun Heo phys_addr_t end = base + memblock_cap_size(base, &size); 181172d4b0b4SBenjamin Herrenschmidt 181272d4b0b4SBenjamin Herrenschmidt if (idx == -1) 1813937f0c26SYaowei Bai return false; 1814ef415ef4SWei Yang return (memblock.memory.regions[idx].base + 1815eb18f1b5STejun Heo memblock.memory.regions[idx].size) >= end; 181695f72d1eSYinghai Lu } 181795f72d1eSYinghai Lu 1818eab30949SStephen Boyd /** 1819eab30949SStephen Boyd * memblock_is_region_reserved - check if a region intersects reserved memory 1820eab30949SStephen Boyd * @base: base of region to check 1821eab30949SStephen Boyd * @size: size of region to check 1822eab30949SStephen Boyd * 182347cec443SMike Rapoport * Check if the region [@base, @base + @size) intersects a reserved 182447cec443SMike Rapoport * memory block. 1825eab30949SStephen Boyd * 182647cec443SMike Rapoport * Return: 1827c5c5c9d1STang Chen * True if they intersect, false if not. 1828eab30949SStephen Boyd */ 1829c5c5c9d1STang Chen bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size) 183095f72d1eSYinghai Lu { 1831eb18f1b5STejun Heo memblock_cap_size(base, &size); 1832c5c5c9d1STang Chen return memblock_overlaps_region(&memblock.reserved, base, size); 183395f72d1eSYinghai Lu } 183495f72d1eSYinghai Lu 18356ede1fd3SYinghai Lu void __init_memblock memblock_trim_memory(phys_addr_t align) 18366ede1fd3SYinghai Lu { 18376ede1fd3SYinghai Lu phys_addr_t start, end, orig_start, orig_end; 1838136199f0SEmil Medve struct memblock_region *r; 18396ede1fd3SYinghai Lu 1840cc6de168SMike Rapoport for_each_mem_region(r) { 1841136199f0SEmil Medve orig_start = r->base; 1842136199f0SEmil Medve orig_end = r->base + r->size; 18436ede1fd3SYinghai Lu start = round_up(orig_start, align); 18446ede1fd3SYinghai Lu end = round_down(orig_end, align); 18456ede1fd3SYinghai Lu 18466ede1fd3SYinghai Lu if (start == orig_start && end == orig_end) 18476ede1fd3SYinghai Lu continue; 18486ede1fd3SYinghai Lu 18496ede1fd3SYinghai Lu if (start < end) { 1850136199f0SEmil Medve r->base = start; 1851136199f0SEmil Medve r->size = end - start; 18526ede1fd3SYinghai Lu } else { 1853136199f0SEmil Medve memblock_remove_region(&memblock.memory, 1854136199f0SEmil Medve r - memblock.memory.regions); 1855136199f0SEmil Medve r--; 18566ede1fd3SYinghai Lu } 18576ede1fd3SYinghai Lu } 18586ede1fd3SYinghai Lu } 1859e63075a3SBenjamin Herrenschmidt 18603661ca66SYinghai Lu void __init_memblock memblock_set_current_limit(phys_addr_t limit) 1861e63075a3SBenjamin Herrenschmidt { 1862e63075a3SBenjamin Herrenschmidt memblock.current_limit = limit; 1863e63075a3SBenjamin Herrenschmidt } 1864e63075a3SBenjamin Herrenschmidt 1865fec51014SLaura Abbott phys_addr_t __init_memblock memblock_get_current_limit(void)
1866fec51014SLaura Abbott { 1867fec51014SLaura Abbott return memblock.current_limit; 1868fec51014SLaura Abbott } 1869fec51014SLaura Abbott 18700262d9c8SHeiko Carstens static void __init_memblock memblock_dump(struct memblock_type *type) 18716ed311b2SBenjamin Herrenschmidt { 18725d63f81cSMiles Chen phys_addr_t base, end, size; 1873e1720feeSMike Rapoport enum memblock_flags flags; 18748c9c1701SAlexander Kuleshov int idx; 18758c9c1701SAlexander Kuleshov struct memblock_region *rgn; 18766ed311b2SBenjamin Herrenschmidt 18770262d9c8SHeiko Carstens pr_info(" %s.cnt = 0x%lx\n", type->name, type->cnt); 18786ed311b2SBenjamin Herrenschmidt 187966e8b438SGioh Kim for_each_memblock_type(idx, type, rgn) { 18807c0caeb8STejun Heo char nid_buf[32] = ""; 18816ed311b2SBenjamin Herrenschmidt 18827c0caeb8STejun Heo base = rgn->base; 18837c0caeb8STejun Heo size = rgn->size; 18845d63f81cSMiles Chen end = base + size - 1; 188566a20757STang Chen flags = rgn->flags; 18863f08a302SMike Rapoport #ifdef CONFIG_NEED_MULTIPLE_NODES 18877c0caeb8STejun Heo if (memblock_get_region_node(rgn) != MAX_NUMNODES) 18887c0caeb8STejun Heo snprintf(nid_buf, sizeof(nid_buf), " on node %d", 18897c0caeb8STejun Heo memblock_get_region_node(rgn)); 18907c0caeb8STejun Heo #endif 1891e1720feeSMike Rapoport pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n", 18920262d9c8SHeiko Carstens type->name, idx, &base, &end, &size, nid_buf, flags); 18936ed311b2SBenjamin Herrenschmidt } 18946ed311b2SBenjamin Herrenschmidt } 18956ed311b2SBenjamin Herrenschmidt 189687c55870SMike Rapoport static void __init_memblock __memblock_dump_all(void) 18976ed311b2SBenjamin Herrenschmidt { 18986ed311b2SBenjamin Herrenschmidt pr_info("MEMBLOCK configuration:\n"); 18995d63f81cSMiles Chen pr_info(" memory size = %pa reserved size = %pa\n", 19005d63f81cSMiles Chen &memblock.memory.total_size, 19015d63f81cSMiles Chen &memblock.reserved.total_size); 19026ed311b2SBenjamin Herrenschmidt 19030262d9c8SHeiko Carstens memblock_dump(&memblock.memory); 19040262d9c8SHeiko Carstens memblock_dump(&memblock.reserved); 1905409efd4cSHeiko Carstens #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP 190677649905SDavid Hildenbrand memblock_dump(&physmem); 1907409efd4cSHeiko Carstens #endif 19086ed311b2SBenjamin Herrenschmidt } 19096ed311b2SBenjamin Herrenschmidt 191087c55870SMike Rapoport void __init_memblock memblock_dump_all(void) 191187c55870SMike Rapoport { 191287c55870SMike Rapoport if (memblock_debug) 191387c55870SMike Rapoport __memblock_dump_all(); 191487c55870SMike Rapoport } 191587c55870SMike Rapoport 19161aadc056STejun Heo void __init memblock_allow_resize(void) 19176ed311b2SBenjamin Herrenschmidt { 1918142b45a7SBenjamin Herrenschmidt memblock_can_resize = 1; 19196ed311b2SBenjamin Herrenschmidt } 19206ed311b2SBenjamin Herrenschmidt 19216ed311b2SBenjamin Herrenschmidt static int __init early_memblock(char *p) 19226ed311b2SBenjamin Herrenschmidt { 19236ed311b2SBenjamin Herrenschmidt if (p && strstr(p, "debug")) 19246ed311b2SBenjamin Herrenschmidt memblock_debug = 1; 19256ed311b2SBenjamin Herrenschmidt return 0; 19266ed311b2SBenjamin Herrenschmidt } 19276ed311b2SBenjamin Herrenschmidt early_param("memblock", early_memblock); 19286ed311b2SBenjamin Herrenschmidt 1929*4f5b0c17SMike Rapoport static void __init free_memmap(unsigned long start_pfn, unsigned long end_pfn) 1930*4f5b0c17SMike Rapoport { 1931*4f5b0c17SMike Rapoport struct page *start_pg, *end_pg; 1932*4f5b0c17SMike Rapoport phys_addr_t pg, pgend; 1933*4f5b0c17SMike Rapoport 1934*4f5b0c17SMike Rapoport /* 1935*4f5b0c17SMike Rapoport * 
Convert start_pfn/end_pfn to a struct page pointer. 1936*4f5b0c17SMike Rapoport */ 1937*4f5b0c17SMike Rapoport start_pg = pfn_to_page(start_pfn - 1) + 1; 1938*4f5b0c17SMike Rapoport end_pg = pfn_to_page(end_pfn - 1) + 1; 1939*4f5b0c17SMike Rapoport 1940*4f5b0c17SMike Rapoport /* 1941*4f5b0c17SMike Rapoport * Convert to physical addresses, and round start upwards and end 1942*4f5b0c17SMike Rapoport * downwards. 1943*4f5b0c17SMike Rapoport */ 1944*4f5b0c17SMike Rapoport pg = PAGE_ALIGN(__pa(start_pg)); 1945*4f5b0c17SMike Rapoport pgend = __pa(end_pg) & PAGE_MASK; 1946*4f5b0c17SMike Rapoport 1947*4f5b0c17SMike Rapoport /* 1948*4f5b0c17SMike Rapoport * If there are free pages between these, free the section of the 1949*4f5b0c17SMike Rapoport * memmap array. 1950*4f5b0c17SMike Rapoport */ 1951*4f5b0c17SMike Rapoport if (pg < pgend) 1952*4f5b0c17SMike Rapoport memblock_free(pg, pgend - pg); 1953*4f5b0c17SMike Rapoport } 1954*4f5b0c17SMike Rapoport 1955*4f5b0c17SMike Rapoport /* 1956*4f5b0c17SMike Rapoport * The mem_map array can get very big. Free the unused area of the memory map. 1957*4f5b0c17SMike Rapoport */ 1958*4f5b0c17SMike Rapoport static void __init free_unused_memmap(void) 1959*4f5b0c17SMike Rapoport { 1960*4f5b0c17SMike Rapoport unsigned long start, end, prev_end = 0; 1961*4f5b0c17SMike Rapoport int i; 1962*4f5b0c17SMike Rapoport 1963*4f5b0c17SMike Rapoport if (!IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) || 1964*4f5b0c17SMike Rapoport IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP)) 1965*4f5b0c17SMike Rapoport return; 1966*4f5b0c17SMike Rapoport 1967*4f5b0c17SMike Rapoport /* 1968*4f5b0c17SMike Rapoport * This relies on each bank being in address order. 1969*4f5b0c17SMike Rapoport * The banks are sorted previously in bootmem_init(). 1970*4f5b0c17SMike Rapoport */ 1971*4f5b0c17SMike Rapoport for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) { 1972*4f5b0c17SMike Rapoport #ifdef CONFIG_SPARSEMEM 1973*4f5b0c17SMike Rapoport /* 1974*4f5b0c17SMike Rapoport * Take care not to free memmap entries that don't exist 1975*4f5b0c17SMike Rapoport * due to SPARSEMEM sections which aren't present. 1976*4f5b0c17SMike Rapoport */ 1977*4f5b0c17SMike Rapoport start = min(start, ALIGN(prev_end, PAGES_PER_SECTION)); 1978*4f5b0c17SMike Rapoport #else 1979*4f5b0c17SMike Rapoport /* 1980*4f5b0c17SMike Rapoport * Align down here since the VM subsystem insists that the 1981*4f5b0c17SMike Rapoport * memmap entries are valid from the bank start aligned to 1982*4f5b0c17SMike Rapoport * MAX_ORDER_NR_PAGES. 1983*4f5b0c17SMike Rapoport */ 1984*4f5b0c17SMike Rapoport start = round_down(start, MAX_ORDER_NR_PAGES); 1985*4f5b0c17SMike Rapoport #endif 1986*4f5b0c17SMike Rapoport 1987*4f5b0c17SMike Rapoport /* 1988*4f5b0c17SMike Rapoport * If we had a previous bank, and there is a space 1989*4f5b0c17SMike Rapoport * between the current bank and the previous, free it. 1990*4f5b0c17SMike Rapoport */ 1991*4f5b0c17SMike Rapoport if (prev_end && prev_end < start) 1992*4f5b0c17SMike Rapoport free_memmap(prev_end, start); 1993*4f5b0c17SMike Rapoport 1994*4f5b0c17SMike Rapoport /* 1995*4f5b0c17SMike Rapoport * Align up here since the VM subsystem insists that the 1996*4f5b0c17SMike Rapoport * memmap entries are valid from the bank end aligned to 1997*4f5b0c17SMike Rapoport * MAX_ORDER_NR_PAGES. 
1998*4f5b0c17SMike Rapoport */ 1999*4f5b0c17SMike Rapoport prev_end = ALIGN(end, MAX_ORDER_NR_PAGES); 2000*4f5b0c17SMike Rapoport } 2001*4f5b0c17SMike Rapoport 2002*4f5b0c17SMike Rapoport #ifdef CONFIG_SPARSEMEM 2003*4f5b0c17SMike Rapoport if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) 2004*4f5b0c17SMike Rapoport free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION)); 2005*4f5b0c17SMike Rapoport #endif 2006*4f5b0c17SMike Rapoport } 2007*4f5b0c17SMike Rapoport 2008bda49a81SMike Rapoport static void __init __free_pages_memory(unsigned long start, unsigned long end) 2009bda49a81SMike Rapoport { 2010bda49a81SMike Rapoport int order; 2011bda49a81SMike Rapoport 2012bda49a81SMike Rapoport while (start < end) { 2013bda49a81SMike Rapoport order = min(MAX_ORDER - 1UL, __ffs(start)); 2014bda49a81SMike Rapoport 2015bda49a81SMike Rapoport while (start + (1UL << order) > end) 2016bda49a81SMike Rapoport order--; 2017bda49a81SMike Rapoport 2018bda49a81SMike Rapoport memblock_free_pages(pfn_to_page(start), start, order); 2019bda49a81SMike Rapoport 2020bda49a81SMike Rapoport start += (1UL << order); 2021bda49a81SMike Rapoport } 2022bda49a81SMike Rapoport } 2023bda49a81SMike Rapoport 2024bda49a81SMike Rapoport static unsigned long __init __free_memory_core(phys_addr_t start, 2025bda49a81SMike Rapoport phys_addr_t end) 2026bda49a81SMike Rapoport { 2027bda49a81SMike Rapoport unsigned long start_pfn = PFN_UP(start); 2028bda49a81SMike Rapoport unsigned long end_pfn = min_t(unsigned long, 2029bda49a81SMike Rapoport PFN_DOWN(end), max_low_pfn); 2030bda49a81SMike Rapoport 2031bda49a81SMike Rapoport if (start_pfn >= end_pfn) 2032bda49a81SMike Rapoport return 0; 2033bda49a81SMike Rapoport 2034bda49a81SMike Rapoport __free_pages_memory(start_pfn, end_pfn); 2035bda49a81SMike Rapoport 2036bda49a81SMike Rapoport return end_pfn - start_pfn; 2037bda49a81SMike Rapoport } 2038bda49a81SMike Rapoport 2039bda49a81SMike Rapoport static unsigned long __init free_low_memory_core_early(void) 2040bda49a81SMike Rapoport { 2041bda49a81SMike Rapoport unsigned long count = 0; 2042bda49a81SMike Rapoport phys_addr_t start, end; 2043bda49a81SMike Rapoport u64 i; 2044bda49a81SMike Rapoport 2045bda49a81SMike Rapoport memblock_clear_hotplug(0, -1); 2046bda49a81SMike Rapoport 20479f3d5eaaSMike Rapoport for_each_reserved_mem_range(i, &start, &end) 2048bda49a81SMike Rapoport reserve_bootmem_region(start, end); 2049bda49a81SMike Rapoport 2050bda49a81SMike Rapoport /* 2051bda49a81SMike Rapoport * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id 2052bda49a81SMike Rapoport * because in some cases, such as when Node0 has no RAM installed, 2053bda49a81SMike Rapoport * low ram will be on Node1. 2054bda49a81SMike Rapoport */ 2055bda49a81SMike Rapoport for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, 2056bda49a81SMike Rapoport NULL) 2057bda49a81SMike Rapoport count += __free_memory_core(start, end); 2058bda49a81SMike Rapoport 2059bda49a81SMike Rapoport return count; 2060bda49a81SMike Rapoport } 2061bda49a81SMike Rapoport 2062bda49a81SMike Rapoport static int reset_managed_pages_done __initdata; 2063bda49a81SMike Rapoport 2064bda49a81SMike Rapoport void reset_node_managed_pages(pg_data_t *pgdat) 2065bda49a81SMike Rapoport { 2066bda49a81SMike Rapoport struct zone *z; 2067bda49a81SMike Rapoport 2068bda49a81SMike Rapoport for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) 20699705bea5SArun KS atomic_long_set(&z->managed_pages, 0); 2070bda49a81SMike Rapoport } 2071bda49a81SMike Rapoport 2072bda49a81SMike
Rapoport void __init reset_all_zones_managed_pages(void) 2073bda49a81SMike Rapoport { 2074bda49a81SMike Rapoport struct pglist_data *pgdat; 2075bda49a81SMike Rapoport 2076bda49a81SMike Rapoport if (reset_managed_pages_done) 2077bda49a81SMike Rapoport return; 2078bda49a81SMike Rapoport 2079bda49a81SMike Rapoport for_each_online_pgdat(pgdat) 2080bda49a81SMike Rapoport reset_node_managed_pages(pgdat); 2081bda49a81SMike Rapoport 2082bda49a81SMike Rapoport reset_managed_pages_done = 1; 2083bda49a81SMike Rapoport } 2084bda49a81SMike Rapoport 2085bda49a81SMike Rapoport /** 2086bda49a81SMike Rapoport * memblock_free_all - release free pages to the buddy allocator 2087bda49a81SMike Rapoport * 2088bda49a81SMike Rapoport * Return: the number of pages actually released. 2089bda49a81SMike Rapoport */ 2090bda49a81SMike Rapoport unsigned long __init memblock_free_all(void) 2091bda49a81SMike Rapoport { 2092bda49a81SMike Rapoport unsigned long pages; 2093bda49a81SMike Rapoport 2094*4f5b0c17SMike Rapoport free_unused_memmap(); 2095bda49a81SMike Rapoport reset_all_zones_managed_pages(); 2096bda49a81SMike Rapoport 2097bda49a81SMike Rapoport pages = free_low_memory_core_early(); 2098ca79b0c2SArun KS totalram_pages_add(pages); 2099bda49a81SMike Rapoport 2100bda49a81SMike Rapoport return pages; 2101bda49a81SMike Rapoport } 2102bda49a81SMike Rapoport 2103350e88baSMike Rapoport #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK) 21046d03b885SBenjamin Herrenschmidt 21056d03b885SBenjamin Herrenschmidt static int memblock_debug_show(struct seq_file *m, void *private) 21066d03b885SBenjamin Herrenschmidt { 21076d03b885SBenjamin Herrenschmidt struct memblock_type *type = m->private; 21086d03b885SBenjamin Herrenschmidt struct memblock_region *reg; 21096d03b885SBenjamin Herrenschmidt int i; 21105d63f81cSMiles Chen phys_addr_t end; 21116d03b885SBenjamin Herrenschmidt 21126d03b885SBenjamin Herrenschmidt for (i = 0; i < type->cnt; i++) { 21136d03b885SBenjamin Herrenschmidt reg = &type->regions[i]; 21145d63f81cSMiles Chen end = reg->base + reg->size - 1; 21156d03b885SBenjamin Herrenschmidt 21165d63f81cSMiles Chen seq_printf(m, "%4d: ", i); 21175d63f81cSMiles Chen seq_printf(m, "%pa..%pa\n", &reg->base, &end); 21186d03b885SBenjamin Herrenschmidt } 21196d03b885SBenjamin Herrenschmidt return 0; 21206d03b885SBenjamin Herrenschmidt } 21215ad35093SAndy Shevchenko DEFINE_SHOW_ATTRIBUTE(memblock_debug); 21226d03b885SBenjamin Herrenschmidt 21236d03b885SBenjamin Herrenschmidt static int __init memblock_init_debugfs(void) 21246d03b885SBenjamin Herrenschmidt { 21256d03b885SBenjamin Herrenschmidt struct dentry *root = debugfs_create_dir("memblock", NULL); 2126d9f7979cSGreg Kroah-Hartman 21270825a6f9SJoe Perches debugfs_create_file("memory", 0444, root, 21280825a6f9SJoe Perches &memblock.memory, &memblock_debug_fops); 21290825a6f9SJoe Perches debugfs_create_file("reserved", 0444, root, 21300825a6f9SJoe Perches &memblock.reserved, &memblock_debug_fops); 213170210ed9SPhilipp Hachtmann #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP 213277649905SDavid Hildenbrand debugfs_create_file("physmem", 0444, root, &physmem, 213377649905SDavid Hildenbrand &memblock_debug_fops); 213470210ed9SPhilipp Hachtmann #endif 21356d03b885SBenjamin Herrenschmidt 21366d03b885SBenjamin Herrenschmidt return 0; 21376d03b885SBenjamin Herrenschmidt } 21386d03b885SBenjamin Herrenschmidt __initcall(memblock_init_debugfs); 21396d03b885SBenjamin Herrenschmidt 21406d03b885SBenjamin Herrenschmidt #endif /* CONFIG_DEBUG_FS */ 2141
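/*
 * Example (editorial sketch): with CONFIG_DEBUG_FS and
 * CONFIG_ARCH_KEEP_MEMBLOCK enabled, the files created above expose the
 * region lists at runtime (the addresses shown are made up):
 *
 *	# cat /sys/kernel/debug/memblock/memory
 *	   0: 0x0000000080000000..0x00000000bfffffff
 */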