/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>

#include <asm/sections.h>
#include <linux/io.h>

#include "internal.h"

/**
 * DOC: memblock overview
 *
 * Memblock is a method of managing memory regions during the early
 * boot period when the usual kernel memory allocators are not up and
 * running.
 *
 * Memblock views the system memory as collections of contiguous
 * regions. There are several types of these collections:
 *
 * * ``memory`` - describes the physical memory available to the
 *   kernel; this may differ from the actual physical memory installed
 *   in the system, for instance when the memory is restricted with
 *   the ``mem=`` command line parameter
 * * ``reserved`` - describes the regions that were allocated
 * * ``physmap`` - describes the actual physical memory regardless of
 *   the possible restrictions; the ``physmap`` type is only available
 *   on some architectures.
 *
 * Each region is represented by :c:type:`struct memblock_region` that
 * defines the region extents, its attributes and NUMA node id on NUMA
 * systems. Every memory type is described by the :c:type:`struct
 * memblock_type` which contains an array of memory regions along with
 * the allocator metadata. The memory types are nicely wrapped with
 * :c:type:`struct memblock`. This structure is statically initialized
 * at build time. The region arrays for the "memory" and "reserved"
 * types are initially sized to %INIT_MEMBLOCK_REGIONS and for the
 * "physmap" type to %INIT_PHYSMEM_REGIONS.
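 *
 * The ``memory`` and ``reserved`` collections can be traversed with the
 * iterators provided by memblock.h. As an illustrative sketch (not a
 * snippet taken from an in-tree user), the registered memory regions
 * could be dumped with::
 *
 *	struct memblock_region *reg;
 *
 *	for_each_memblock(memory, reg)
 *		pr_info("memory: base=%pa size=%pa nid=%d\n",
 *			&reg->base, &reg->size,
 *			memblock_get_region_node(reg));
 *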
 * The :c:func:`memblock_allow_resize` enables automatic resizing of
 * the region arrays during addition of new regions. This feature
 * should be used with care so that memory allocated for the region
 * array will not overlap with areas that should be reserved, for
 * example initrd.
 *
 * The early architecture setup should tell memblock what the physical
 * memory layout is by using the :c:func:`memblock_add` or
 * :c:func:`memblock_add_node` functions. The first function does not
 * assign the region to a NUMA node and it is appropriate for UMA
 * systems. Yet, it is possible to use it on NUMA systems as well and
 * assign the region to a NUMA node later in the setup process using
 * :c:func:`memblock_set_node`. The :c:func:`memblock_add_node`
 * function performs such an assignment directly.
 *
 * Once memblock is set up, the memory can be allocated using either
 * the memblock or the bootmem APIs.
 *
 * As the system boot progresses, the architecture-specific
 * :c:func:`mem_init` function frees all the memory to the buddy page
 * allocator.
 *
 * If an architecture enables %CONFIG_ARCH_DISCARD_MEMBLOCK, the
 * memblock data structures will be discarded after the system
 * initialization completes.
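 *
 * Putting this together, an early architecture setup sequence might look
 * like the sketch below. The names ``bank``, ``nr_banks``,
 * ``kernel_start`` and ``kernel_size`` are made up for illustration;
 * real code would take them from the firmware description and the
 * kernel image layout::
 *
 *	int i;
 *
 *	for (i = 0; i < nr_banks; i++)
 *		memblock_add_node(bank[i].base, bank[i].size, bank[i].nid);
 *
 *	memblock_reserve(kernel_start, kernel_size);
 *	memblock_allow_resize();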
833e039c5cSMike Rapoport */ 843e039c5cSMike Rapoport 85fe091c20STejun Heo static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; 86fe091c20STejun Heo static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; 8770210ed9SPhilipp Hachtmann #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP 8870210ed9SPhilipp Hachtmann static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock; 8970210ed9SPhilipp Hachtmann #endif 90fe091c20STejun Heo 91fe091c20STejun Heo struct memblock memblock __initdata_memblock = { 92fe091c20STejun Heo .memory.regions = memblock_memory_init_regions, 93fe091c20STejun Heo .memory.cnt = 1, /* empty dummy entry */ 94fe091c20STejun Heo .memory.max = INIT_MEMBLOCK_REGIONS, 950262d9c8SHeiko Carstens .memory.name = "memory", 96fe091c20STejun Heo 97fe091c20STejun Heo .reserved.regions = memblock_reserved_init_regions, 98fe091c20STejun Heo .reserved.cnt = 1, /* empty dummy entry */ 99fe091c20STejun Heo .reserved.max = INIT_MEMBLOCK_REGIONS, 1000262d9c8SHeiko Carstens .reserved.name = "reserved", 101fe091c20STejun Heo 10270210ed9SPhilipp Hachtmann #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP 10370210ed9SPhilipp Hachtmann .physmem.regions = memblock_physmem_init_regions, 10470210ed9SPhilipp Hachtmann .physmem.cnt = 1, /* empty dummy entry */ 10570210ed9SPhilipp Hachtmann .physmem.max = INIT_PHYSMEM_REGIONS, 1060262d9c8SHeiko Carstens .physmem.name = "physmem", 10770210ed9SPhilipp Hachtmann #endif 10870210ed9SPhilipp Hachtmann 10979442ed1STang Chen .bottom_up = false, 110fe091c20STejun Heo .current_limit = MEMBLOCK_ALLOC_ANYWHERE, 111fe091c20STejun Heo }; 11295f72d1eSYinghai Lu 11310d06439SYinghai Lu int memblock_debug __initdata_memblock; 114a3f5bafcSTony Luck static bool system_has_some_mirror __initdata_memblock = false; 1151aadc056STejun Heo static int memblock_can_resize __initdata_memblock; 116181eb394SGavin Shan static int memblock_memory_in_slab __initdata_memblock = 0; 117181eb394SGavin Shan static int memblock_reserved_in_slab __initdata_memblock = 0; 11895f72d1eSYinghai Lu 119e1720feeSMike Rapoport enum memblock_flags __init_memblock choose_memblock_flags(void) 120a3f5bafcSTony Luck { 121a3f5bafcSTony Luck return system_has_some_mirror ? 
MEMBLOCK_MIRROR : MEMBLOCK_NONE; 122a3f5bafcSTony Luck } 123a3f5bafcSTony Luck 124eb18f1b5STejun Heo /* adjust *@size so that (@base + *@size) doesn't overflow, return new size */ 125eb18f1b5STejun Heo static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size) 126eb18f1b5STejun Heo { 1271c4bc43dSStefan Agner return *size = min(*size, PHYS_ADDR_MAX - base); 128eb18f1b5STejun Heo } 129eb18f1b5STejun Heo 1306ed311b2SBenjamin Herrenschmidt /* 1316ed311b2SBenjamin Herrenschmidt * Address comparison utilities 1326ed311b2SBenjamin Herrenschmidt */ 13310d06439SYinghai Lu static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1, 1342898cc4cSBenjamin Herrenschmidt phys_addr_t base2, phys_addr_t size2) 13595f72d1eSYinghai Lu { 13695f72d1eSYinghai Lu return ((base1 < (base2 + size2)) && (base2 < (base1 + size1))); 13795f72d1eSYinghai Lu } 13895f72d1eSYinghai Lu 13995cf82ecSTang Chen bool __init_memblock memblock_overlaps_region(struct memblock_type *type, 1402d7d3eb2SH Hartley Sweeten phys_addr_t base, phys_addr_t size) 1416ed311b2SBenjamin Herrenschmidt { 1426ed311b2SBenjamin Herrenschmidt unsigned long i; 1436ed311b2SBenjamin Herrenschmidt 144f14516fbSAlexander Kuleshov for (i = 0; i < type->cnt; i++) 145f14516fbSAlexander Kuleshov if (memblock_addrs_overlap(base, size, type->regions[i].base, 146f14516fbSAlexander Kuleshov type->regions[i].size)) 1476ed311b2SBenjamin Herrenschmidt break; 148c5c5c9d1STang Chen return i < type->cnt; 1496ed311b2SBenjamin Herrenschmidt } 1506ed311b2SBenjamin Herrenschmidt 15147cec443SMike Rapoport /** 15279442ed1STang Chen * __memblock_find_range_bottom_up - find free area utility in bottom-up 15379442ed1STang Chen * @start: start of candidate range 15447cec443SMike Rapoport * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or 15547cec443SMike Rapoport * %MEMBLOCK_ALLOC_ACCESSIBLE 15679442ed1STang Chen * @size: size of free area to find 15779442ed1STang Chen * @align: alignment of free area to find 158b1154233SGrygorii Strashko * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 159fc6daaf9STony Luck * @flags: pick from blocks based on memory attributes 16079442ed1STang Chen * 16179442ed1STang Chen * Utility called from memblock_find_in_range_node(), find free area bottom-up. 16279442ed1STang Chen * 16347cec443SMike Rapoport * Return: 16479442ed1STang Chen * Found address on success, 0 on failure. 
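 *
 * As a worked example with made-up numbers: for a free range
 * ``[0x2100, 0x9000)`` with ``size == 0x1000`` and ``align == 0x1000``,
 * the candidate is ``round_up(0x2100, 0x1000) == 0x3000``; it is
 * accepted because ``0x9000 - 0x3000 >= 0x1000``, so the lowest suitable
 * address, 0x3000, is returned.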
16579442ed1STang Chen */ 16679442ed1STang Chen static phys_addr_t __init_memblock 16779442ed1STang Chen __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end, 168fc6daaf9STony Luck phys_addr_t size, phys_addr_t align, int nid, 169e1720feeSMike Rapoport enum memblock_flags flags) 17079442ed1STang Chen { 17179442ed1STang Chen phys_addr_t this_start, this_end, cand; 17279442ed1STang Chen u64 i; 17379442ed1STang Chen 174fc6daaf9STony Luck for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) { 17579442ed1STang Chen this_start = clamp(this_start, start, end); 17679442ed1STang Chen this_end = clamp(this_end, start, end); 17779442ed1STang Chen 17879442ed1STang Chen cand = round_up(this_start, align); 17979442ed1STang Chen if (cand < this_end && this_end - cand >= size) 18079442ed1STang Chen return cand; 18179442ed1STang Chen } 18279442ed1STang Chen 18379442ed1STang Chen return 0; 18479442ed1STang Chen } 18579442ed1STang Chen 1867bd0b0f0STejun Heo /** 1871402899eSTang Chen * __memblock_find_range_top_down - find free area utility, in top-down 1881402899eSTang Chen * @start: start of candidate range 18947cec443SMike Rapoport * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or 19047cec443SMike Rapoport * %MEMBLOCK_ALLOC_ACCESSIBLE 1911402899eSTang Chen * @size: size of free area to find 1921402899eSTang Chen * @align: alignment of free area to find 193b1154233SGrygorii Strashko * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 194fc6daaf9STony Luck * @flags: pick from blocks based on memory attributes 1951402899eSTang Chen * 1961402899eSTang Chen * Utility called from memblock_find_in_range_node(), find free area top-down. 1971402899eSTang Chen * 19847cec443SMike Rapoport * Return: 19979442ed1STang Chen * Found address on success, 0 on failure. 
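 *
 * As a worked example with made-up numbers: for a free range
 * ``[0x2100, 0x9000)`` with ``size == 0x1000`` and ``align == 0x1000``,
 * the candidate is ``round_down(0x9000 - 0x1000, 0x1000) == 0x8000``;
 * it is accepted because it is not below ``0x2100``, so the highest
 * suitable address, 0x8000, is returned.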
2001402899eSTang Chen */ 2011402899eSTang Chen static phys_addr_t __init_memblock 2021402899eSTang Chen __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end, 203fc6daaf9STony Luck phys_addr_t size, phys_addr_t align, int nid, 204e1720feeSMike Rapoport enum memblock_flags flags) 2051402899eSTang Chen { 2061402899eSTang Chen phys_addr_t this_start, this_end, cand; 2071402899eSTang Chen u64 i; 2081402899eSTang Chen 209fc6daaf9STony Luck for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end, 210fc6daaf9STony Luck NULL) { 2111402899eSTang Chen this_start = clamp(this_start, start, end); 2121402899eSTang Chen this_end = clamp(this_end, start, end); 2131402899eSTang Chen 2141402899eSTang Chen if (this_end < size) 2151402899eSTang Chen continue; 2161402899eSTang Chen 2171402899eSTang Chen cand = round_down(this_end - size, align); 2181402899eSTang Chen if (cand >= this_start) 2191402899eSTang Chen return cand; 2201402899eSTang Chen } 2211402899eSTang Chen 2221402899eSTang Chen return 0; 2231402899eSTang Chen } 2241402899eSTang Chen 2251402899eSTang Chen /** 2267bd0b0f0STejun Heo * memblock_find_in_range_node - find free area in given range and node 2277bd0b0f0STejun Heo * @size: size of free area to find 2287bd0b0f0STejun Heo * @align: alignment of free area to find 22987029ee9SGrygorii Strashko * @start: start of candidate range 23047cec443SMike Rapoport * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or 23147cec443SMike Rapoport * %MEMBLOCK_ALLOC_ACCESSIBLE 232b1154233SGrygorii Strashko * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 233fc6daaf9STony Luck * @flags: pick from blocks based on memory attributes 2347bd0b0f0STejun Heo * 2357bd0b0f0STejun Heo * Find @size free area aligned to @align in the specified range and node. 2367bd0b0f0STejun Heo * 23779442ed1STang Chen * When allocation direction is bottom-up, the @start should be greater 23879442ed1STang Chen * than the end of the kernel image. Otherwise, it will be trimmed. The 23979442ed1STang Chen * reason is that we want the bottom-up allocation just near the kernel 24079442ed1STang Chen * image so it is highly likely that the allocated memory and the kernel 24179442ed1STang Chen * will reside in the same node. 24279442ed1STang Chen * 24379442ed1STang Chen * If bottom-up allocation failed, will try to allocate memory top-down. 24479442ed1STang Chen * 24547cec443SMike Rapoport * Return: 24679442ed1STang Chen * Found address on success, 0 on failure. 2476ed311b2SBenjamin Herrenschmidt */ 24887029ee9SGrygorii Strashko phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size, 24987029ee9SGrygorii Strashko phys_addr_t align, phys_addr_t start, 250e1720feeSMike Rapoport phys_addr_t end, int nid, 251e1720feeSMike Rapoport enum memblock_flags flags) 252f7210e6cSTang Chen { 2530cfb8f0cSTang Chen phys_addr_t kernel_end, ret; 25479442ed1STang Chen 255f7210e6cSTang Chen /* pump up @end */ 256f7210e6cSTang Chen if (end == MEMBLOCK_ALLOC_ACCESSIBLE) 257f7210e6cSTang Chen end = memblock.current_limit; 258f7210e6cSTang Chen 259f7210e6cSTang Chen /* avoid allocating the first page */ 260f7210e6cSTang Chen start = max_t(phys_addr_t, start, PAGE_SIZE); 261f7210e6cSTang Chen end = max(start, end); 26279442ed1STang Chen kernel_end = __pa_symbol(_end); 26379442ed1STang Chen 26479442ed1STang Chen /* 26579442ed1STang Chen * try bottom-up allocation only when bottom-up mode 26679442ed1STang Chen * is set and @end is above the kernel image. 
	 */
	if (memblock_bottom_up() && end > kernel_end) {
		phys_addr_t bottom_up_start;

		/* make sure we will allocate above the kernel */
		bottom_up_start = max(start, kernel_end);

		/* ok, try bottom-up allocation first */
		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
						      size, align, nid, flags);
		if (ret)
			return ret;

		/*
		 * we always limit bottom-up allocation above the kernel,
		 * but top-down allocation doesn't have the limit, so
		 * retrying top-down allocation may succeed when bottom-up
		 * allocation failed.
		 *
		 * bottom-up allocation is expected to fail very rarely,
		 * so we use WARN_ONCE() here to see the stack trace if
		 * such a failure happens.
		 */
		WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE),
			  "memblock: bottom-up allocation failed, memory hotremove may be affected\n");
	}

	return __memblock_find_range_top_down(start, end, size, align, nid,
					      flags);
}

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * Return:
 * Found address on success, 0 on failure.
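 *
 * Example (an illustrative sketch, modelled on how this file allocates
 * its own region arrays; the size is arbitrary)::
 *
 *	phys_addr_t addr;
 *
 *	addr = memblock_find_in_range(0, memblock.current_limit,
 *				      PAGE_SIZE, PAGE_SIZE);
 *	if (addr)
 *		memblock_reserve(addr, PAGE_SIZE);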
3107bd0b0f0STejun Heo */ 3117bd0b0f0STejun Heo phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start, 3127bd0b0f0STejun Heo phys_addr_t end, phys_addr_t size, 3137bd0b0f0STejun Heo phys_addr_t align) 3147bd0b0f0STejun Heo { 315a3f5bafcSTony Luck phys_addr_t ret; 316e1720feeSMike Rapoport enum memblock_flags flags = choose_memblock_flags(); 317a3f5bafcSTony Luck 318a3f5bafcSTony Luck again: 319a3f5bafcSTony Luck ret = memblock_find_in_range_node(size, align, start, end, 320a3f5bafcSTony Luck NUMA_NO_NODE, flags); 321a3f5bafcSTony Luck 322a3f5bafcSTony Luck if (!ret && (flags & MEMBLOCK_MIRROR)) { 323a3f5bafcSTony Luck pr_warn("Could not allocate %pap bytes of mirrored memory\n", 324a3f5bafcSTony Luck &size); 325a3f5bafcSTony Luck flags &= ~MEMBLOCK_MIRROR; 326a3f5bafcSTony Luck goto again; 327a3f5bafcSTony Luck } 328a3f5bafcSTony Luck 329a3f5bafcSTony Luck return ret; 3307bd0b0f0STejun Heo } 3317bd0b0f0STejun Heo 33210d06439SYinghai Lu static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r) 33395f72d1eSYinghai Lu { 3341440c4e2STejun Heo type->total_size -= type->regions[r].size; 3357c0caeb8STejun Heo memmove(&type->regions[r], &type->regions[r + 1], 3367c0caeb8STejun Heo (type->cnt - (r + 1)) * sizeof(type->regions[r])); 337e3239ff9SBenjamin Herrenschmidt type->cnt--; 33895f72d1eSYinghai Lu 3398f7a6605SBenjamin Herrenschmidt /* Special case for empty arrays */ 3408f7a6605SBenjamin Herrenschmidt if (type->cnt == 0) { 3411440c4e2STejun Heo WARN_ON(type->total_size != 0); 3428f7a6605SBenjamin Herrenschmidt type->cnt = 1; 3438f7a6605SBenjamin Herrenschmidt type->regions[0].base = 0; 3448f7a6605SBenjamin Herrenschmidt type->regions[0].size = 0; 34566a20757STang Chen type->regions[0].flags = 0; 3467c0caeb8STejun Heo memblock_set_region_node(&type->regions[0], MAX_NUMNODES); 3478f7a6605SBenjamin Herrenschmidt } 34895f72d1eSYinghai Lu } 34995f72d1eSYinghai Lu 350354f17e1SPhilipp Hachtmann #ifdef CONFIG_ARCH_DISCARD_MEMBLOCK 3513010f876SPavel Tatashin /** 35247cec443SMike Rapoport * memblock_discard - discard memory and reserved arrays if they were allocated 3533010f876SPavel Tatashin */ 3543010f876SPavel Tatashin void __init memblock_discard(void) 35529f67386SYinghai Lu { 3563010f876SPavel Tatashin phys_addr_t addr, size; 35729f67386SYinghai Lu 3583010f876SPavel Tatashin if (memblock.reserved.regions != memblock_reserved_init_regions) { 3593010f876SPavel Tatashin addr = __pa(memblock.reserved.regions); 3603010f876SPavel Tatashin size = PAGE_ALIGN(sizeof(struct memblock_region) * 36129f67386SYinghai Lu memblock.reserved.max); 3623010f876SPavel Tatashin __memblock_free_late(addr, size); 36329f67386SYinghai Lu } 36429f67386SYinghai Lu 36591b540f9SPavel Tatashin if (memblock.memory.regions != memblock_memory_init_regions) { 3663010f876SPavel Tatashin addr = __pa(memblock.memory.regions); 3673010f876SPavel Tatashin size = PAGE_ALIGN(sizeof(struct memblock_region) * 3685e270e25SPhilipp Hachtmann memblock.memory.max); 3693010f876SPavel Tatashin __memblock_free_late(addr, size); 3705e270e25SPhilipp Hachtmann } 3713010f876SPavel Tatashin } 3725e270e25SPhilipp Hachtmann #endif 3735e270e25SPhilipp Hachtmann 37448c3b583SGreg Pearson /** 37548c3b583SGreg Pearson * memblock_double_array - double the size of the memblock regions array 37648c3b583SGreg Pearson * @type: memblock type of the regions array being doubled 37748c3b583SGreg Pearson * @new_area_start: starting address of memory range to avoid overlap with 37848c3b583SGreg Pearson * @new_area_size: 
size of memory range to avoid overlap with 37948c3b583SGreg Pearson * 38048c3b583SGreg Pearson * Double the size of the @type regions array. If memblock is being used to 38148c3b583SGreg Pearson * allocate memory for a new reserved regions array and there is a previously 38248c3b583SGreg Pearson * allocated memory range [@new_area_start, @new_area_start + @new_area_size] 38348c3b583SGreg Pearson * waiting to be reserved, ensure the memory used by the new array does 38448c3b583SGreg Pearson * not overlap. 38548c3b583SGreg Pearson * 38647cec443SMike Rapoport * Return: 38748c3b583SGreg Pearson * 0 on success, -1 on failure. 38848c3b583SGreg Pearson */ 38948c3b583SGreg Pearson static int __init_memblock memblock_double_array(struct memblock_type *type, 39048c3b583SGreg Pearson phys_addr_t new_area_start, 39148c3b583SGreg Pearson phys_addr_t new_area_size) 392142b45a7SBenjamin Herrenschmidt { 393142b45a7SBenjamin Herrenschmidt struct memblock_region *new_array, *old_array; 39429f67386SYinghai Lu phys_addr_t old_alloc_size, new_alloc_size; 395*a36aab89SMike Rapoport phys_addr_t old_size, new_size, addr, new_end; 396142b45a7SBenjamin Herrenschmidt int use_slab = slab_is_available(); 397181eb394SGavin Shan int *in_slab; 398142b45a7SBenjamin Herrenschmidt 399142b45a7SBenjamin Herrenschmidt /* We don't allow resizing until we know about the reserved regions 400142b45a7SBenjamin Herrenschmidt * of memory that aren't suitable for allocation 401142b45a7SBenjamin Herrenschmidt */ 402142b45a7SBenjamin Herrenschmidt if (!memblock_can_resize) 403142b45a7SBenjamin Herrenschmidt return -1; 404142b45a7SBenjamin Herrenschmidt 405142b45a7SBenjamin Herrenschmidt /* Calculate new doubled size */ 406142b45a7SBenjamin Herrenschmidt old_size = type->max * sizeof(struct memblock_region); 407142b45a7SBenjamin Herrenschmidt new_size = old_size << 1; 40829f67386SYinghai Lu /* 40929f67386SYinghai Lu * We need to allocated new one align to PAGE_SIZE, 41029f67386SYinghai Lu * so we can free them completely later. 41129f67386SYinghai Lu */ 41229f67386SYinghai Lu old_alloc_size = PAGE_ALIGN(old_size); 41329f67386SYinghai Lu new_alloc_size = PAGE_ALIGN(new_size); 414142b45a7SBenjamin Herrenschmidt 415181eb394SGavin Shan /* Retrieve the slab flag */ 416181eb394SGavin Shan if (type == &memblock.memory) 417181eb394SGavin Shan in_slab = &memblock_memory_in_slab; 418181eb394SGavin Shan else 419181eb394SGavin Shan in_slab = &memblock_reserved_in_slab; 420181eb394SGavin Shan 421142b45a7SBenjamin Herrenschmidt /* Try to find some space for it. 422142b45a7SBenjamin Herrenschmidt * 423142b45a7SBenjamin Herrenschmidt * WARNING: We assume that either slab_is_available() and we use it or 424fd07383bSAndrew Morton * we use MEMBLOCK for allocations. That means that this is unsafe to 425fd07383bSAndrew Morton * use when bootmem is currently active (unless bootmem itself is 426fd07383bSAndrew Morton * implemented on top of MEMBLOCK which isn't the case yet) 427142b45a7SBenjamin Herrenschmidt * 428142b45a7SBenjamin Herrenschmidt * This should however not be an issue for now, as we currently only 429fd07383bSAndrew Morton * call into MEMBLOCK while it's still active, or much later when slab 430fd07383bSAndrew Morton * is active for memory hotplug operations 431142b45a7SBenjamin Herrenschmidt */ 432142b45a7SBenjamin Herrenschmidt if (use_slab) { 433142b45a7SBenjamin Herrenschmidt new_array = kmalloc(new_size, GFP_KERNEL); 4341f5026a7STejun Heo addr = new_array ? 
__pa(new_array) : 0; 4354e2f0775SGavin Shan } else { 43648c3b583SGreg Pearson /* only exclude range when trying to double reserved.regions */ 43748c3b583SGreg Pearson if (type != &memblock.reserved) 43848c3b583SGreg Pearson new_area_start = new_area_size = 0; 43948c3b583SGreg Pearson 44048c3b583SGreg Pearson addr = memblock_find_in_range(new_area_start + new_area_size, 44148c3b583SGreg Pearson memblock.current_limit, 44229f67386SYinghai Lu new_alloc_size, PAGE_SIZE); 44348c3b583SGreg Pearson if (!addr && new_area_size) 44448c3b583SGreg Pearson addr = memblock_find_in_range(0, 44548c3b583SGreg Pearson min(new_area_start, memblock.current_limit), 44629f67386SYinghai Lu new_alloc_size, PAGE_SIZE); 44748c3b583SGreg Pearson 44815674868SSachin Kamat new_array = addr ? __va(addr) : NULL; 4494e2f0775SGavin Shan } 4501f5026a7STejun Heo if (!addr) { 451142b45a7SBenjamin Herrenschmidt pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n", 4520262d9c8SHeiko Carstens type->name, type->max, type->max * 2); 453142b45a7SBenjamin Herrenschmidt return -1; 454142b45a7SBenjamin Herrenschmidt } 455142b45a7SBenjamin Herrenschmidt 456*a36aab89SMike Rapoport new_end = addr + new_size - 1; 457*a36aab89SMike Rapoport memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]", 458*a36aab89SMike Rapoport type->name, type->max * 2, &addr, &new_end); 459ea9e4376SYinghai Lu 460fd07383bSAndrew Morton /* 461fd07383bSAndrew Morton * Found space, we now need to move the array over before we add the 462fd07383bSAndrew Morton * reserved region since it may be our reserved array itself that is 463fd07383bSAndrew Morton * full. 464142b45a7SBenjamin Herrenschmidt */ 465142b45a7SBenjamin Herrenschmidt memcpy(new_array, type->regions, old_size); 466142b45a7SBenjamin Herrenschmidt memset(new_array + type->max, 0, old_size); 467142b45a7SBenjamin Herrenschmidt old_array = type->regions; 468142b45a7SBenjamin Herrenschmidt type->regions = new_array; 469142b45a7SBenjamin Herrenschmidt type->max <<= 1; 470142b45a7SBenjamin Herrenschmidt 471fd07383bSAndrew Morton /* Free old array. We needn't free it if the array is the static one */ 472181eb394SGavin Shan if (*in_slab) 473181eb394SGavin Shan kfree(old_array); 474181eb394SGavin Shan else if (old_array != memblock_memory_init_regions && 475142b45a7SBenjamin Herrenschmidt old_array != memblock_reserved_init_regions) 47629f67386SYinghai Lu memblock_free(__pa(old_array), old_alloc_size); 477142b45a7SBenjamin Herrenschmidt 478fd07383bSAndrew Morton /* 479fd07383bSAndrew Morton * Reserve the new array if that comes from the memblock. Otherwise, we 480fd07383bSAndrew Morton * needn't do it 481181eb394SGavin Shan */ 482181eb394SGavin Shan if (!use_slab) 48329f67386SYinghai Lu BUG_ON(memblock_reserve(addr, new_alloc_size)); 484181eb394SGavin Shan 485181eb394SGavin Shan /* Update slab flag */ 486181eb394SGavin Shan *in_slab = use_slab; 487181eb394SGavin Shan 488142b45a7SBenjamin Herrenschmidt return 0; 489142b45a7SBenjamin Herrenschmidt } 490142b45a7SBenjamin Herrenschmidt 491784656f9STejun Heo /** 492784656f9STejun Heo * memblock_merge_regions - merge neighboring compatible regions 493784656f9STejun Heo * @type: memblock type to scan 494784656f9STejun Heo * 495784656f9STejun Heo * Scan @type and merge neighboring compatible regions. 
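 *
 * Two neighbors are merged only when they are physically adjacent and
 * carry the same node id and flags; expressed as a hypothetical helper
 * (not part of this file), the check is roughly::
 *
 *	static bool can_merge(struct memblock_region *this,
 *			      struct memblock_region *next)
 *	{
 *		return this->base + this->size == next->base &&
 *		       memblock_get_region_node(this) ==
 *		       memblock_get_region_node(next) &&
 *		       this->flags == next->flags;
 *	}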
496784656f9STejun Heo */ 497784656f9STejun Heo static void __init_memblock memblock_merge_regions(struct memblock_type *type) 498784656f9STejun Heo { 499784656f9STejun Heo int i = 0; 500784656f9STejun Heo 501784656f9STejun Heo /* cnt never goes below 1 */ 502784656f9STejun Heo while (i < type->cnt - 1) { 503784656f9STejun Heo struct memblock_region *this = &type->regions[i]; 504784656f9STejun Heo struct memblock_region *next = &type->regions[i + 1]; 505784656f9STejun Heo 5067c0caeb8STejun Heo if (this->base + this->size != next->base || 5077c0caeb8STejun Heo memblock_get_region_node(this) != 50866a20757STang Chen memblock_get_region_node(next) || 50966a20757STang Chen this->flags != next->flags) { 510784656f9STejun Heo BUG_ON(this->base + this->size > next->base); 511784656f9STejun Heo i++; 512784656f9STejun Heo continue; 513784656f9STejun Heo } 514784656f9STejun Heo 515784656f9STejun Heo this->size += next->size; 516c0232ae8SLin Feng /* move forward from next + 1, index of which is i + 2 */ 517c0232ae8SLin Feng memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next)); 518784656f9STejun Heo type->cnt--; 519784656f9STejun Heo } 520784656f9STejun Heo } 521784656f9STejun Heo 522784656f9STejun Heo /** 523784656f9STejun Heo * memblock_insert_region - insert new memblock region 524784656f9STejun Heo * @type: memblock type to insert into 525784656f9STejun Heo * @idx: index for the insertion point 526784656f9STejun Heo * @base: base address of the new region 527784656f9STejun Heo * @size: size of the new region 528209ff86dSTang Chen * @nid: node id of the new region 52966a20757STang Chen * @flags: flags of the new region 530784656f9STejun Heo * 531784656f9STejun Heo * Insert new memblock region [@base, @base + @size) into @type at @idx. 532412d0008SAlexander Kuleshov * @type must already have extra room to accommodate the new region. 533784656f9STejun Heo */ 534784656f9STejun Heo static void __init_memblock memblock_insert_region(struct memblock_type *type, 535784656f9STejun Heo int idx, phys_addr_t base, 53666a20757STang Chen phys_addr_t size, 537e1720feeSMike Rapoport int nid, 538e1720feeSMike Rapoport enum memblock_flags flags) 539784656f9STejun Heo { 540784656f9STejun Heo struct memblock_region *rgn = &type->regions[idx]; 541784656f9STejun Heo 542784656f9STejun Heo BUG_ON(type->cnt >= type->max); 543784656f9STejun Heo memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn)); 544784656f9STejun Heo rgn->base = base; 545784656f9STejun Heo rgn->size = size; 54666a20757STang Chen rgn->flags = flags; 5477c0caeb8STejun Heo memblock_set_region_node(rgn, nid); 548784656f9STejun Heo type->cnt++; 5491440c4e2STejun Heo type->total_size += size; 550784656f9STejun Heo } 551784656f9STejun Heo 552784656f9STejun Heo /** 553f1af9d3aSPhilipp Hachtmann * memblock_add_range - add new memblock region 554784656f9STejun Heo * @type: memblock type to add new region into 555784656f9STejun Heo * @base: base address of the new region 556784656f9STejun Heo * @size: size of the new region 5577fb0bc3fSTejun Heo * @nid: nid of the new region 55866a20757STang Chen * @flags: flags of the new region 559784656f9STejun Heo * 560784656f9STejun Heo * Add new memblock region [@base, @base + @size) into @type. The new region 561784656f9STejun Heo * is allowed to overlap with existing ones - overlaps don't affect already 562784656f9STejun Heo * existing regions. @type is guaranteed to be minimal (all neighbouring 563784656f9STejun Heo * compatible regions are merged) after the addition. 
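 *
 * For instance (made-up addresses), adding ``[0x1000, 0x3000)`` and then
 * ``[0x2000, 0x5000)`` with the same nid and flags leaves a single
 * merged region ``[0x1000, 0x5000)`` in @type.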
564784656f9STejun Heo * 56547cec443SMike Rapoport * Return: 566784656f9STejun Heo * 0 on success, -errno on failure. 567784656f9STejun Heo */ 568f1af9d3aSPhilipp Hachtmann int __init_memblock memblock_add_range(struct memblock_type *type, 56966a20757STang Chen phys_addr_t base, phys_addr_t size, 570e1720feeSMike Rapoport int nid, enum memblock_flags flags) 57195f72d1eSYinghai Lu { 572784656f9STejun Heo bool insert = false; 573eb18f1b5STejun Heo phys_addr_t obase = base; 574eb18f1b5STejun Heo phys_addr_t end = base + memblock_cap_size(base, &size); 5758c9c1701SAlexander Kuleshov int idx, nr_new; 5768c9c1701SAlexander Kuleshov struct memblock_region *rgn; 57795f72d1eSYinghai Lu 578b3dc627cSTejun Heo if (!size) 579b3dc627cSTejun Heo return 0; 580b3dc627cSTejun Heo 581784656f9STejun Heo /* special case for empty array */ 582784656f9STejun Heo if (type->regions[0].size == 0) { 5831440c4e2STejun Heo WARN_ON(type->cnt != 1 || type->total_size); 584784656f9STejun Heo type->regions[0].base = base; 585784656f9STejun Heo type->regions[0].size = size; 58666a20757STang Chen type->regions[0].flags = flags; 5877fb0bc3fSTejun Heo memblock_set_region_node(&type->regions[0], nid); 5881440c4e2STejun Heo type->total_size = size; 589784656f9STejun Heo return 0; 590784656f9STejun Heo } 591784656f9STejun Heo repeat: 592784656f9STejun Heo /* 593784656f9STejun Heo * The following is executed twice. Once with %false @insert and 594784656f9STejun Heo * then with %true. The first counts the number of regions needed 595412d0008SAlexander Kuleshov * to accommodate the new area. The second actually inserts them. 596784656f9STejun Heo */ 597784656f9STejun Heo base = obase; 598784656f9STejun Heo nr_new = 0; 599784656f9STejun Heo 60066e8b438SGioh Kim for_each_memblock_type(idx, type, rgn) { 601784656f9STejun Heo phys_addr_t rbase = rgn->base; 602784656f9STejun Heo phys_addr_t rend = rbase + rgn->size; 6038f7a6605SBenjamin Herrenschmidt 604784656f9STejun Heo if (rbase >= end) 6058f7a6605SBenjamin Herrenschmidt break; 606784656f9STejun Heo if (rend <= base) 607784656f9STejun Heo continue; 608784656f9STejun Heo /* 609784656f9STejun Heo * @rgn overlaps. If it separates the lower part of new 610784656f9STejun Heo * area, insert that portion. 
6118f7a6605SBenjamin Herrenschmidt */ 612784656f9STejun Heo if (rbase > base) { 613c0a29498SWei Yang #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 614c0a29498SWei Yang WARN_ON(nid != memblock_get_region_node(rgn)); 615c0a29498SWei Yang #endif 6164fcab5f4SWei Yang WARN_ON(flags != rgn->flags); 617784656f9STejun Heo nr_new++; 618784656f9STejun Heo if (insert) 6198c9c1701SAlexander Kuleshov memblock_insert_region(type, idx++, base, 62066a20757STang Chen rbase - base, nid, 62166a20757STang Chen flags); 622784656f9STejun Heo } 623784656f9STejun Heo /* area below @rend is dealt with, forget about it */ 624784656f9STejun Heo base = min(rend, end); 6258f7a6605SBenjamin Herrenschmidt } 6268f7a6605SBenjamin Herrenschmidt 627784656f9STejun Heo /* insert the remaining portion */ 628784656f9STejun Heo if (base < end) { 629784656f9STejun Heo nr_new++; 630784656f9STejun Heo if (insert) 6318c9c1701SAlexander Kuleshov memblock_insert_region(type, idx, base, end - base, 63266a20757STang Chen nid, flags); 6338f7a6605SBenjamin Herrenschmidt } 6348f7a6605SBenjamin Herrenschmidt 635ef3cc4dbSnimisolo if (!nr_new) 636ef3cc4dbSnimisolo return 0; 637ef3cc4dbSnimisolo 638784656f9STejun Heo /* 639784656f9STejun Heo * If this was the first round, resize array and repeat for actual 640784656f9STejun Heo * insertions; otherwise, merge and return. 6418f7a6605SBenjamin Herrenschmidt */ 642784656f9STejun Heo if (!insert) { 643784656f9STejun Heo while (type->cnt + nr_new > type->max) 64448c3b583SGreg Pearson if (memblock_double_array(type, obase, size) < 0) 645784656f9STejun Heo return -ENOMEM; 646784656f9STejun Heo insert = true; 647784656f9STejun Heo goto repeat; 64895f72d1eSYinghai Lu } else { 649784656f9STejun Heo memblock_merge_regions(type); 65095f72d1eSYinghai Lu return 0; 65195f72d1eSYinghai Lu } 652784656f9STejun Heo } 65395f72d1eSYinghai Lu 65448a833ccSMike Rapoport /** 65548a833ccSMike Rapoport * memblock_add_node - add new memblock region within a NUMA node 65648a833ccSMike Rapoport * @base: base address of the new region 65748a833ccSMike Rapoport * @size: size of the new region 65848a833ccSMike Rapoport * @nid: nid of the new region 65948a833ccSMike Rapoport * 66048a833ccSMike Rapoport * Add new memblock region [@base, @base + @size) to the "memory" 66148a833ccSMike Rapoport * type. See memblock_add_range() description for mode details 66248a833ccSMike Rapoport * 66348a833ccSMike Rapoport * Return: 66448a833ccSMike Rapoport * 0 on success, -errno on failure. 66548a833ccSMike Rapoport */ 6667fb0bc3fSTejun Heo int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size, 6677fb0bc3fSTejun Heo int nid) 6687fb0bc3fSTejun Heo { 669f1af9d3aSPhilipp Hachtmann return memblock_add_range(&memblock.memory, base, size, nid, 0); 6707fb0bc3fSTejun Heo } 6717fb0bc3fSTejun Heo 67248a833ccSMike Rapoport /** 67348a833ccSMike Rapoport * memblock_add - add new memblock region 67448a833ccSMike Rapoport * @base: base address of the new region 67548a833ccSMike Rapoport * @size: size of the new region 67648a833ccSMike Rapoport * 67748a833ccSMike Rapoport * Add new memblock region [@base, @base + @size) to the "memory" 67848a833ccSMike Rapoport * type. See memblock_add_range() description for mode details 67948a833ccSMike Rapoport * 68048a833ccSMike Rapoport * Return: 68148a833ccSMike Rapoport * 0 on success, -errno on failure. 
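 *
 * Example (an illustrative sketch; the addresses and sizes are made up).
 * A bank reported by the firmware can be registered, and a hole later
 * punched out of it again with :c:func:`memblock_remove`::
 *
 *	memblock_add(0x80000000, SZ_256M);
 *	memblock_remove(0x8f000000, SZ_1M);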
68248a833ccSMike Rapoport */ 683f705ac4bSAlexander Kuleshov int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size) 6846a4055bcSAlexander Kuleshov { 6855d63f81cSMiles Chen phys_addr_t end = base + size - 1; 6865d63f81cSMiles Chen 6875d63f81cSMiles Chen memblock_dbg("memblock_add: [%pa-%pa] %pF\n", 6885d63f81cSMiles Chen &base, &end, (void *)_RET_IP_); 6896a4055bcSAlexander Kuleshov 690f705ac4bSAlexander Kuleshov return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0); 69195f72d1eSYinghai Lu } 69295f72d1eSYinghai Lu 6936a9ceb31STejun Heo /** 6946a9ceb31STejun Heo * memblock_isolate_range - isolate given range into disjoint memblocks 6956a9ceb31STejun Heo * @type: memblock type to isolate range for 6966a9ceb31STejun Heo * @base: base of range to isolate 6976a9ceb31STejun Heo * @size: size of range to isolate 6986a9ceb31STejun Heo * @start_rgn: out parameter for the start of isolated region 6996a9ceb31STejun Heo * @end_rgn: out parameter for the end of isolated region 7006a9ceb31STejun Heo * 7016a9ceb31STejun Heo * Walk @type and ensure that regions don't cross the boundaries defined by 7026a9ceb31STejun Heo * [@base, @base + @size). Crossing regions are split at the boundaries, 7036a9ceb31STejun Heo * which may create at most two more regions. The index of the first 7046a9ceb31STejun Heo * region inside the range is returned in *@start_rgn and end in *@end_rgn. 7056a9ceb31STejun Heo * 70647cec443SMike Rapoport * Return: 7076a9ceb31STejun Heo * 0 on success, -errno on failure. 7086a9ceb31STejun Heo */ 7096a9ceb31STejun Heo static int __init_memblock memblock_isolate_range(struct memblock_type *type, 7106a9ceb31STejun Heo phys_addr_t base, phys_addr_t size, 7116a9ceb31STejun Heo int *start_rgn, int *end_rgn) 7126a9ceb31STejun Heo { 713eb18f1b5STejun Heo phys_addr_t end = base + memblock_cap_size(base, &size); 7148c9c1701SAlexander Kuleshov int idx; 7158c9c1701SAlexander Kuleshov struct memblock_region *rgn; 7166a9ceb31STejun Heo 7176a9ceb31STejun Heo *start_rgn = *end_rgn = 0; 7186a9ceb31STejun Heo 719b3dc627cSTejun Heo if (!size) 720b3dc627cSTejun Heo return 0; 721b3dc627cSTejun Heo 7226a9ceb31STejun Heo /* we'll create at most two more regions */ 7236a9ceb31STejun Heo while (type->cnt + 2 > type->max) 72448c3b583SGreg Pearson if (memblock_double_array(type, base, size) < 0) 7256a9ceb31STejun Heo return -ENOMEM; 7266a9ceb31STejun Heo 72766e8b438SGioh Kim for_each_memblock_type(idx, type, rgn) { 7286a9ceb31STejun Heo phys_addr_t rbase = rgn->base; 7296a9ceb31STejun Heo phys_addr_t rend = rbase + rgn->size; 7306a9ceb31STejun Heo 7316a9ceb31STejun Heo if (rbase >= end) 7326a9ceb31STejun Heo break; 7336a9ceb31STejun Heo if (rend <= base) 7346a9ceb31STejun Heo continue; 7356a9ceb31STejun Heo 7366a9ceb31STejun Heo if (rbase < base) { 7376a9ceb31STejun Heo /* 7386a9ceb31STejun Heo * @rgn intersects from below. Split and continue 7396a9ceb31STejun Heo * to process the next region - the new top half. 7406a9ceb31STejun Heo */ 7416a9ceb31STejun Heo rgn->base = base; 7421440c4e2STejun Heo rgn->size -= base - rbase; 7431440c4e2STejun Heo type->total_size -= base - rbase; 7448c9c1701SAlexander Kuleshov memblock_insert_region(type, idx, rbase, base - rbase, 74566a20757STang Chen memblock_get_region_node(rgn), 74666a20757STang Chen rgn->flags); 7476a9ceb31STejun Heo } else if (rend > end) { 7486a9ceb31STejun Heo /* 7496a9ceb31STejun Heo * @rgn intersects from above. Split and redo the 7506a9ceb31STejun Heo * current region - the new bottom half. 
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, idx--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = idx;
			*end_rgn = idx + 1;
		}
	}

	return 0;
}

static int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("memblock_remove: [%pa-%pa] %pS\n",
		     &base, &end, (void *)_RET_IP_);

	return memblock_remove_range(&memblock.memory, base, size);
}


int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("   memblock_free: [%pa-%pa] %pF\n",
		     &base, &end, (void *)_RET_IP_);

	kmemleak_free_part_phys(base, size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("memblock_reserve: [%pa-%pa] %pF\n",
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_setclr_flag - set or clear flag for a memory region
 * @base: base address of the region
 * @size: size of the region
 * @set: set or clear the flag
 * @flag: the flag to update
 *
 * This function isolates the region [@base, @base + @size), and sets/clears
 * the flag on it.
 *
 * Return: 0 on success, -errno on
failure. 82666b16edfSTang Chen */ 8274308ce17STony Luck static int __init_memblock memblock_setclr_flag(phys_addr_t base, 8284308ce17STony Luck phys_addr_t size, int set, int flag) 82966b16edfSTang Chen { 83066b16edfSTang Chen struct memblock_type *type = &memblock.memory; 83166b16edfSTang Chen int i, ret, start_rgn, end_rgn; 83266b16edfSTang Chen 83366b16edfSTang Chen ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn); 83466b16edfSTang Chen if (ret) 83566b16edfSTang Chen return ret; 83666b16edfSTang Chen 83766b16edfSTang Chen for (i = start_rgn; i < end_rgn; i++) 8384308ce17STony Luck if (set) 8394308ce17STony Luck memblock_set_region_flags(&type->regions[i], flag); 8404308ce17STony Luck else 8414308ce17STony Luck memblock_clear_region_flags(&type->regions[i], flag); 84266b16edfSTang Chen 84366b16edfSTang Chen memblock_merge_regions(type); 84466b16edfSTang Chen return 0; 84566b16edfSTang Chen } 84666b16edfSTang Chen 84766b16edfSTang Chen /** 8484308ce17STony Luck * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG. 8494308ce17STony Luck * @base: the base phys addr of the region 8504308ce17STony Luck * @size: the size of the region 8514308ce17STony Luck * 85247cec443SMike Rapoport * Return: 0 on success, -errno on failure. 8534308ce17STony Luck */ 8544308ce17STony Luck int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size) 8554308ce17STony Luck { 8564308ce17STony Luck return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG); 8574308ce17STony Luck } 8584308ce17STony Luck 8594308ce17STony Luck /** 86066b16edfSTang Chen * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region. 86166b16edfSTang Chen * @base: the base phys addr of the region 86266b16edfSTang Chen * @size: the size of the region 86366b16edfSTang Chen * 86447cec443SMike Rapoport * Return: 0 on success, -errno on failure. 86566b16edfSTang Chen */ 86666b16edfSTang Chen int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size) 86766b16edfSTang Chen { 8684308ce17STony Luck return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG); 86966b16edfSTang Chen } 87066b16edfSTang Chen 87166b16edfSTang Chen /** 872a3f5bafcSTony Luck * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR. 873a3f5bafcSTony Luck * @base: the base phys addr of the region 874a3f5bafcSTony Luck * @size: the size of the region 875a3f5bafcSTony Luck * 87647cec443SMike Rapoport * Return: 0 on success, -errno on failure. 877a3f5bafcSTony Luck */ 878a3f5bafcSTony Luck int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size) 879a3f5bafcSTony Luck { 880a3f5bafcSTony Luck system_has_some_mirror = true; 881a3f5bafcSTony Luck 882a3f5bafcSTony Luck return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR); 883a3f5bafcSTony Luck } 884a3f5bafcSTony Luck 885bf3d3cc5SArd Biesheuvel /** 886bf3d3cc5SArd Biesheuvel * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP. 887bf3d3cc5SArd Biesheuvel * @base: the base phys addr of the region 888bf3d3cc5SArd Biesheuvel * @size: the size of the region 889bf3d3cc5SArd Biesheuvel * 89047cec443SMike Rapoport * Return: 0 on success, -errno on failure. 
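 *
 * Example (an illustrative sketch; ``fw_base`` and ``fw_size`` stand for
 * a firmware-owned range such as ACPI tables)::
 *
 *	memblock_add(fw_base, fw_size);
 *	memblock_mark_nomap(fw_base, fw_size);
 *
 * keeps the range visible in the ``memory`` type, while architectures
 * that honour %MEMBLOCK_NOMAP will leave it out of the linear mapping.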
891bf3d3cc5SArd Biesheuvel */ 892bf3d3cc5SArd Biesheuvel int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size) 893bf3d3cc5SArd Biesheuvel { 894bf3d3cc5SArd Biesheuvel return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP); 895bf3d3cc5SArd Biesheuvel } 896a3f5bafcSTony Luck 897a3f5bafcSTony Luck /** 8984c546b8aSAKASHI Takahiro * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region. 8994c546b8aSAKASHI Takahiro * @base: the base phys addr of the region 9004c546b8aSAKASHI Takahiro * @size: the size of the region 9014c546b8aSAKASHI Takahiro * 90247cec443SMike Rapoport * Return: 0 on success, -errno on failure. 9034c546b8aSAKASHI Takahiro */ 9044c546b8aSAKASHI Takahiro int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size) 9054c546b8aSAKASHI Takahiro { 9064c546b8aSAKASHI Takahiro return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP); 9074c546b8aSAKASHI Takahiro } 9084c546b8aSAKASHI Takahiro 9094c546b8aSAKASHI Takahiro /** 9108e7a7f86SRobin Holt * __next_reserved_mem_region - next function for for_each_reserved_region() 9118e7a7f86SRobin Holt * @idx: pointer to u64 loop variable 9128e7a7f86SRobin Holt * @out_start: ptr to phys_addr_t for start address of the region, can be %NULL 9138e7a7f86SRobin Holt * @out_end: ptr to phys_addr_t for end address of the region, can be %NULL 9148e7a7f86SRobin Holt * 9158e7a7f86SRobin Holt * Iterate over all reserved memory regions. 9168e7a7f86SRobin Holt */ 9178e7a7f86SRobin Holt void __init_memblock __next_reserved_mem_region(u64 *idx, 9188e7a7f86SRobin Holt phys_addr_t *out_start, 9198e7a7f86SRobin Holt phys_addr_t *out_end) 9208e7a7f86SRobin Holt { 921567d117bSAlexander Kuleshov struct memblock_type *type = &memblock.reserved; 9228e7a7f86SRobin Holt 923cd33a76bSRichard Leitner if (*idx < type->cnt) { 924567d117bSAlexander Kuleshov struct memblock_region *r = &type->regions[*idx]; 9258e7a7f86SRobin Holt phys_addr_t base = r->base; 9268e7a7f86SRobin Holt phys_addr_t size = r->size; 9278e7a7f86SRobin Holt 9288e7a7f86SRobin Holt if (out_start) 9298e7a7f86SRobin Holt *out_start = base; 9308e7a7f86SRobin Holt if (out_end) 9318e7a7f86SRobin Holt *out_end = base + size - 1; 9328e7a7f86SRobin Holt 9338e7a7f86SRobin Holt *idx += 1; 9348e7a7f86SRobin Holt return; 9358e7a7f86SRobin Holt } 9368e7a7f86SRobin Holt 9378e7a7f86SRobin Holt /* signal end of iteration */ 9388e7a7f86SRobin Holt *idx = ULLONG_MAX; 9398e7a7f86SRobin Holt } 9408e7a7f86SRobin Holt 9418e7a7f86SRobin Holt /** 942f1af9d3aSPhilipp Hachtmann * __next__mem_range - next function for for_each_free_mem_range() etc. 94335fd0808STejun Heo * @idx: pointer to u64 loop variable 944b1154233SGrygorii Strashko * @nid: node selector, %NUMA_NO_NODE for all nodes 945fc6daaf9STony Luck * @flags: pick from blocks based on memory attributes 946f1af9d3aSPhilipp Hachtmann * @type_a: pointer to memblock_type from where the range is taken 947f1af9d3aSPhilipp Hachtmann * @type_b: pointer to memblock_type which excludes memory from being taken 948dad7557eSWanpeng Li * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL 949dad7557eSWanpeng Li * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL 950dad7557eSWanpeng Li * @out_nid: ptr to int for nid of the range, can be %NULL 95135fd0808STejun Heo * 952f1af9d3aSPhilipp Hachtmann * Find the first area from *@idx which matches @nid, fill the out 95335fd0808STejun Heo * parameters, and update *@idx for the next iteration. 
The lower 32bit of 954f1af9d3aSPhilipp Hachtmann * *@idx contains index into type_a and the upper 32bit indexes the 955f1af9d3aSPhilipp Hachtmann * areas before each region in type_b. For example, if type_b regions 95635fd0808STejun Heo * look like the following, 95735fd0808STejun Heo * 95835fd0808STejun Heo * 0:[0-16), 1:[32-48), 2:[128-130) 95935fd0808STejun Heo * 96035fd0808STejun Heo * The upper 32bit indexes the following regions. 96135fd0808STejun Heo * 96235fd0808STejun Heo * 0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX) 96335fd0808STejun Heo * 96435fd0808STejun Heo * As both region arrays are sorted, the function advances the two indices 96535fd0808STejun Heo * in lockstep and returns each intersection. 96635fd0808STejun Heo */ 967e1720feeSMike Rapoport void __init_memblock __next_mem_range(u64 *idx, int nid, 968e1720feeSMike Rapoport enum memblock_flags flags, 969f1af9d3aSPhilipp Hachtmann struct memblock_type *type_a, 970f1af9d3aSPhilipp Hachtmann struct memblock_type *type_b, 97135fd0808STejun Heo phys_addr_t *out_start, 97235fd0808STejun Heo phys_addr_t *out_end, int *out_nid) 97335fd0808STejun Heo { 974f1af9d3aSPhilipp Hachtmann int idx_a = *idx & 0xffffffff; 975f1af9d3aSPhilipp Hachtmann int idx_b = *idx >> 32; 976b1154233SGrygorii Strashko 977f1af9d3aSPhilipp Hachtmann if (WARN_ONCE(nid == MAX_NUMNODES, 978f1af9d3aSPhilipp Hachtmann "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n")) 979560dca27SGrygorii Strashko nid = NUMA_NO_NODE; 98035fd0808STejun Heo 981f1af9d3aSPhilipp Hachtmann for (; idx_a < type_a->cnt; idx_a++) { 982f1af9d3aSPhilipp Hachtmann struct memblock_region *m = &type_a->regions[idx_a]; 983f1af9d3aSPhilipp Hachtmann 98435fd0808STejun Heo phys_addr_t m_start = m->base; 98535fd0808STejun Heo phys_addr_t m_end = m->base + m->size; 986f1af9d3aSPhilipp Hachtmann int m_nid = memblock_get_region_node(m); 98735fd0808STejun Heo 98835fd0808STejun Heo /* only memory regions are associated with nodes, check it */ 989f1af9d3aSPhilipp Hachtmann if (nid != NUMA_NO_NODE && nid != m_nid) 99035fd0808STejun Heo continue; 99135fd0808STejun Heo 9920a313a99SXishi Qiu /* skip hotpluggable memory regions if needed */ 9930a313a99SXishi Qiu if (movable_node_is_enabled() && memblock_is_hotpluggable(m)) 9940a313a99SXishi Qiu continue; 9950a313a99SXishi Qiu 996a3f5bafcSTony Luck /* if we want mirror memory skip non-mirror memory regions */ 997a3f5bafcSTony Luck if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m)) 998a3f5bafcSTony Luck continue; 999a3f5bafcSTony Luck 1000bf3d3cc5SArd Biesheuvel /* skip nomap memory unless we were asked for it explicitly */ 1001bf3d3cc5SArd Biesheuvel if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m)) 1002bf3d3cc5SArd Biesheuvel continue; 1003bf3d3cc5SArd Biesheuvel 1004f1af9d3aSPhilipp Hachtmann if (!type_b) { 1005f1af9d3aSPhilipp Hachtmann if (out_start) 1006f1af9d3aSPhilipp Hachtmann *out_start = m_start; 1007f1af9d3aSPhilipp Hachtmann if (out_end) 1008f1af9d3aSPhilipp Hachtmann *out_end = m_end; 1009f1af9d3aSPhilipp Hachtmann if (out_nid) 1010f1af9d3aSPhilipp Hachtmann *out_nid = m_nid; 1011f1af9d3aSPhilipp Hachtmann idx_a++; 1012f1af9d3aSPhilipp Hachtmann *idx = (u32)idx_a | (u64)idx_b << 32; 1013f1af9d3aSPhilipp Hachtmann return; 1014f1af9d3aSPhilipp Hachtmann } 101535fd0808STejun Heo 1016f1af9d3aSPhilipp Hachtmann /* scan areas before each reservation */ 1017f1af9d3aSPhilipp Hachtmann for (; idx_b < type_b->cnt + 1; idx_b++) { 1018f1af9d3aSPhilipp Hachtmann struct memblock_region *r; 1019f1af9d3aSPhilipp Hachtmann phys_addr_t 
r_start; 1020f1af9d3aSPhilipp Hachtmann phys_addr_t r_end; 1021f1af9d3aSPhilipp Hachtmann 1022f1af9d3aSPhilipp Hachtmann r = &type_b->regions[idx_b]; 1023f1af9d3aSPhilipp Hachtmann r_start = idx_b ? r[-1].base + r[-1].size : 0; 1024f1af9d3aSPhilipp Hachtmann r_end = idx_b < type_b->cnt ? 10251c4bc43dSStefan Agner r->base : PHYS_ADDR_MAX; 1026f1af9d3aSPhilipp Hachtmann 1027f1af9d3aSPhilipp Hachtmann /* 1028f1af9d3aSPhilipp Hachtmann * if idx_b advanced past idx_a, 1029f1af9d3aSPhilipp Hachtmann * break out to advance idx_a 1030f1af9d3aSPhilipp Hachtmann */ 103135fd0808STejun Heo if (r_start >= m_end) 103235fd0808STejun Heo break; 103335fd0808STejun Heo /* if the two regions intersect, we're done */ 103435fd0808STejun Heo if (m_start < r_end) { 103535fd0808STejun Heo if (out_start) 1036f1af9d3aSPhilipp Hachtmann *out_start = 1037f1af9d3aSPhilipp Hachtmann max(m_start, r_start); 103835fd0808STejun Heo if (out_end) 103935fd0808STejun Heo *out_end = min(m_end, r_end); 104035fd0808STejun Heo if (out_nid) 1041f1af9d3aSPhilipp Hachtmann *out_nid = m_nid; 104235fd0808STejun Heo /* 1043f1af9d3aSPhilipp Hachtmann * The region which ends first is 1044f1af9d3aSPhilipp Hachtmann * advanced for the next iteration. 104535fd0808STejun Heo */ 104635fd0808STejun Heo if (m_end <= r_end) 1047f1af9d3aSPhilipp Hachtmann idx_a++; 104835fd0808STejun Heo else 1049f1af9d3aSPhilipp Hachtmann idx_b++; 1050f1af9d3aSPhilipp Hachtmann *idx = (u32)idx_a | (u64)idx_b << 32; 105135fd0808STejun Heo return; 105235fd0808STejun Heo } 105335fd0808STejun Heo } 105435fd0808STejun Heo } 105535fd0808STejun Heo 105635fd0808STejun Heo /* signal end of iteration */ 105735fd0808STejun Heo *idx = ULLONG_MAX; 105835fd0808STejun Heo } 105935fd0808STejun Heo 10607bd0b0f0STejun Heo /** 1061f1af9d3aSPhilipp Hachtmann * __next_mem_range_rev - generic next function for for_each_*_range_rev() 1062f1af9d3aSPhilipp Hachtmann * 10637bd0b0f0STejun Heo * @idx: pointer to u64 loop variable 1064ad5ea8cdSAlexander Kuleshov * @nid: node selector, %NUMA_NO_NODE for all nodes 1065fc6daaf9STony Luck * @flags: pick from blocks based on memory attributes 1066f1af9d3aSPhilipp Hachtmann * @type_a: pointer to memblock_type from where the range is taken 1067f1af9d3aSPhilipp Hachtmann * @type_b: pointer to memblock_type which excludes memory from being taken 1068dad7557eSWanpeng Li * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL 1069dad7557eSWanpeng Li * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL 1070dad7557eSWanpeng Li * @out_nid: ptr to int for nid of the range, can be %NULL 10717bd0b0f0STejun Heo * 107247cec443SMike Rapoport * Finds the next range from type_a which is not marked as unsuitable 107347cec443SMike Rapoport * in type_b. 107447cec443SMike Rapoport * 1075f1af9d3aSPhilipp Hachtmann * Reverse of __next_mem_range(). 10767bd0b0f0STejun Heo */ 1077e1720feeSMike Rapoport void __init_memblock __next_mem_range_rev(u64 *idx, int nid, 1078e1720feeSMike Rapoport enum memblock_flags flags, 1079f1af9d3aSPhilipp Hachtmann struct memblock_type *type_a, 1080f1af9d3aSPhilipp Hachtmann struct memblock_type *type_b, 10817bd0b0f0STejun Heo phys_addr_t *out_start, 10827bd0b0f0STejun Heo phys_addr_t *out_end, int *out_nid) 10837bd0b0f0STejun Heo { 1084f1af9d3aSPhilipp Hachtmann int idx_a = *idx & 0xffffffff; 1085f1af9d3aSPhilipp Hachtmann int idx_b = *idx >> 32; 1086b1154233SGrygorii Strashko 1087560dca27SGrygorii Strashko if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. 
Use NUMA_NO_NODE instead\n")) 1088560dca27SGrygorii Strashko nid = NUMA_NO_NODE; 10897bd0b0f0STejun Heo 10907bd0b0f0STejun Heo if (*idx == (u64)ULLONG_MAX) { 1091f1af9d3aSPhilipp Hachtmann idx_a = type_a->cnt - 1; 1092e47608abSzijun_hu if (type_b != NULL) 1093f1af9d3aSPhilipp Hachtmann idx_b = type_b->cnt; 1094e47608abSzijun_hu else 1095e47608abSzijun_hu idx_b = 0; 10967bd0b0f0STejun Heo } 10977bd0b0f0STejun Heo 1098f1af9d3aSPhilipp Hachtmann for (; idx_a >= 0; idx_a--) { 1099f1af9d3aSPhilipp Hachtmann struct memblock_region *m = &type_a->regions[idx_a]; 1100f1af9d3aSPhilipp Hachtmann 11017bd0b0f0STejun Heo phys_addr_t m_start = m->base; 11027bd0b0f0STejun Heo phys_addr_t m_end = m->base + m->size; 1103f1af9d3aSPhilipp Hachtmann int m_nid = memblock_get_region_node(m); 11047bd0b0f0STejun Heo 11057bd0b0f0STejun Heo /* only memory regions are associated with nodes, check it */ 1106f1af9d3aSPhilipp Hachtmann if (nid != NUMA_NO_NODE && nid != m_nid) 11077bd0b0f0STejun Heo continue; 11087bd0b0f0STejun Heo 110955ac590cSTang Chen /* skip hotpluggable memory regions if needed */ 111055ac590cSTang Chen if (movable_node_is_enabled() && memblock_is_hotpluggable(m)) 111155ac590cSTang Chen continue; 111255ac590cSTang Chen 1113a3f5bafcSTony Luck /* if we want mirror memory skip non-mirror memory regions */ 1114a3f5bafcSTony Luck if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m)) 1115a3f5bafcSTony Luck continue; 1116a3f5bafcSTony Luck 1117bf3d3cc5SArd Biesheuvel /* skip nomap memory unless we were asked for it explicitly */ 1118bf3d3cc5SArd Biesheuvel if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m)) 1119bf3d3cc5SArd Biesheuvel continue; 1120bf3d3cc5SArd Biesheuvel 1121f1af9d3aSPhilipp Hachtmann if (!type_b) { 1122f1af9d3aSPhilipp Hachtmann if (out_start) 1123f1af9d3aSPhilipp Hachtmann *out_start = m_start; 1124f1af9d3aSPhilipp Hachtmann if (out_end) 1125f1af9d3aSPhilipp Hachtmann *out_end = m_end; 1126f1af9d3aSPhilipp Hachtmann if (out_nid) 1127f1af9d3aSPhilipp Hachtmann *out_nid = m_nid; 1128fb399b48Szijun_hu idx_a--; 1129f1af9d3aSPhilipp Hachtmann *idx = (u32)idx_a | (u64)idx_b << 32; 1130f1af9d3aSPhilipp Hachtmann return; 1131f1af9d3aSPhilipp Hachtmann } 11327bd0b0f0STejun Heo 1133f1af9d3aSPhilipp Hachtmann /* scan areas before each reservation */ 1134f1af9d3aSPhilipp Hachtmann for (; idx_b >= 0; idx_b--) { 1135f1af9d3aSPhilipp Hachtmann struct memblock_region *r; 1136f1af9d3aSPhilipp Hachtmann phys_addr_t r_start; 1137f1af9d3aSPhilipp Hachtmann phys_addr_t r_end; 1138f1af9d3aSPhilipp Hachtmann 1139f1af9d3aSPhilipp Hachtmann r = &type_b->regions[idx_b]; 1140f1af9d3aSPhilipp Hachtmann r_start = idx_b ? r[-1].base + r[-1].size : 0; 1141f1af9d3aSPhilipp Hachtmann r_end = idx_b < type_b->cnt ? 
11421c4bc43dSStefan Agner r->base : PHYS_ADDR_MAX; 1143f1af9d3aSPhilipp Hachtmann /* 1144f1af9d3aSPhilipp Hachtmann * if idx_b advanced past idx_a, 1145f1af9d3aSPhilipp Hachtmann * break out to advance idx_a 1146f1af9d3aSPhilipp Hachtmann */ 1147f1af9d3aSPhilipp Hachtmann 11487bd0b0f0STejun Heo if (r_end <= m_start) 11497bd0b0f0STejun Heo break; 11507bd0b0f0STejun Heo /* if the two regions intersect, we're done */ 11517bd0b0f0STejun Heo if (m_end > r_start) { 11527bd0b0f0STejun Heo if (out_start) 11537bd0b0f0STejun Heo *out_start = max(m_start, r_start); 11547bd0b0f0STejun Heo if (out_end) 11557bd0b0f0STejun Heo *out_end = min(m_end, r_end); 11567bd0b0f0STejun Heo if (out_nid) 1157f1af9d3aSPhilipp Hachtmann *out_nid = m_nid; 11587bd0b0f0STejun Heo if (m_start >= r_start) 1159f1af9d3aSPhilipp Hachtmann idx_a--; 11607bd0b0f0STejun Heo else 1161f1af9d3aSPhilipp Hachtmann idx_b--; 1162f1af9d3aSPhilipp Hachtmann *idx = (u32)idx_a | (u64)idx_b << 32; 11637bd0b0f0STejun Heo return; 11647bd0b0f0STejun Heo } 11657bd0b0f0STejun Heo } 11667bd0b0f0STejun Heo } 1167f1af9d3aSPhilipp Hachtmann /* signal end of iteration */ 11687bd0b0f0STejun Heo *idx = ULLONG_MAX; 11697bd0b0f0STejun Heo } 11707bd0b0f0STejun Heo 11717c0caeb8STejun Heo #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 11727c0caeb8STejun Heo /* 11737c0caeb8STejun Heo * Common iterator interface used to define for_each_mem_range(). 11747c0caeb8STejun Heo */ 11757c0caeb8STejun Heo void __init_memblock __next_mem_pfn_range(int *idx, int nid, 11767c0caeb8STejun Heo unsigned long *out_start_pfn, 11777c0caeb8STejun Heo unsigned long *out_end_pfn, int *out_nid) 11787c0caeb8STejun Heo { 11797c0caeb8STejun Heo struct memblock_type *type = &memblock.memory; 11807c0caeb8STejun Heo struct memblock_region *r; 11817c0caeb8STejun Heo 11827c0caeb8STejun Heo while (++*idx < type->cnt) { 11837c0caeb8STejun Heo r = &type->regions[*idx]; 11847c0caeb8STejun Heo 11857c0caeb8STejun Heo if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size)) 11867c0caeb8STejun Heo continue; 11877c0caeb8STejun Heo if (nid == MAX_NUMNODES || nid == r->nid) 11887c0caeb8STejun Heo break; 11897c0caeb8STejun Heo } 11907c0caeb8STejun Heo if (*idx >= type->cnt) { 11917c0caeb8STejun Heo *idx = -1; 11927c0caeb8STejun Heo return; 11937c0caeb8STejun Heo } 11947c0caeb8STejun Heo 11957c0caeb8STejun Heo if (out_start_pfn) 11967c0caeb8STejun Heo *out_start_pfn = PFN_UP(r->base); 11977c0caeb8STejun Heo if (out_end_pfn) 11987c0caeb8STejun Heo *out_end_pfn = PFN_DOWN(r->base + r->size); 11997c0caeb8STejun Heo if (out_nid) 12007c0caeb8STejun Heo *out_nid = r->nid; 12017c0caeb8STejun Heo } 12027c0caeb8STejun Heo 12037c0caeb8STejun Heo /** 12047c0caeb8STejun Heo * memblock_set_node - set node ID on memblock regions 12057c0caeb8STejun Heo * @base: base of area to set node ID for 12067c0caeb8STejun Heo * @size: size of area to set node ID for 1207e7e8de59STang Chen * @type: memblock type to set node ID for 12087c0caeb8STejun Heo * @nid: node ID to set 12097c0caeb8STejun Heo * 1210e7e8de59STang Chen * Set the nid of memblock @type regions in [@base, @base + @size) to @nid. 12117c0caeb8STejun Heo * Regions which cross the area boundaries are split as necessary. 12127c0caeb8STejun Heo * 121347cec443SMike Rapoport * Return: 12147c0caeb8STejun Heo * 0 on success, -errno on failure. 
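 *
 * Illustrative sketch (not taken from this file): early arch NUMA setup
 * that has just parsed its affinity tables might associate a range with a
 * node like
 *
 *	memblock_set_node(start, end - start, &memblock.memory, nid);
 *
 * where start, end and nid are hypothetical values read from firmware.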
12157c0caeb8STejun Heo */ 12167c0caeb8STejun Heo int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size, 1217e7e8de59STang Chen struct memblock_type *type, int nid) 12187c0caeb8STejun Heo { 12196a9ceb31STejun Heo int start_rgn, end_rgn; 12206a9ceb31STejun Heo int i, ret; 12217c0caeb8STejun Heo 12226a9ceb31STejun Heo ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn); 12236a9ceb31STejun Heo if (ret) 12246a9ceb31STejun Heo return ret; 12257c0caeb8STejun Heo 12266a9ceb31STejun Heo for (i = start_rgn; i < end_rgn; i++) 1227e9d24ad3SWanpeng Li memblock_set_region_node(&type->regions[i], nid); 12287c0caeb8STejun Heo 12297c0caeb8STejun Heo memblock_merge_regions(type); 12307c0caeb8STejun Heo return 0; 12317c0caeb8STejun Heo } 12327c0caeb8STejun Heo #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ 12337c0caeb8STejun Heo 12342bfc2862SAkinobu Mita static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size, 12352bfc2862SAkinobu Mita phys_addr_t align, phys_addr_t start, 1236e1720feeSMike Rapoport phys_addr_t end, int nid, 1237e1720feeSMike Rapoport enum memblock_flags flags) 123895f72d1eSYinghai Lu { 12396ed311b2SBenjamin Herrenschmidt phys_addr_t found; 124095f72d1eSYinghai Lu 124179f40fabSGrygorii Strashko if (!align) 124279f40fabSGrygorii Strashko align = SMP_CACHE_BYTES; 124394f3d3afSVineet Gupta 1244fc6daaf9STony Luck found = memblock_find_in_range_node(size, align, start, end, nid, 1245fc6daaf9STony Luck flags); 1246aedf95eaSCatalin Marinas if (found && !memblock_reserve(found, size)) { 1247aedf95eaSCatalin Marinas /* 1248aedf95eaSCatalin Marinas * The min_count is set to 0 so that memblock allocations are 1249aedf95eaSCatalin Marinas * never reported as leaks. 1250aedf95eaSCatalin Marinas */ 12519099daedSCatalin Marinas kmemleak_alloc_phys(found, size, 0, 0); 12526ed311b2SBenjamin Herrenschmidt return found; 1253aedf95eaSCatalin Marinas } 12546ed311b2SBenjamin Herrenschmidt return 0; 125595f72d1eSYinghai Lu } 125695f72d1eSYinghai Lu 12572bfc2862SAkinobu Mita phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align, 1258fc6daaf9STony Luck phys_addr_t start, phys_addr_t end, 1259e1720feeSMike Rapoport enum memblock_flags flags) 12602bfc2862SAkinobu Mita { 1261fc6daaf9STony Luck return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE, 1262fc6daaf9STony Luck flags); 12632bfc2862SAkinobu Mita } 12642bfc2862SAkinobu Mita 1265b575454fSNicholas Piggin phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size, 12662bfc2862SAkinobu Mita phys_addr_t align, phys_addr_t max_addr, 1267e1720feeSMike Rapoport int nid, enum memblock_flags flags) 12682bfc2862SAkinobu Mita { 1269fc6daaf9STony Luck return memblock_alloc_range_nid(size, align, 0, max_addr, nid, flags); 12702bfc2862SAkinobu Mita } 12712bfc2862SAkinobu Mita 12727bd0b0f0STejun Heo phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid) 12737bd0b0f0STejun Heo { 1274e1720feeSMike Rapoport enum memblock_flags flags = choose_memblock_flags(); 1275a3f5bafcSTony Luck phys_addr_t ret; 1276a3f5bafcSTony Luck 1277a3f5bafcSTony Luck again: 1278a3f5bafcSTony Luck ret = memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE, 1279a3f5bafcSTony Luck nid, flags); 1280a3f5bafcSTony Luck 1281a3f5bafcSTony Luck if (!ret && (flags & MEMBLOCK_MIRROR)) { 1282a3f5bafcSTony Luck flags &= ~MEMBLOCK_MIRROR; 1283a3f5bafcSTony Luck goto again; 1284a3f5bafcSTony Luck } 1285a3f5bafcSTony Luck return ret; 12867bd0b0f0STejun Heo } 12877bd0b0f0STejun Heo 12887bd0b0f0STejun 
Heo phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr) 12897bd0b0f0STejun Heo { 1290fc6daaf9STony Luck return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE, 1291fc6daaf9STony Luck MEMBLOCK_NONE); 12927bd0b0f0STejun Heo } 12937bd0b0f0STejun Heo 12946ed311b2SBenjamin Herrenschmidt phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr) 129595f72d1eSYinghai Lu { 12966ed311b2SBenjamin Herrenschmidt phys_addr_t alloc; 12976ed311b2SBenjamin Herrenschmidt 12986ed311b2SBenjamin Herrenschmidt alloc = __memblock_alloc_base(size, align, max_addr); 12996ed311b2SBenjamin Herrenschmidt 13006ed311b2SBenjamin Herrenschmidt if (alloc == 0) 13015d63f81cSMiles Chen panic("ERROR: Failed to allocate %pa bytes below %pa.\n", 13025d63f81cSMiles Chen &size, &max_addr); 13036ed311b2SBenjamin Herrenschmidt 13046ed311b2SBenjamin Herrenschmidt return alloc; 130595f72d1eSYinghai Lu } 130695f72d1eSYinghai Lu 13076ed311b2SBenjamin Herrenschmidt phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align) 130895f72d1eSYinghai Lu { 13096ed311b2SBenjamin Herrenschmidt return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE); 131095f72d1eSYinghai Lu } 131195f72d1eSYinghai Lu 13129d1e2492SBenjamin Herrenschmidt phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid) 13139d1e2492SBenjamin Herrenschmidt { 13149d1e2492SBenjamin Herrenschmidt phys_addr_t res = memblock_alloc_nid(size, align, nid); 13159d1e2492SBenjamin Herrenschmidt 13169d1e2492SBenjamin Herrenschmidt if (res) 13179d1e2492SBenjamin Herrenschmidt return res; 131815fb0972STejun Heo return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE); 131995f72d1eSYinghai Lu } 132095f72d1eSYinghai Lu 132119373672SMathieu Malaterre #if defined(CONFIG_NO_BOOTMEM) 132226f09e9bSSantosh Shilimkar /** 132326f09e9bSSantosh Shilimkar * memblock_virt_alloc_internal - allocate boot memory block 132426f09e9bSSantosh Shilimkar * @size: size of memory block to be allocated in bytes 132526f09e9bSSantosh Shilimkar * @align: alignment of the region and block's size 132626f09e9bSSantosh Shilimkar * @min_addr: the lower bound of the memory region to allocate (phys address) 132726f09e9bSSantosh Shilimkar * @max_addr: the upper bound of the memory region to allocate (phys address) 132826f09e9bSSantosh Shilimkar * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 132926f09e9bSSantosh Shilimkar * 133026f09e9bSSantosh Shilimkar * The @min_addr limit is dropped if it can not be satisfied and the allocation 133126f09e9bSSantosh Shilimkar * will fall back to memory below @min_addr. Also, allocation may fall back 133226f09e9bSSantosh Shilimkar * to any node in the system if the specified node can not 133326f09e9bSSantosh Shilimkar * hold the requested memory. 133426f09e9bSSantosh Shilimkar * 133526f09e9bSSantosh Shilimkar * The allocation is performed from memory region limited by 133626f09e9bSSantosh Shilimkar * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE. 133726f09e9bSSantosh Shilimkar * 133847cec443SMike Rapoport * The memory block is aligned on %SMP_CACHE_BYTES if @align == 0. 133926f09e9bSSantosh Shilimkar * 134026f09e9bSSantosh Shilimkar * The phys address of allocated boot memory block is converted to virtual and 134126f09e9bSSantosh Shilimkar * allocated memory is reset to 0. 
134226f09e9bSSantosh Shilimkar * 134326f09e9bSSantosh Shilimkar * In addition, function sets the min_count to 0 using kmemleak_alloc for 134426f09e9bSSantosh Shilimkar * allocated boot memory block, so that it is never reported as leaks. 134526f09e9bSSantosh Shilimkar * 134647cec443SMike Rapoport * Return: 134726f09e9bSSantosh Shilimkar * Virtual address of allocated memory block on success, NULL on failure. 134826f09e9bSSantosh Shilimkar */ 134926f09e9bSSantosh Shilimkar static void * __init memblock_virt_alloc_internal( 135026f09e9bSSantosh Shilimkar phys_addr_t size, phys_addr_t align, 135126f09e9bSSantosh Shilimkar phys_addr_t min_addr, phys_addr_t max_addr, 135226f09e9bSSantosh Shilimkar int nid) 135326f09e9bSSantosh Shilimkar { 135426f09e9bSSantosh Shilimkar phys_addr_t alloc; 135526f09e9bSSantosh Shilimkar void *ptr; 1356e1720feeSMike Rapoport enum memblock_flags flags = choose_memblock_flags(); 135726f09e9bSSantosh Shilimkar 1358560dca27SGrygorii Strashko if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n")) 1359560dca27SGrygorii Strashko nid = NUMA_NO_NODE; 136026f09e9bSSantosh Shilimkar 136126f09e9bSSantosh Shilimkar /* 136226f09e9bSSantosh Shilimkar * Detect any accidental use of these APIs after slab is ready, as at 136326f09e9bSSantosh Shilimkar * this moment memblock may be deinitialized already and its 136426f09e9bSSantosh Shilimkar * internal data may be destroyed (after execution of free_all_bootmem) 136526f09e9bSSantosh Shilimkar */ 136626f09e9bSSantosh Shilimkar if (WARN_ON_ONCE(slab_is_available())) 136726f09e9bSSantosh Shilimkar return kzalloc_node(size, GFP_NOWAIT, nid); 136826f09e9bSSantosh Shilimkar 136926f09e9bSSantosh Shilimkar if (!align) 137026f09e9bSSantosh Shilimkar align = SMP_CACHE_BYTES; 137126f09e9bSSantosh Shilimkar 1372f544e14fSYinghai Lu if (max_addr > memblock.current_limit) 1373f544e14fSYinghai Lu max_addr = memblock.current_limit; 137426f09e9bSSantosh Shilimkar again: 137526f09e9bSSantosh Shilimkar alloc = memblock_find_in_range_node(size, align, min_addr, max_addr, 1376a3f5bafcSTony Luck nid, flags); 13777d41c03eSWei Yang if (alloc && !memblock_reserve(alloc, size)) 137826f09e9bSSantosh Shilimkar goto done; 137926f09e9bSSantosh Shilimkar 138026f09e9bSSantosh Shilimkar if (nid != NUMA_NO_NODE) { 138126f09e9bSSantosh Shilimkar alloc = memblock_find_in_range_node(size, align, min_addr, 1382fc6daaf9STony Luck max_addr, NUMA_NO_NODE, 1383a3f5bafcSTony Luck flags); 13847d41c03eSWei Yang if (alloc && !memblock_reserve(alloc, size)) 138526f09e9bSSantosh Shilimkar goto done; 138626f09e9bSSantosh Shilimkar } 138726f09e9bSSantosh Shilimkar 138826f09e9bSSantosh Shilimkar if (min_addr) { 138926f09e9bSSantosh Shilimkar min_addr = 0; 139026f09e9bSSantosh Shilimkar goto again; 139126f09e9bSSantosh Shilimkar } 139226f09e9bSSantosh Shilimkar 1393a3f5bafcSTony Luck if (flags & MEMBLOCK_MIRROR) { 1394a3f5bafcSTony Luck flags &= ~MEMBLOCK_MIRROR; 1395a3f5bafcSTony Luck pr_warn("Could not allocate %pap bytes of mirrored memory\n", 1396a3f5bafcSTony Luck &size); 1397a3f5bafcSTony Luck goto again; 1398a3f5bafcSTony Luck } 1399a3f5bafcSTony Luck 1400a3f5bafcSTony Luck return NULL; 140126f09e9bSSantosh Shilimkar done: 140226f09e9bSSantosh Shilimkar ptr = phys_to_virt(alloc); 140326f09e9bSSantosh Shilimkar 140426f09e9bSSantosh Shilimkar /* 140526f09e9bSSantosh Shilimkar * The min_count is set to 0 so that bootmem allocated blocks 140626f09e9bSSantosh Shilimkar * are never reported as leaks. 
This is because many of these blocks 140726f09e9bSSantosh Shilimkar * are only referred via the physical address which is not 140826f09e9bSSantosh Shilimkar * looked up by kmemleak. 140926f09e9bSSantosh Shilimkar */ 141026f09e9bSSantosh Shilimkar kmemleak_alloc(ptr, size, 0, 0); 141126f09e9bSSantosh Shilimkar 141226f09e9bSSantosh Shilimkar return ptr; 141326f09e9bSSantosh Shilimkar } 141426f09e9bSSantosh Shilimkar 141526f09e9bSSantosh Shilimkar /** 1416ea1f5f37SPavel Tatashin * memblock_virt_alloc_try_nid_raw - allocate boot memory block without zeroing 1417ea1f5f37SPavel Tatashin * memory and without panicking 1418ea1f5f37SPavel Tatashin * @size: size of memory block to be allocated in bytes 1419ea1f5f37SPavel Tatashin * @align: alignment of the region and block's size 1420ea1f5f37SPavel Tatashin * @min_addr: the lower bound of the memory region from where the allocation 1421ea1f5f37SPavel Tatashin * is preferred (phys address) 1422ea1f5f37SPavel Tatashin * @max_addr: the upper bound of the memory region from where the allocation 1423ea1f5f37SPavel Tatashin * is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to 1424ea1f5f37SPavel Tatashin * allocate only from memory limited by memblock.current_limit value 1425ea1f5f37SPavel Tatashin * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 1426ea1f5f37SPavel Tatashin * 1427ea1f5f37SPavel Tatashin * Public function, provides additional debug information (including caller 1428ea1f5f37SPavel Tatashin * info), if enabled. Does not zero allocated memory, does not panic if request 1429ea1f5f37SPavel Tatashin * cannot be satisfied. 1430ea1f5f37SPavel Tatashin * 143147cec443SMike Rapoport * Return: 1432ea1f5f37SPavel Tatashin * Virtual address of allocated memory block on success, NULL on failure. 
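 *
 * Illustrative sketch (not a caller from this file): when every byte of the
 * block will be written explicitly later, for instance a large array whose
 * initialization is deferred, a caller might do
 *
 *	map = memblock_virt_alloc_try_nid_raw(size, PAGE_SIZE, 0,
 *					      BOOTMEM_ALLOC_ACCESSIBLE, nid);
 *	if (!map)
 *		... fall back, this variant never panics ...
 *
 * size and nid above are hypothetical.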
1433ea1f5f37SPavel Tatashin */ 1434ea1f5f37SPavel Tatashin void * __init memblock_virt_alloc_try_nid_raw( 1435ea1f5f37SPavel Tatashin phys_addr_t size, phys_addr_t align, 1436ea1f5f37SPavel Tatashin phys_addr_t min_addr, phys_addr_t max_addr, 1437ea1f5f37SPavel Tatashin int nid) 1438ea1f5f37SPavel Tatashin { 1439ea1f5f37SPavel Tatashin void *ptr; 1440ea1f5f37SPavel Tatashin 1441*a36aab89SMike Rapoport memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pF\n", 1442*a36aab89SMike Rapoport __func__, (u64)size, (u64)align, nid, &min_addr, 1443*a36aab89SMike Rapoport &max_addr, (void *)_RET_IP_); 1444ea1f5f37SPavel Tatashin 1445ea1f5f37SPavel Tatashin ptr = memblock_virt_alloc_internal(size, align, 1446ea1f5f37SPavel Tatashin min_addr, max_addr, nid); 1447ea1f5f37SPavel Tatashin #ifdef CONFIG_DEBUG_VM 1448ea1f5f37SPavel Tatashin if (ptr && size > 0) 1449f165b378SPavel Tatashin memset(ptr, PAGE_POISON_PATTERN, size); 1450ea1f5f37SPavel Tatashin #endif 1451ea1f5f37SPavel Tatashin return ptr; 1452ea1f5f37SPavel Tatashin } 1453ea1f5f37SPavel Tatashin 1454ea1f5f37SPavel Tatashin /** 145526f09e9bSSantosh Shilimkar * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block 145626f09e9bSSantosh Shilimkar * @size: size of memory block to be allocated in bytes 145726f09e9bSSantosh Shilimkar * @align: alignment of the region and block's size 145826f09e9bSSantosh Shilimkar * @min_addr: the lower bound of the memory region from where the allocation 145926f09e9bSSantosh Shilimkar * is preferred (phys address) 146026f09e9bSSantosh Shilimkar * @max_addr: the upper bound of the memory region from where the allocation 146126f09e9bSSantosh Shilimkar * is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to 146226f09e9bSSantosh Shilimkar * allocate only from memory limited by memblock.current_limit value 146326f09e9bSSantosh Shilimkar * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 146426f09e9bSSantosh Shilimkar * 1465ea1f5f37SPavel Tatashin * Public function, provides additional debug information (including caller 1466ea1f5f37SPavel Tatashin * info), if enabled. This function zeroes the allocated memory. 146726f09e9bSSantosh Shilimkar * 146847cec443SMike Rapoport * Return: 146926f09e9bSSantosh Shilimkar * Virtual address of allocated memory block on success, NULL on failure. 
147026f09e9bSSantosh Shilimkar */ 147126f09e9bSSantosh Shilimkar void * __init memblock_virt_alloc_try_nid_nopanic( 147226f09e9bSSantosh Shilimkar phys_addr_t size, phys_addr_t align, 147326f09e9bSSantosh Shilimkar phys_addr_t min_addr, phys_addr_t max_addr, 147426f09e9bSSantosh Shilimkar int nid) 147526f09e9bSSantosh Shilimkar { 1476ea1f5f37SPavel Tatashin void *ptr; 1477ea1f5f37SPavel Tatashin 1478*a36aab89SMike Rapoport memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pF\n", 1479*a36aab89SMike Rapoport __func__, (u64)size, (u64)align, nid, &min_addr, 1480*a36aab89SMike Rapoport &max_addr, (void *)_RET_IP_); 1481ea1f5f37SPavel Tatashin 1482ea1f5f37SPavel Tatashin ptr = memblock_virt_alloc_internal(size, align, 1483ea1f5f37SPavel Tatashin min_addr, max_addr, nid); 1484ea1f5f37SPavel Tatashin if (ptr) 1485ea1f5f37SPavel Tatashin memset(ptr, 0, size); 1486ea1f5f37SPavel Tatashin return ptr; 148726f09e9bSSantosh Shilimkar } 148826f09e9bSSantosh Shilimkar 148926f09e9bSSantosh Shilimkar /** 149026f09e9bSSantosh Shilimkar * memblock_virt_alloc_try_nid - allocate boot memory block with panicking 149126f09e9bSSantosh Shilimkar * @size: size of memory block to be allocated in bytes 149226f09e9bSSantosh Shilimkar * @align: alignment of the region and block's size 149326f09e9bSSantosh Shilimkar * @min_addr: the lower bound of the memory region from where the allocation 149426f09e9bSSantosh Shilimkar * is preferred (phys address) 149526f09e9bSSantosh Shilimkar * @max_addr: the upper bound of the memory region from where the allocation 149626f09e9bSSantosh Shilimkar * is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to 149726f09e9bSSantosh Shilimkar * allocate only from memory limited by memblock.current_limit value 149826f09e9bSSantosh Shilimkar * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 149926f09e9bSSantosh Shilimkar * 1500ea1f5f37SPavel Tatashin * Public panicking version of memblock_virt_alloc_try_nid_nopanic() 150126f09e9bSSantosh Shilimkar * which provides debug information (including caller info), if enabled, 150226f09e9bSSantosh Shilimkar * and panics if the request can not be satisfied. 150326f09e9bSSantosh Shilimkar * 150447cec443SMike Rapoport * Return: 150526f09e9bSSantosh Shilimkar * Virtual address of allocated memory block on success, NULL on failure. 
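 *
 * Minimal hypothetical call site (real callers usually go through the
 * memblock_virt_alloc*() wrappers): allocating a table the system cannot
 * boot without
 *
 *	table = memblock_virt_alloc_try_nid(size, SMP_CACHE_BYTES, 0,
 *					    BOOTMEM_ALLOC_ACCESSIBLE, nid);
 *
 * No NULL check is needed here; the function panics on failure and the
 * returned memory is already zeroed. size and nid are hypothetical.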
150626f09e9bSSantosh Shilimkar */ 150726f09e9bSSantosh Shilimkar void * __init memblock_virt_alloc_try_nid( 150826f09e9bSSantosh Shilimkar phys_addr_t size, phys_addr_t align, 150926f09e9bSSantosh Shilimkar phys_addr_t min_addr, phys_addr_t max_addr, 151026f09e9bSSantosh Shilimkar int nid) 151126f09e9bSSantosh Shilimkar { 151226f09e9bSSantosh Shilimkar void *ptr; 151326f09e9bSSantosh Shilimkar 1514*a36aab89SMike Rapoport memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pF\n", 1515*a36aab89SMike Rapoport __func__, (u64)size, (u64)align, nid, &min_addr, 1516*a36aab89SMike Rapoport &max_addr, (void *)_RET_IP_); 151726f09e9bSSantosh Shilimkar ptr = memblock_virt_alloc_internal(size, align, 151826f09e9bSSantosh Shilimkar min_addr, max_addr, nid); 1519ea1f5f37SPavel Tatashin if (ptr) { 1520ea1f5f37SPavel Tatashin memset(ptr, 0, size); 152126f09e9bSSantosh Shilimkar return ptr; 1522ea1f5f37SPavel Tatashin } 152326f09e9bSSantosh Shilimkar 1524*a36aab89SMike Rapoport panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa\n", 1525*a36aab89SMike Rapoport __func__, (u64)size, (u64)align, nid, &min_addr, &max_addr); 152626f09e9bSSantosh Shilimkar return NULL; 152726f09e9bSSantosh Shilimkar } 152819373672SMathieu Malaterre #endif 152926f09e9bSSantosh Shilimkar 153026f09e9bSSantosh Shilimkar /** 153126f09e9bSSantosh Shilimkar * __memblock_free_early - free boot memory block 153226f09e9bSSantosh Shilimkar * @base: phys starting address of the boot memory block 153326f09e9bSSantosh Shilimkar * @size: size of the boot memory block in bytes 153426f09e9bSSantosh Shilimkar * 153526f09e9bSSantosh Shilimkar * Free boot memory block previously allocated by memblock_virt_alloc_xx() API. 153626f09e9bSSantosh Shilimkar * The freed memory will not be released to the buddy allocator. 153726f09e9bSSantosh Shilimkar */ 153826f09e9bSSantosh Shilimkar void __init __memblock_free_early(phys_addr_t base, phys_addr_t size) 153926f09e9bSSantosh Shilimkar { 1540*a36aab89SMike Rapoport phys_addr_t end = base + size - 1; 1541*a36aab89SMike Rapoport 1542*a36aab89SMike Rapoport memblock_dbg("%s: [%pa-%pa] %pF\n", 1543*a36aab89SMike Rapoport __func__, &base, &end, (void *)_RET_IP_); 15449099daedSCatalin Marinas kmemleak_free_part_phys(base, size); 1545f1af9d3aSPhilipp Hachtmann memblock_remove_range(&memblock.reserved, base, size); 154626f09e9bSSantosh Shilimkar } 154726f09e9bSSantosh Shilimkar 154848a833ccSMike Rapoport /** 154926f09e9bSSantosh Shilimkar * __memblock_free_late - free bootmem block pages directly to buddy allocator 155048a833ccSMike Rapoport * @base: phys starting address of the boot memory block 155126f09e9bSSantosh Shilimkar * @size: size of the boot memory block in bytes 155226f09e9bSSantosh Shilimkar * 155326f09e9bSSantosh Shilimkar * This is only useful when the bootmem allocator has already been torn 155426f09e9bSSantosh Shilimkar * down, but we are still initializing the system. Pages are released directly 155526f09e9bSSantosh Shilimkar * to the buddy allocator; no bootmem metadata is updated because it is gone.
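 *
 * Hypothetical example: releasing an early reservation that is no longer
 * needed once free_all_bootmem() has already run
 *
 *	__memblock_free_late(__pa(buf), size);
 *
 * which hands the underlying pages straight to the buddy allocator; buf and
 * size are placeholders.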
155626f09e9bSSantosh Shilimkar */ 155726f09e9bSSantosh Shilimkar void __init __memblock_free_late(phys_addr_t base, phys_addr_t size) 155826f09e9bSSantosh Shilimkar { 1559*a36aab89SMike Rapoport phys_addr_t cursor, end; 156026f09e9bSSantosh Shilimkar 1561*a36aab89SMike Rapoport end = base + size - 1; 1562*a36aab89SMike Rapoport memblock_dbg("%s: [%pa-%pa] %pF\n", 1563*a36aab89SMike Rapoport __func__, &base, &end, (void *)_RET_IP_); 15649099daedSCatalin Marinas kmemleak_free_part_phys(base, size); 156526f09e9bSSantosh Shilimkar cursor = PFN_UP(base); 156626f09e9bSSantosh Shilimkar end = PFN_DOWN(base + size); 156726f09e9bSSantosh Shilimkar 156826f09e9bSSantosh Shilimkar for (; cursor < end; cursor++) { 1569d70ddd7aSMel Gorman __free_pages_bootmem(pfn_to_page(cursor), cursor, 0); 157026f09e9bSSantosh Shilimkar totalram_pages++; 157126f09e9bSSantosh Shilimkar } 157226f09e9bSSantosh Shilimkar } 15739d1e2492SBenjamin Herrenschmidt 15749d1e2492SBenjamin Herrenschmidt /* 15759d1e2492SBenjamin Herrenschmidt * Remaining API functions 15769d1e2492SBenjamin Herrenschmidt */ 15779d1e2492SBenjamin Herrenschmidt 15781f1ffb8aSDavid Gibson phys_addr_t __init_memblock memblock_phys_mem_size(void) 157995f72d1eSYinghai Lu { 15801440c4e2STejun Heo return memblock.memory.total_size; 158195f72d1eSYinghai Lu } 158295f72d1eSYinghai Lu 15838907de5dSSrikar Dronamraju phys_addr_t __init_memblock memblock_reserved_size(void) 15848907de5dSSrikar Dronamraju { 15858907de5dSSrikar Dronamraju return memblock.reserved.total_size; 15868907de5dSSrikar Dronamraju } 15878907de5dSSrikar Dronamraju 1588595ad9afSYinghai Lu phys_addr_t __init memblock_mem_size(unsigned long limit_pfn) 1589595ad9afSYinghai Lu { 1590595ad9afSYinghai Lu unsigned long pages = 0; 1591595ad9afSYinghai Lu struct memblock_region *r; 1592595ad9afSYinghai Lu unsigned long start_pfn, end_pfn; 1593595ad9afSYinghai Lu 1594595ad9afSYinghai Lu for_each_memblock(memory, r) { 1595595ad9afSYinghai Lu start_pfn = memblock_region_memory_base_pfn(r); 1596595ad9afSYinghai Lu end_pfn = memblock_region_memory_end_pfn(r); 1597595ad9afSYinghai Lu start_pfn = min_t(unsigned long, start_pfn, limit_pfn); 1598595ad9afSYinghai Lu end_pfn = min_t(unsigned long, end_pfn, limit_pfn); 1599595ad9afSYinghai Lu pages += end_pfn - start_pfn; 1600595ad9afSYinghai Lu } 1601595ad9afSYinghai Lu 160216763230SFabian Frederick return PFN_PHYS(pages); 1603595ad9afSYinghai Lu } 1604595ad9afSYinghai Lu 16050a93ebefSSam Ravnborg /* lowest address */ 16060a93ebefSSam Ravnborg phys_addr_t __init_memblock memblock_start_of_DRAM(void) 16070a93ebefSSam Ravnborg { 16080a93ebefSSam Ravnborg return memblock.memory.regions[0].base; 16090a93ebefSSam Ravnborg } 16100a93ebefSSam Ravnborg 161110d06439SYinghai Lu phys_addr_t __init_memblock memblock_end_of_DRAM(void) 161295f72d1eSYinghai Lu { 161395f72d1eSYinghai Lu int idx = memblock.memory.cnt - 1; 161495f72d1eSYinghai Lu 1615e3239ff9SBenjamin Herrenschmidt return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size); 161695f72d1eSYinghai Lu } 161795f72d1eSYinghai Lu 1618a571d4ebSDennis Chen static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit) 161995f72d1eSYinghai Lu { 16201c4bc43dSStefan Agner phys_addr_t max_addr = PHYS_ADDR_MAX; 1621136199f0SEmil Medve struct memblock_region *r; 162295f72d1eSYinghai Lu 1623a571d4ebSDennis Chen /* 1624a571d4ebSDennis Chen * translate the memory @limit size into the max address within one of 1625a571d4ebSDennis Chen * the memory memblock regions, if the @limit exceeds the total size 
16261c4bc43dSStefan Agner * of those regions, max_addr will keep original value PHYS_ADDR_MAX 1627a571d4ebSDennis Chen */ 1628136199f0SEmil Medve for_each_memblock(memory, r) { 1629c0ce8fefSTejun Heo if (limit <= r->size) { 1630c0ce8fefSTejun Heo max_addr = r->base + limit; 163195f72d1eSYinghai Lu break; 163295f72d1eSYinghai Lu } 1633c0ce8fefSTejun Heo limit -= r->size; 163495f72d1eSYinghai Lu } 1635c0ce8fefSTejun Heo 1636a571d4ebSDennis Chen return max_addr; 1637a571d4ebSDennis Chen } 1638a571d4ebSDennis Chen 1639a571d4ebSDennis Chen void __init memblock_enforce_memory_limit(phys_addr_t limit) 1640a571d4ebSDennis Chen { 16411c4bc43dSStefan Agner phys_addr_t max_addr = PHYS_ADDR_MAX; 1642a571d4ebSDennis Chen 1643a571d4ebSDennis Chen if (!limit) 1644a571d4ebSDennis Chen return; 1645a571d4ebSDennis Chen 1646a571d4ebSDennis Chen max_addr = __find_max_addr(limit); 1647a571d4ebSDennis Chen 1648a571d4ebSDennis Chen /* @limit exceeds the total size of the memory, do nothing */ 16491c4bc43dSStefan Agner if (max_addr == PHYS_ADDR_MAX) 1650a571d4ebSDennis Chen return; 1651a571d4ebSDennis Chen 1652c0ce8fefSTejun Heo /* truncate both memory and reserved regions */ 1653f1af9d3aSPhilipp Hachtmann memblock_remove_range(&memblock.memory, max_addr, 16541c4bc43dSStefan Agner PHYS_ADDR_MAX); 1655f1af9d3aSPhilipp Hachtmann memblock_remove_range(&memblock.reserved, max_addr, 16561c4bc43dSStefan Agner PHYS_ADDR_MAX); 165795f72d1eSYinghai Lu } 165895f72d1eSYinghai Lu 1659c9ca9b4eSAKASHI Takahiro void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size) 1660c9ca9b4eSAKASHI Takahiro { 1661c9ca9b4eSAKASHI Takahiro int start_rgn, end_rgn; 1662c9ca9b4eSAKASHI Takahiro int i, ret; 1663c9ca9b4eSAKASHI Takahiro 1664c9ca9b4eSAKASHI Takahiro if (!size) 1665c9ca9b4eSAKASHI Takahiro return; 1666c9ca9b4eSAKASHI Takahiro 1667c9ca9b4eSAKASHI Takahiro ret = memblock_isolate_range(&memblock.memory, base, size, 1668c9ca9b4eSAKASHI Takahiro &start_rgn, &end_rgn); 1669c9ca9b4eSAKASHI Takahiro if (ret) 1670c9ca9b4eSAKASHI Takahiro return; 1671c9ca9b4eSAKASHI Takahiro 1672c9ca9b4eSAKASHI Takahiro /* remove all the MAP regions */ 1673c9ca9b4eSAKASHI Takahiro for (i = memblock.memory.cnt - 1; i >= end_rgn; i--) 1674c9ca9b4eSAKASHI Takahiro if (!memblock_is_nomap(&memblock.memory.regions[i])) 1675c9ca9b4eSAKASHI Takahiro memblock_remove_region(&memblock.memory, i); 1676c9ca9b4eSAKASHI Takahiro 1677c9ca9b4eSAKASHI Takahiro for (i = start_rgn - 1; i >= 0; i--) 1678c9ca9b4eSAKASHI Takahiro if (!memblock_is_nomap(&memblock.memory.regions[i])) 1679c9ca9b4eSAKASHI Takahiro memblock_remove_region(&memblock.memory, i); 1680c9ca9b4eSAKASHI Takahiro 1681c9ca9b4eSAKASHI Takahiro /* truncate the reserved regions */ 1682c9ca9b4eSAKASHI Takahiro memblock_remove_range(&memblock.reserved, 0, base); 1683c9ca9b4eSAKASHI Takahiro memblock_remove_range(&memblock.reserved, 16841c4bc43dSStefan Agner base + size, PHYS_ADDR_MAX); 1685c9ca9b4eSAKASHI Takahiro } 1686c9ca9b4eSAKASHI Takahiro 1687a571d4ebSDennis Chen void __init memblock_mem_limit_remove_map(phys_addr_t limit) 1688a571d4ebSDennis Chen { 1689a571d4ebSDennis Chen phys_addr_t max_addr; 1690a571d4ebSDennis Chen 1691a571d4ebSDennis Chen if (!limit) 1692a571d4ebSDennis Chen return; 1693a571d4ebSDennis Chen 1694a571d4ebSDennis Chen max_addr = __find_max_addr(limit); 1695a571d4ebSDennis Chen 1696a571d4ebSDennis Chen /* @limit exceeds the total size of the memory, do nothing */ 16971c4bc43dSStefan Agner if (max_addr == PHYS_ADDR_MAX) 1698a571d4ebSDennis Chen return; 1699a571d4ebSDennis Chen 
1700c9ca9b4eSAKASHI Takahiro memblock_cap_memory_range(0, max_addr); 1701a571d4ebSDennis Chen } 1702a571d4ebSDennis Chen 1703cd79481dSYinghai Lu static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr) 170472d4b0b4SBenjamin Herrenschmidt { 170572d4b0b4SBenjamin Herrenschmidt unsigned int left = 0, right = type->cnt; 170672d4b0b4SBenjamin Herrenschmidt 170772d4b0b4SBenjamin Herrenschmidt do { 170872d4b0b4SBenjamin Herrenschmidt unsigned int mid = (right + left) / 2; 170972d4b0b4SBenjamin Herrenschmidt 171072d4b0b4SBenjamin Herrenschmidt if (addr < type->regions[mid].base) 171172d4b0b4SBenjamin Herrenschmidt right = mid; 171272d4b0b4SBenjamin Herrenschmidt else if (addr >= (type->regions[mid].base + 171372d4b0b4SBenjamin Herrenschmidt type->regions[mid].size)) 171472d4b0b4SBenjamin Herrenschmidt left = mid + 1; 171572d4b0b4SBenjamin Herrenschmidt else 171672d4b0b4SBenjamin Herrenschmidt return mid; 171772d4b0b4SBenjamin Herrenschmidt } while (left < right); 171872d4b0b4SBenjamin Herrenschmidt return -1; 171972d4b0b4SBenjamin Herrenschmidt } 172072d4b0b4SBenjamin Herrenschmidt 1721b4ad0c7eSYaowei Bai bool __init memblock_is_reserved(phys_addr_t addr) 172295f72d1eSYinghai Lu { 172372d4b0b4SBenjamin Herrenschmidt return memblock_search(&memblock.reserved, addr) != -1; 172495f72d1eSYinghai Lu } 172572d4b0b4SBenjamin Herrenschmidt 1726b4ad0c7eSYaowei Bai bool __init_memblock memblock_is_memory(phys_addr_t addr) 172772d4b0b4SBenjamin Herrenschmidt { 172872d4b0b4SBenjamin Herrenschmidt return memblock_search(&memblock.memory, addr) != -1; 172972d4b0b4SBenjamin Herrenschmidt } 173072d4b0b4SBenjamin Herrenschmidt 1731937f0c26SYaowei Bai bool __init_memblock memblock_is_map_memory(phys_addr_t addr) 1732bf3d3cc5SArd Biesheuvel { 1733bf3d3cc5SArd Biesheuvel int i = memblock_search(&memblock.memory, addr); 1734bf3d3cc5SArd Biesheuvel 1735bf3d3cc5SArd Biesheuvel if (i == -1) 1736bf3d3cc5SArd Biesheuvel return false; 1737bf3d3cc5SArd Biesheuvel return !memblock_is_nomap(&memblock.memory.regions[i]); 1738bf3d3cc5SArd Biesheuvel } 1739bf3d3cc5SArd Biesheuvel 1740e76b63f8SYinghai Lu #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 1741e76b63f8SYinghai Lu int __init_memblock memblock_search_pfn_nid(unsigned long pfn, 1742e76b63f8SYinghai Lu unsigned long *start_pfn, unsigned long *end_pfn) 1743e76b63f8SYinghai Lu { 1744e76b63f8SYinghai Lu struct memblock_type *type = &memblock.memory; 174516763230SFabian Frederick int mid = memblock_search(type, PFN_PHYS(pfn)); 1746e76b63f8SYinghai Lu 1747e76b63f8SYinghai Lu if (mid == -1) 1748e76b63f8SYinghai Lu return -1; 1749e76b63f8SYinghai Lu 1750f7e2f7e8SFabian Frederick *start_pfn = PFN_DOWN(type->regions[mid].base); 1751f7e2f7e8SFabian Frederick *end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size); 1752e76b63f8SYinghai Lu 1753e76b63f8SYinghai Lu return type->regions[mid].nid; 1754e76b63f8SYinghai Lu } 1755e76b63f8SYinghai Lu #endif 1756e76b63f8SYinghai Lu 1757eab30949SStephen Boyd /** 1758eab30949SStephen Boyd * memblock_is_region_memory - check if a region is a subset of memory 1759eab30949SStephen Boyd * @base: base of region to check 1760eab30949SStephen Boyd * @size: size of region to check 1761eab30949SStephen Boyd * 1762eab30949SStephen Boyd * Check if the region [@base, @base + @size) is a subset of a memory block. 
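 *
 * Hypothetical example (no such caller in this file): early code validating
 * a firmware-described RAM window could do
 *
 *	if (!memblock_is_region_memory(base, size))
 *		... reject or clamp the range ...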
1763eab30949SStephen Boyd * 176447cec443SMike Rapoport * Return: 1765eab30949SStephen Boyd * 0 if false, non-zero if true 1766eab30949SStephen Boyd */ 1767937f0c26SYaowei Bai bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size) 176872d4b0b4SBenjamin Herrenschmidt { 1769abb65272STomi Valkeinen int idx = memblock_search(&memblock.memory, base); 1770eb18f1b5STejun Heo phys_addr_t end = base + memblock_cap_size(base, &size); 177172d4b0b4SBenjamin Herrenschmidt 177272d4b0b4SBenjamin Herrenschmidt if (idx == -1) 1773937f0c26SYaowei Bai return false; 1774ef415ef4SWei Yang return (memblock.memory.regions[idx].base + 1775eb18f1b5STejun Heo memblock.memory.regions[idx].size) >= end; 177695f72d1eSYinghai Lu } 177795f72d1eSYinghai Lu 1778eab30949SStephen Boyd /** 1779eab30949SStephen Boyd * memblock_is_region_reserved - check if a region intersects reserved memory 1780eab30949SStephen Boyd * @base: base of region to check 1781eab30949SStephen Boyd * @size: size of region to check 1782eab30949SStephen Boyd * 178347cec443SMike Rapoport * Check if the region [@base, @base + @size) intersects a reserved 178447cec443SMike Rapoport * memory block. 1785eab30949SStephen Boyd * 178647cec443SMike Rapoport * Return: 1787c5c5c9d1STang Chen * True if they intersect, false if not. 1788eab30949SStephen Boyd */ 1789c5c5c9d1STang Chen bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size) 179095f72d1eSYinghai Lu { 1791eb18f1b5STejun Heo memblock_cap_size(base, &size); 1792c5c5c9d1STang Chen return memblock_overlaps_region(&memblock.reserved, base, size); 179395f72d1eSYinghai Lu } 179495f72d1eSYinghai Lu 17956ede1fd3SYinghai Lu void __init_memblock memblock_trim_memory(phys_addr_t align) 17966ede1fd3SYinghai Lu { 17976ede1fd3SYinghai Lu phys_addr_t start, end, orig_start, orig_end; 1798136199f0SEmil Medve struct memblock_region *r; 17996ede1fd3SYinghai Lu 1800136199f0SEmil Medve for_each_memblock(memory, r) { 1801136199f0SEmil Medve orig_start = r->base; 1802136199f0SEmil Medve orig_end = r->base + r->size; 18036ede1fd3SYinghai Lu start = round_up(orig_start, align); 18046ede1fd3SYinghai Lu end = round_down(orig_end, align); 18056ede1fd3SYinghai Lu 18066ede1fd3SYinghai Lu if (start == orig_start && end == orig_end) 18076ede1fd3SYinghai Lu continue; 18086ede1fd3SYinghai Lu 18096ede1fd3SYinghai Lu if (start < end) { 1810136199f0SEmil Medve r->base = start; 1811136199f0SEmil Medve r->size = end - start; 18126ede1fd3SYinghai Lu } else { 1813136199f0SEmil Medve memblock_remove_region(&memblock.memory, 1814136199f0SEmil Medve r - memblock.memory.regions); 1815136199f0SEmil Medve r--; 18166ede1fd3SYinghai Lu } 18176ede1fd3SYinghai Lu } 18186ede1fd3SYinghai Lu } 1819e63075a3SBenjamin Herrenschmidt 18203661ca66SYinghai Lu void __init_memblock memblock_set_current_limit(phys_addr_t limit) 1821e63075a3SBenjamin Herrenschmidt { 1822e63075a3SBenjamin Herrenschmidt memblock.current_limit = limit; 1823e63075a3SBenjamin Herrenschmidt } 1824e63075a3SBenjamin Herrenschmidt 1825fec51014SLaura Abbott phys_addr_t __init_memblock memblock_get_current_limit(void) 1826fec51014SLaura Abbott { 1827fec51014SLaura Abbott return memblock.current_limit; 1828fec51014SLaura Abbott } 1829fec51014SLaura Abbott 18300262d9c8SHeiko Carstens static void __init_memblock memblock_dump(struct memblock_type *type) 18316ed311b2SBenjamin Herrenschmidt { 18325d63f81cSMiles Chen phys_addr_t base, end, size; 1833e1720feeSMike Rapoport enum memblock_flags flags; 18348c9c1701SAlexander Kuleshov int idx; 
18358c9c1701SAlexander Kuleshov struct memblock_region *rgn; 18366ed311b2SBenjamin Herrenschmidt 18370262d9c8SHeiko Carstens pr_info(" %s.cnt = 0x%lx\n", type->name, type->cnt); 18386ed311b2SBenjamin Herrenschmidt 183966e8b438SGioh Kim for_each_memblock_type(idx, type, rgn) { 18407c0caeb8STejun Heo char nid_buf[32] = ""; 18416ed311b2SBenjamin Herrenschmidt 18427c0caeb8STejun Heo base = rgn->base; 18437c0caeb8STejun Heo size = rgn->size; 18445d63f81cSMiles Chen end = base + size - 1; 184566a20757STang Chen flags = rgn->flags; 18467c0caeb8STejun Heo #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 18477c0caeb8STejun Heo if (memblock_get_region_node(rgn) != MAX_NUMNODES) 18487c0caeb8STejun Heo snprintf(nid_buf, sizeof(nid_buf), " on node %d", 18497c0caeb8STejun Heo memblock_get_region_node(rgn)); 18507c0caeb8STejun Heo #endif 1851e1720feeSMike Rapoport pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n", 18520262d9c8SHeiko Carstens type->name, idx, &base, &end, &size, nid_buf, flags); 18536ed311b2SBenjamin Herrenschmidt } 18546ed311b2SBenjamin Herrenschmidt } 18556ed311b2SBenjamin Herrenschmidt 18564ff7b82fSTejun Heo void __init_memblock __memblock_dump_all(void) 18576ed311b2SBenjamin Herrenschmidt { 18586ed311b2SBenjamin Herrenschmidt pr_info("MEMBLOCK configuration:\n"); 18595d63f81cSMiles Chen pr_info(" memory size = %pa reserved size = %pa\n", 18605d63f81cSMiles Chen &memblock.memory.total_size, 18615d63f81cSMiles Chen &memblock.reserved.total_size); 18626ed311b2SBenjamin Herrenschmidt 18630262d9c8SHeiko Carstens memblock_dump(&memblock.memory); 18640262d9c8SHeiko Carstens memblock_dump(&memblock.reserved); 1865409efd4cSHeiko Carstens #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP 18660262d9c8SHeiko Carstens memblock_dump(&memblock.physmem); 1867409efd4cSHeiko Carstens #endif 18686ed311b2SBenjamin Herrenschmidt } 18696ed311b2SBenjamin Herrenschmidt 18701aadc056STejun Heo void __init memblock_allow_resize(void) 18716ed311b2SBenjamin Herrenschmidt { 1872142b45a7SBenjamin Herrenschmidt memblock_can_resize = 1; 18736ed311b2SBenjamin Herrenschmidt } 18746ed311b2SBenjamin Herrenschmidt 18756ed311b2SBenjamin Herrenschmidt static int __init early_memblock(char *p) 18766ed311b2SBenjamin Herrenschmidt { 18776ed311b2SBenjamin Herrenschmidt if (p && strstr(p, "debug")) 18786ed311b2SBenjamin Herrenschmidt memblock_debug = 1; 18796ed311b2SBenjamin Herrenschmidt return 0; 18806ed311b2SBenjamin Herrenschmidt } 18816ed311b2SBenjamin Herrenschmidt early_param("memblock", early_memblock); 18826ed311b2SBenjamin Herrenschmidt 1883c378ddd5STejun Heo #if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK) 18846d03b885SBenjamin Herrenschmidt 18856d03b885SBenjamin Herrenschmidt static int memblock_debug_show(struct seq_file *m, void *private) 18866d03b885SBenjamin Herrenschmidt { 18876d03b885SBenjamin Herrenschmidt struct memblock_type *type = m->private; 18886d03b885SBenjamin Herrenschmidt struct memblock_region *reg; 18896d03b885SBenjamin Herrenschmidt int i; 18905d63f81cSMiles Chen phys_addr_t end; 18916d03b885SBenjamin Herrenschmidt 18926d03b885SBenjamin Herrenschmidt for (i = 0; i < type->cnt; i++) { 18936d03b885SBenjamin Herrenschmidt reg = &type->regions[i]; 18945d63f81cSMiles Chen end = reg->base + reg->size - 1; 18956d03b885SBenjamin Herrenschmidt 18965d63f81cSMiles Chen seq_printf(m, "%4d: ", i); 18975d63f81cSMiles Chen seq_printf(m, "%pa..%pa\n", ®->base, &end); 18986d03b885SBenjamin Herrenschmidt } 18996d03b885SBenjamin Herrenschmidt return 0; 19006d03b885SBenjamin Herrenschmidt } 19015ad35093SAndy 
Shevchenko DEFINE_SHOW_ATTRIBUTE(memblock_debug); 19026d03b885SBenjamin Herrenschmidt 19036d03b885SBenjamin Herrenschmidt static int __init memblock_init_debugfs(void) 19046d03b885SBenjamin Herrenschmidt { 19056d03b885SBenjamin Herrenschmidt struct dentry *root = debugfs_create_dir("memblock", NULL); 19066d03b885SBenjamin Herrenschmidt if (!root) 19076d03b885SBenjamin Herrenschmidt return -ENXIO; 19080825a6f9SJoe Perches debugfs_create_file("memory", 0444, root, 19090825a6f9SJoe Perches &memblock.memory, &memblock_debug_fops); 19100825a6f9SJoe Perches debugfs_create_file("reserved", 0444, root, 19110825a6f9SJoe Perches &memblock.reserved, &memblock_debug_fops); 191270210ed9SPhilipp Hachtmann #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP 19130825a6f9SJoe Perches debugfs_create_file("physmem", 0444, root, 19140825a6f9SJoe Perches &memblock.physmem, &memblock_debug_fops); 191570210ed9SPhilipp Hachtmann #endif 19166d03b885SBenjamin Herrenschmidt 19176d03b885SBenjamin Herrenschmidt return 0; 19186d03b885SBenjamin Herrenschmidt } 19196d03b885SBenjamin Herrenschmidt __initcall(memblock_init_debugfs); 19206d03b885SBenjamin Herrenschmidt 19216d03b885SBenjamin Herrenschmidt #endif /* CONFIG_DEBUG_FS */ 1922
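/*
 * Illustrative note (not part of the kernel source): with CONFIG_DEBUG_FS
 * enabled, the files created by memblock_init_debugfs() above can be read at
 * run time, e.g. /sys/kernel/debug/memblock/memory, where each line uses the
 * "%4d: %pa..%pa" format produced by memblock_debug_show(), for example
 *
 *	   0: 0x0000000000001000..0x000000003fffffff
 *
 * The address range shown is hypothetical.
 */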