xref: /linux/mm/memblock.c (revision 167632303005670713603452a3c9ee5de4aa5828)
195f72d1eSYinghai Lu /*
295f72d1eSYinghai Lu  * Procedures for maintaining information about logical memory blocks.
395f72d1eSYinghai Lu  *
495f72d1eSYinghai Lu  * Peter Bergner, IBM Corp.	June 2001.
595f72d1eSYinghai Lu  * Copyright (C) 2001 Peter Bergner.
695f72d1eSYinghai Lu  *
795f72d1eSYinghai Lu  *      This program is free software; you can redistribute it and/or
895f72d1eSYinghai Lu  *      modify it under the terms of the GNU General Public License
995f72d1eSYinghai Lu  *      as published by the Free Software Foundation; either version
1095f72d1eSYinghai Lu  *      2 of the License, or (at your option) any later version.
1195f72d1eSYinghai Lu  */
1295f72d1eSYinghai Lu 
1395f72d1eSYinghai Lu #include <linux/kernel.h>
14142b45a7SBenjamin Herrenschmidt #include <linux/slab.h>
1595f72d1eSYinghai Lu #include <linux/init.h>
1695f72d1eSYinghai Lu #include <linux/bitops.h>
17449e8df3SBenjamin Herrenschmidt #include <linux/poison.h>
18c196f76fSBenjamin Herrenschmidt #include <linux/pfn.h>
196d03b885SBenjamin Herrenschmidt #include <linux/debugfs.h>
206d03b885SBenjamin Herrenschmidt #include <linux/seq_file.h>
2195f72d1eSYinghai Lu #include <linux/memblock.h>
2295f72d1eSYinghai Lu 
2379442ed1STang Chen #include <asm-generic/sections.h>
2426f09e9bSSantosh Shilimkar #include <linux/io.h>
2526f09e9bSSantosh Shilimkar 
2626f09e9bSSantosh Shilimkar #include "internal.h"
2779442ed1STang Chen 
28fe091c20STejun Heo static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
29fe091c20STejun Heo static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
30fe091c20STejun Heo 
31fe091c20STejun Heo struct memblock memblock __initdata_memblock = {
32fe091c20STejun Heo 	.memory.regions		= memblock_memory_init_regions,
33fe091c20STejun Heo 	.memory.cnt		= 1,	/* empty dummy entry */
34fe091c20STejun Heo 	.memory.max		= INIT_MEMBLOCK_REGIONS,
35fe091c20STejun Heo 
36fe091c20STejun Heo 	.reserved.regions	= memblock_reserved_init_regions,
37fe091c20STejun Heo 	.reserved.cnt		= 1,	/* empty dummy entry */
38fe091c20STejun Heo 	.reserved.max		= INIT_MEMBLOCK_REGIONS,
39fe091c20STejun Heo 
4079442ed1STang Chen 	.bottom_up		= false,
41fe091c20STejun Heo 	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
42fe091c20STejun Heo };
4395f72d1eSYinghai Lu 
4410d06439SYinghai Lu int memblock_debug __initdata_memblock;
4555ac590cSTang Chen #ifdef CONFIG_MOVABLE_NODE
4655ac590cSTang Chen bool movable_node_enabled __initdata_memblock = false;
4755ac590cSTang Chen #endif
481aadc056STejun Heo static int memblock_can_resize __initdata_memblock;
49181eb394SGavin Shan static int memblock_memory_in_slab __initdata_memblock = 0;
50181eb394SGavin Shan static int memblock_reserved_in_slab __initdata_memblock = 0;
5195f72d1eSYinghai Lu 
52142b45a7SBenjamin Herrenschmidt /* inline so we don't get a warning when pr_debug is compiled out */
53c2233116SRaghavendra D Prabhu static __init_memblock const char *
54c2233116SRaghavendra D Prabhu memblock_type_name(struct memblock_type *type)
55142b45a7SBenjamin Herrenschmidt {
56142b45a7SBenjamin Herrenschmidt 	if (type == &memblock.memory)
57142b45a7SBenjamin Herrenschmidt 		return "memory";
58142b45a7SBenjamin Herrenschmidt 	else if (type == &memblock.reserved)
59142b45a7SBenjamin Herrenschmidt 		return "reserved";
60142b45a7SBenjamin Herrenschmidt 	else
61142b45a7SBenjamin Herrenschmidt 		return "unknown";
62142b45a7SBenjamin Herrenschmidt }
63142b45a7SBenjamin Herrenschmidt 
64eb18f1b5STejun Heo /* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
65eb18f1b5STejun Heo static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
66eb18f1b5STejun Heo {
67eb18f1b5STejun Heo 	return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
68eb18f1b5STejun Heo }
69eb18f1b5STejun Heo 
706ed311b2SBenjamin Herrenschmidt /*
716ed311b2SBenjamin Herrenschmidt  * Address comparison utilities
726ed311b2SBenjamin Herrenschmidt  */
7310d06439SYinghai Lu static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
742898cc4cSBenjamin Herrenschmidt 				       phys_addr_t base2, phys_addr_t size2)
7595f72d1eSYinghai Lu {
7695f72d1eSYinghai Lu 	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
7795f72d1eSYinghai Lu }
7895f72d1eSYinghai Lu 
792d7d3eb2SH Hartley Sweeten static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
802d7d3eb2SH Hartley Sweeten 					phys_addr_t base, phys_addr_t size)
816ed311b2SBenjamin Herrenschmidt {
826ed311b2SBenjamin Herrenschmidt 	unsigned long i;
836ed311b2SBenjamin Herrenschmidt 
846ed311b2SBenjamin Herrenschmidt 	for (i = 0; i < type->cnt; i++) {
856ed311b2SBenjamin Herrenschmidt 		phys_addr_t rgnbase = type->regions[i].base;
866ed311b2SBenjamin Herrenschmidt 		phys_addr_t rgnsize = type->regions[i].size;
876ed311b2SBenjamin Herrenschmidt 		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
886ed311b2SBenjamin Herrenschmidt 			break;
896ed311b2SBenjamin Herrenschmidt 	}
906ed311b2SBenjamin Herrenschmidt 
916ed311b2SBenjamin Herrenschmidt 	return (i < type->cnt) ? i : -1;
926ed311b2SBenjamin Herrenschmidt }
936ed311b2SBenjamin Herrenschmidt 
9479442ed1STang Chen /*
9579442ed1STang Chen  * __memblock_find_range_bottom_up - find free area utility in bottom-up
9679442ed1STang Chen  * @start: start of candidate range
9779442ed1STang Chen  * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
9879442ed1STang Chen  * @size: size of free area to find
9979442ed1STang Chen  * @align: alignment of free area to find
100b1154233SGrygorii Strashko  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
10179442ed1STang Chen  *
10279442ed1STang Chen  * Utility called from memblock_find_in_range_node(), find free area bottom-up.
10379442ed1STang Chen  *
10479442ed1STang Chen  * RETURNS:
10579442ed1STang Chen  * Found address on success, 0 on failure.
10679442ed1STang Chen  */
10779442ed1STang Chen static phys_addr_t __init_memblock
10879442ed1STang Chen __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
10979442ed1STang Chen 				phys_addr_t size, phys_addr_t align, int nid)
11079442ed1STang Chen {
11179442ed1STang Chen 	phys_addr_t this_start, this_end, cand;
11279442ed1STang Chen 	u64 i;
11379442ed1STang Chen 
11479442ed1STang Chen 	for_each_free_mem_range(i, nid, &this_start, &this_end, NULL) {
11579442ed1STang Chen 		this_start = clamp(this_start, start, end);
11679442ed1STang Chen 		this_end = clamp(this_end, start, end);
11779442ed1STang Chen 
11879442ed1STang Chen 		cand = round_up(this_start, align);
11979442ed1STang Chen 		if (cand < this_end && this_end - cand >= size)
12079442ed1STang Chen 			return cand;
12179442ed1STang Chen 	}
12279442ed1STang Chen 
12379442ed1STang Chen 	return 0;
12479442ed1STang Chen }
12579442ed1STang Chen 
1267bd0b0f0STejun Heo /**
1271402899eSTang Chen  * __memblock_find_range_top_down - find free area utility, in top-down
1281402899eSTang Chen  * @start: start of candidate range
1291402899eSTang Chen  * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
1301402899eSTang Chen  * @size: size of free area to find
1311402899eSTang Chen  * @align: alignment of free area to find
132b1154233SGrygorii Strashko  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1331402899eSTang Chen  *
1341402899eSTang Chen  * Utility called from memblock_find_in_range_node(), find free area top-down.
1351402899eSTang Chen  *
1361402899eSTang Chen  * RETURNS:
13779442ed1STang Chen  * Found address on success, 0 on failure.
1381402899eSTang Chen  */
1391402899eSTang Chen static phys_addr_t __init_memblock
1401402899eSTang Chen __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
1411402899eSTang Chen 			       phys_addr_t size, phys_addr_t align, int nid)
1421402899eSTang Chen {
1431402899eSTang Chen 	phys_addr_t this_start, this_end, cand;
1441402899eSTang Chen 	u64 i;
1451402899eSTang Chen 
1461402899eSTang Chen 	for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) {
1471402899eSTang Chen 		this_start = clamp(this_start, start, end);
1481402899eSTang Chen 		this_end = clamp(this_end, start, end);
1491402899eSTang Chen 
1501402899eSTang Chen 		if (this_end < size)
1511402899eSTang Chen 			continue;
1521402899eSTang Chen 
1531402899eSTang Chen 		cand = round_down(this_end - size, align);
1541402899eSTang Chen 		if (cand >= this_start)
1551402899eSTang Chen 			return cand;
1561402899eSTang Chen 	}
1571402899eSTang Chen 
1581402899eSTang Chen 	return 0;
1591402899eSTang Chen }
1601402899eSTang Chen 
1611402899eSTang Chen /**
1627bd0b0f0STejun Heo  * memblock_find_in_range_node - find free area in given range and node
1637bd0b0f0STejun Heo  * @size: size of free area to find
1647bd0b0f0STejun Heo  * @align: alignment of free area to find
16587029ee9SGrygorii Strashko  * @start: start of candidate range
16687029ee9SGrygorii Strashko  * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
167b1154233SGrygorii Strashko  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1687bd0b0f0STejun Heo  *
1697bd0b0f0STejun Heo  * Find @size free area aligned to @align in the specified range and node.
1707bd0b0f0STejun Heo  *
17179442ed1STang Chen  * When allocation direction is bottom-up, the @start should be greater
17279442ed1STang Chen  * than the end of the kernel image. Otherwise, it will be trimmed. The
17379442ed1STang Chen  * reason is that we want the bottom-up allocation just near the kernel
17479442ed1STang Chen  * image so it is highly likely that the allocated memory and the kernel
17579442ed1STang Chen  * will reside in the same node.
17679442ed1STang Chen  *
17779442ed1STang Chen  * If bottom-up allocation failed, will try to allocate memory top-down.
17879442ed1STang Chen  *
1797bd0b0f0STejun Heo  * RETURNS:
18079442ed1STang Chen  * Found address on success, 0 on failure.
1816ed311b2SBenjamin Herrenschmidt  */
18287029ee9SGrygorii Strashko phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
18387029ee9SGrygorii Strashko 					phys_addr_t align, phys_addr_t start,
18487029ee9SGrygorii Strashko 					phys_addr_t end, int nid)
185f7210e6cSTang Chen {
18679442ed1STang Chen 	int ret;
18779442ed1STang Chen 	phys_addr_t kernel_end;
18879442ed1STang Chen 
189f7210e6cSTang Chen 	/* pump up @end */
190f7210e6cSTang Chen 	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
191f7210e6cSTang Chen 		end = memblock.current_limit;
192f7210e6cSTang Chen 
193f7210e6cSTang Chen 	/* avoid allocating the first page */
194f7210e6cSTang Chen 	start = max_t(phys_addr_t, start, PAGE_SIZE);
195f7210e6cSTang Chen 	end = max(start, end);
19679442ed1STang Chen 	kernel_end = __pa_symbol(_end);
19779442ed1STang Chen 
19879442ed1STang Chen 	/*
19979442ed1STang Chen 	 * try bottom-up allocation only when bottom-up mode
20079442ed1STang Chen 	 * is set and @end is above the kernel image.
20179442ed1STang Chen 	 */
20279442ed1STang Chen 	if (memblock_bottom_up() && end > kernel_end) {
20379442ed1STang Chen 		phys_addr_t bottom_up_start;
20479442ed1STang Chen 
20579442ed1STang Chen 		/* make sure we will allocate above the kernel */
20679442ed1STang Chen 		bottom_up_start = max(start, kernel_end);
20779442ed1STang Chen 
20879442ed1STang Chen 		/* ok, try bottom-up allocation first */
20979442ed1STang Chen 		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
21079442ed1STang Chen 						      size, align, nid);
21179442ed1STang Chen 		if (ret)
21279442ed1STang Chen 			return ret;
21379442ed1STang Chen 
21479442ed1STang Chen 		/*
21579442ed1STang Chen 		 * we always limit bottom-up allocation above the kernel,
21679442ed1STang Chen 		 * but top-down allocation doesn't have the limit, so
21779442ed1STang Chen 		 * retrying top-down allocation may succeed when bottom-up
21879442ed1STang Chen 		 * allocation failed.
21979442ed1STang Chen 		 *
22079442ed1STang Chen 		 * bottom-up allocation is expected to be fail very rarely,
22179442ed1STang Chen 		 * so we use WARN_ONCE() here to see the stack trace if
22279442ed1STang Chen 		 * fail happens.
22379442ed1STang Chen 		 */
22479442ed1STang Chen 		WARN_ONCE(1, "memblock: bottom-up allocation failed, "
22579442ed1STang Chen 			     "memory hotunplug may be affected\n");
22679442ed1STang Chen 	}
227f7210e6cSTang Chen 
2281402899eSTang Chen 	return __memblock_find_range_top_down(start, end, size, align, nid);
229f7210e6cSTang Chen }
2306ed311b2SBenjamin Herrenschmidt 
2317bd0b0f0STejun Heo /**
2327bd0b0f0STejun Heo  * memblock_find_in_range - find free area in given range
2337bd0b0f0STejun Heo  * @start: start of candidate range
2347bd0b0f0STejun Heo  * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
2357bd0b0f0STejun Heo  * @size: size of free area to find
2367bd0b0f0STejun Heo  * @align: alignment of free area to find
2377bd0b0f0STejun Heo  *
2387bd0b0f0STejun Heo  * Find @size free area aligned to @align in the specified range.
2397bd0b0f0STejun Heo  *
2407bd0b0f0STejun Heo  * RETURNS:
24179442ed1STang Chen  * Found address on success, 0 on failure.
2427bd0b0f0STejun Heo  */
2437bd0b0f0STejun Heo phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
2447bd0b0f0STejun Heo 					phys_addr_t end, phys_addr_t size,
2457bd0b0f0STejun Heo 					phys_addr_t align)
2467bd0b0f0STejun Heo {
24787029ee9SGrygorii Strashko 	return memblock_find_in_range_node(size, align, start, end,
248b1154233SGrygorii Strashko 					    NUMA_NO_NODE);
2497bd0b0f0STejun Heo }
2507bd0b0f0STejun Heo 
25110d06439SYinghai Lu static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
25295f72d1eSYinghai Lu {
2531440c4e2STejun Heo 	type->total_size -= type->regions[r].size;
2547c0caeb8STejun Heo 	memmove(&type->regions[r], &type->regions[r + 1],
2557c0caeb8STejun Heo 		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
256e3239ff9SBenjamin Herrenschmidt 	type->cnt--;
25795f72d1eSYinghai Lu 
2588f7a6605SBenjamin Herrenschmidt 	/* Special case for empty arrays */
2598f7a6605SBenjamin Herrenschmidt 	if (type->cnt == 0) {
2601440c4e2STejun Heo 		WARN_ON(type->total_size != 0);
2618f7a6605SBenjamin Herrenschmidt 		type->cnt = 1;
2628f7a6605SBenjamin Herrenschmidt 		type->regions[0].base = 0;
2638f7a6605SBenjamin Herrenschmidt 		type->regions[0].size = 0;
26466a20757STang Chen 		type->regions[0].flags = 0;
2657c0caeb8STejun Heo 		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
2668f7a6605SBenjamin Herrenschmidt 	}
26795f72d1eSYinghai Lu }
26895f72d1eSYinghai Lu 
269354f17e1SPhilipp Hachtmann #ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
270354f17e1SPhilipp Hachtmann 
27129f67386SYinghai Lu phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
27229f67386SYinghai Lu 					phys_addr_t *addr)
27329f67386SYinghai Lu {
27429f67386SYinghai Lu 	if (memblock.reserved.regions == memblock_reserved_init_regions)
27529f67386SYinghai Lu 		return 0;
27629f67386SYinghai Lu 
27729f67386SYinghai Lu 	*addr = __pa(memblock.reserved.regions);
27829f67386SYinghai Lu 
27929f67386SYinghai Lu 	return PAGE_ALIGN(sizeof(struct memblock_region) *
28029f67386SYinghai Lu 			  memblock.reserved.max);
28129f67386SYinghai Lu }
28229f67386SYinghai Lu 
2835e270e25SPhilipp Hachtmann phys_addr_t __init_memblock get_allocated_memblock_memory_regions_info(
2845e270e25SPhilipp Hachtmann 					phys_addr_t *addr)
2855e270e25SPhilipp Hachtmann {
2865e270e25SPhilipp Hachtmann 	if (memblock.memory.regions == memblock_memory_init_regions)
2875e270e25SPhilipp Hachtmann 		return 0;
2885e270e25SPhilipp Hachtmann 
2895e270e25SPhilipp Hachtmann 	*addr = __pa(memblock.memory.regions);
2905e270e25SPhilipp Hachtmann 
2915e270e25SPhilipp Hachtmann 	return PAGE_ALIGN(sizeof(struct memblock_region) *
2925e270e25SPhilipp Hachtmann 			  memblock.memory.max);
2935e270e25SPhilipp Hachtmann }
2945e270e25SPhilipp Hachtmann 
2955e270e25SPhilipp Hachtmann #endif
2965e270e25SPhilipp Hachtmann 
29748c3b583SGreg Pearson /**
29848c3b583SGreg Pearson  * memblock_double_array - double the size of the memblock regions array
29948c3b583SGreg Pearson  * @type: memblock type of the regions array being doubled
30048c3b583SGreg Pearson  * @new_area_start: starting address of memory range to avoid overlap with
30148c3b583SGreg Pearson  * @new_area_size: size of memory range to avoid overlap with
30248c3b583SGreg Pearson  *
30348c3b583SGreg Pearson  * Double the size of the @type regions array. If memblock is being used to
30448c3b583SGreg Pearson  * allocate memory for a new reserved regions array and there is a previously
30548c3b583SGreg Pearson  * allocated memory range [@new_area_start,@new_area_start+@new_area_size]
30648c3b583SGreg Pearson  * waiting to be reserved, ensure the memory used by the new array does
30748c3b583SGreg Pearson  * not overlap.
30848c3b583SGreg Pearson  *
30948c3b583SGreg Pearson  * RETURNS:
31048c3b583SGreg Pearson  * 0 on success, -1 on failure.
31148c3b583SGreg Pearson  */
31248c3b583SGreg Pearson static int __init_memblock memblock_double_array(struct memblock_type *type,
31348c3b583SGreg Pearson 						phys_addr_t new_area_start,
31448c3b583SGreg Pearson 						phys_addr_t new_area_size)
315142b45a7SBenjamin Herrenschmidt {
316142b45a7SBenjamin Herrenschmidt 	struct memblock_region *new_array, *old_array;
31729f67386SYinghai Lu 	phys_addr_t old_alloc_size, new_alloc_size;
318142b45a7SBenjamin Herrenschmidt 	phys_addr_t old_size, new_size, addr;
319142b45a7SBenjamin Herrenschmidt 	int use_slab = slab_is_available();
320181eb394SGavin Shan 	int *in_slab;
321142b45a7SBenjamin Herrenschmidt 
322142b45a7SBenjamin Herrenschmidt 	/* We don't allow resizing until we know about the reserved regions
323142b45a7SBenjamin Herrenschmidt 	 * of memory that aren't suitable for allocation
324142b45a7SBenjamin Herrenschmidt 	 */
325142b45a7SBenjamin Herrenschmidt 	if (!memblock_can_resize)
326142b45a7SBenjamin Herrenschmidt 		return -1;
327142b45a7SBenjamin Herrenschmidt 
328142b45a7SBenjamin Herrenschmidt 	/* Calculate new doubled size */
329142b45a7SBenjamin Herrenschmidt 	old_size = type->max * sizeof(struct memblock_region);
330142b45a7SBenjamin Herrenschmidt 	new_size = old_size << 1;
33129f67386SYinghai Lu 	/*
33229f67386SYinghai Lu 	 * We need to allocated new one align to PAGE_SIZE,
33329f67386SYinghai Lu 	 *   so we can free them completely later.
33429f67386SYinghai Lu 	 */
33529f67386SYinghai Lu 	old_alloc_size = PAGE_ALIGN(old_size);
33629f67386SYinghai Lu 	new_alloc_size = PAGE_ALIGN(new_size);
337142b45a7SBenjamin Herrenschmidt 
338181eb394SGavin Shan 	/* Retrieve the slab flag */
339181eb394SGavin Shan 	if (type == &memblock.memory)
340181eb394SGavin Shan 		in_slab = &memblock_memory_in_slab;
341181eb394SGavin Shan 	else
342181eb394SGavin Shan 		in_slab = &memblock_reserved_in_slab;
343181eb394SGavin Shan 
344142b45a7SBenjamin Herrenschmidt 	/* Try to find some space for it.
345142b45a7SBenjamin Herrenschmidt 	 *
346142b45a7SBenjamin Herrenschmidt 	 * WARNING: We assume that either slab_is_available() and we use it or
347fd07383bSAndrew Morton 	 * we use MEMBLOCK for allocations. That means that this is unsafe to
348fd07383bSAndrew Morton 	 * use when bootmem is currently active (unless bootmem itself is
349fd07383bSAndrew Morton 	 * implemented on top of MEMBLOCK which isn't the case yet)
350142b45a7SBenjamin Herrenschmidt 	 *
351142b45a7SBenjamin Herrenschmidt 	 * This should however not be an issue for now, as we currently only
352fd07383bSAndrew Morton 	 * call into MEMBLOCK while it's still active, or much later when slab
353fd07383bSAndrew Morton 	 * is active for memory hotplug operations
354142b45a7SBenjamin Herrenschmidt 	 */
355142b45a7SBenjamin Herrenschmidt 	if (use_slab) {
356142b45a7SBenjamin Herrenschmidt 		new_array = kmalloc(new_size, GFP_KERNEL);
3571f5026a7STejun Heo 		addr = new_array ? __pa(new_array) : 0;
3584e2f0775SGavin Shan 	} else {
35948c3b583SGreg Pearson 		/* only exclude range when trying to double reserved.regions */
36048c3b583SGreg Pearson 		if (type != &memblock.reserved)
36148c3b583SGreg Pearson 			new_area_start = new_area_size = 0;
36248c3b583SGreg Pearson 
36348c3b583SGreg Pearson 		addr = memblock_find_in_range(new_area_start + new_area_size,
36448c3b583SGreg Pearson 						memblock.current_limit,
36529f67386SYinghai Lu 						new_alloc_size, PAGE_SIZE);
36648c3b583SGreg Pearson 		if (!addr && new_area_size)
36748c3b583SGreg Pearson 			addr = memblock_find_in_range(0,
36848c3b583SGreg Pearson 				min(new_area_start, memblock.current_limit),
36929f67386SYinghai Lu 				new_alloc_size, PAGE_SIZE);
37048c3b583SGreg Pearson 
37115674868SSachin Kamat 		new_array = addr ? __va(addr) : NULL;
3724e2f0775SGavin Shan 	}
3731f5026a7STejun Heo 	if (!addr) {
374142b45a7SBenjamin Herrenschmidt 		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
375142b45a7SBenjamin Herrenschmidt 		       memblock_type_name(type), type->max, type->max * 2);
376142b45a7SBenjamin Herrenschmidt 		return -1;
377142b45a7SBenjamin Herrenschmidt 	}
378142b45a7SBenjamin Herrenschmidt 
379fd07383bSAndrew Morton 	memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]",
380fd07383bSAndrew Morton 			memblock_type_name(type), type->max * 2, (u64)addr,
381fd07383bSAndrew Morton 			(u64)addr + new_size - 1);
382ea9e4376SYinghai Lu 
383fd07383bSAndrew Morton 	/*
384fd07383bSAndrew Morton 	 * Found space, we now need to move the array over before we add the
385fd07383bSAndrew Morton 	 * reserved region since it may be our reserved array itself that is
386fd07383bSAndrew Morton 	 * full.
387142b45a7SBenjamin Herrenschmidt 	 */
388142b45a7SBenjamin Herrenschmidt 	memcpy(new_array, type->regions, old_size);
389142b45a7SBenjamin Herrenschmidt 	memset(new_array + type->max, 0, old_size);
390142b45a7SBenjamin Herrenschmidt 	old_array = type->regions;
391142b45a7SBenjamin Herrenschmidt 	type->regions = new_array;
392142b45a7SBenjamin Herrenschmidt 	type->max <<= 1;
393142b45a7SBenjamin Herrenschmidt 
394fd07383bSAndrew Morton 	/* Free old array. We needn't free it if the array is the static one */
395181eb394SGavin Shan 	if (*in_slab)
396181eb394SGavin Shan 		kfree(old_array);
397181eb394SGavin Shan 	else if (old_array != memblock_memory_init_regions &&
398142b45a7SBenjamin Herrenschmidt 		 old_array != memblock_reserved_init_regions)
39929f67386SYinghai Lu 		memblock_free(__pa(old_array), old_alloc_size);
400142b45a7SBenjamin Herrenschmidt 
401fd07383bSAndrew Morton 	/*
402fd07383bSAndrew Morton 	 * Reserve the new array if that comes from the memblock.  Otherwise, we
403fd07383bSAndrew Morton 	 * needn't do it
404181eb394SGavin Shan 	 */
405181eb394SGavin Shan 	if (!use_slab)
40629f67386SYinghai Lu 		BUG_ON(memblock_reserve(addr, new_alloc_size));
407181eb394SGavin Shan 
408181eb394SGavin Shan 	/* Update slab flag */
409181eb394SGavin Shan 	*in_slab = use_slab;
410181eb394SGavin Shan 
411142b45a7SBenjamin Herrenschmidt 	return 0;
412142b45a7SBenjamin Herrenschmidt }
413142b45a7SBenjamin Herrenschmidt 
414784656f9STejun Heo /**
415784656f9STejun Heo  * memblock_merge_regions - merge neighboring compatible regions
416784656f9STejun Heo  * @type: memblock type to scan
417784656f9STejun Heo  *
418784656f9STejun Heo  * Scan @type and merge neighboring compatible regions.
419784656f9STejun Heo  */
420784656f9STejun Heo static void __init_memblock memblock_merge_regions(struct memblock_type *type)
421784656f9STejun Heo {
422784656f9STejun Heo 	int i = 0;
423784656f9STejun Heo 
424784656f9STejun Heo 	/* cnt never goes below 1 */
425784656f9STejun Heo 	while (i < type->cnt - 1) {
426784656f9STejun Heo 		struct memblock_region *this = &type->regions[i];
427784656f9STejun Heo 		struct memblock_region *next = &type->regions[i + 1];
428784656f9STejun Heo 
4297c0caeb8STejun Heo 		if (this->base + this->size != next->base ||
4307c0caeb8STejun Heo 		    memblock_get_region_node(this) !=
43166a20757STang Chen 		    memblock_get_region_node(next) ||
43266a20757STang Chen 		    this->flags != next->flags) {
433784656f9STejun Heo 			BUG_ON(this->base + this->size > next->base);
434784656f9STejun Heo 			i++;
435784656f9STejun Heo 			continue;
436784656f9STejun Heo 		}
437784656f9STejun Heo 
438784656f9STejun Heo 		this->size += next->size;
439c0232ae8SLin Feng 		/* move forward from next + 1, index of which is i + 2 */
440c0232ae8SLin Feng 		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
441784656f9STejun Heo 		type->cnt--;
442784656f9STejun Heo 	}
443784656f9STejun Heo }
444784656f9STejun Heo 
445784656f9STejun Heo /**
446784656f9STejun Heo  * memblock_insert_region - insert new memblock region
447784656f9STejun Heo  * @type:	memblock type to insert into
448784656f9STejun Heo  * @idx:	index for the insertion point
449784656f9STejun Heo  * @base:	base address of the new region
450784656f9STejun Heo  * @size:	size of the new region
451209ff86dSTang Chen  * @nid:	node id of the new region
45266a20757STang Chen  * @flags:	flags of the new region
453784656f9STejun Heo  *
454784656f9STejun Heo  * Insert new memblock region [@base,@base+@size) into @type at @idx.
455784656f9STejun Heo  * @type must already have extra room to accomodate the new region.
456784656f9STejun Heo  */
457784656f9STejun Heo static void __init_memblock memblock_insert_region(struct memblock_type *type,
458784656f9STejun Heo 						   int idx, phys_addr_t base,
45966a20757STang Chen 						   phys_addr_t size,
46066a20757STang Chen 						   int nid, unsigned long flags)
461784656f9STejun Heo {
462784656f9STejun Heo 	struct memblock_region *rgn = &type->regions[idx];
463784656f9STejun Heo 
464784656f9STejun Heo 	BUG_ON(type->cnt >= type->max);
465784656f9STejun Heo 	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
466784656f9STejun Heo 	rgn->base = base;
467784656f9STejun Heo 	rgn->size = size;
46866a20757STang Chen 	rgn->flags = flags;
4697c0caeb8STejun Heo 	memblock_set_region_node(rgn, nid);
470784656f9STejun Heo 	type->cnt++;
4711440c4e2STejun Heo 	type->total_size += size;
472784656f9STejun Heo }
473784656f9STejun Heo 
474784656f9STejun Heo /**
475784656f9STejun Heo  * memblock_add_region - add new memblock region
476784656f9STejun Heo  * @type: memblock type to add new region into
477784656f9STejun Heo  * @base: base address of the new region
478784656f9STejun Heo  * @size: size of the new region
4797fb0bc3fSTejun Heo  * @nid: nid of the new region
48066a20757STang Chen  * @flags: flags of the new region
481784656f9STejun Heo  *
482784656f9STejun Heo  * Add new memblock region [@base,@base+@size) into @type.  The new region
483784656f9STejun Heo  * is allowed to overlap with existing ones - overlaps don't affect already
484784656f9STejun Heo  * existing regions.  @type is guaranteed to be minimal (all neighbouring
485784656f9STejun Heo  * compatible regions are merged) after the addition.
486784656f9STejun Heo  *
487784656f9STejun Heo  * RETURNS:
488784656f9STejun Heo  * 0 on success, -errno on failure.
489784656f9STejun Heo  */
490581adcbeSTejun Heo static int __init_memblock memblock_add_region(struct memblock_type *type,
49166a20757STang Chen 				phys_addr_t base, phys_addr_t size,
49266a20757STang Chen 				int nid, unsigned long flags)
49395f72d1eSYinghai Lu {
494784656f9STejun Heo 	bool insert = false;
495eb18f1b5STejun Heo 	phys_addr_t obase = base;
496eb18f1b5STejun Heo 	phys_addr_t end = base + memblock_cap_size(base, &size);
497784656f9STejun Heo 	int i, nr_new;
49895f72d1eSYinghai Lu 
499b3dc627cSTejun Heo 	if (!size)
500b3dc627cSTejun Heo 		return 0;
501b3dc627cSTejun Heo 
502784656f9STejun Heo 	/* special case for empty array */
503784656f9STejun Heo 	if (type->regions[0].size == 0) {
5041440c4e2STejun Heo 		WARN_ON(type->cnt != 1 || type->total_size);
505784656f9STejun Heo 		type->regions[0].base = base;
506784656f9STejun Heo 		type->regions[0].size = size;
50766a20757STang Chen 		type->regions[0].flags = flags;
5087fb0bc3fSTejun Heo 		memblock_set_region_node(&type->regions[0], nid);
5091440c4e2STejun Heo 		type->total_size = size;
510784656f9STejun Heo 		return 0;
511784656f9STejun Heo 	}
512784656f9STejun Heo repeat:
513784656f9STejun Heo 	/*
514784656f9STejun Heo 	 * The following is executed twice.  Once with %false @insert and
515784656f9STejun Heo 	 * then with %true.  The first counts the number of regions needed
516784656f9STejun Heo 	 * to accomodate the new area.  The second actually inserts them.
517784656f9STejun Heo 	 */
518784656f9STejun Heo 	base = obase;
519784656f9STejun Heo 	nr_new = 0;
520784656f9STejun Heo 
5218f7a6605SBenjamin Herrenschmidt 	for (i = 0; i < type->cnt; i++) {
5228f7a6605SBenjamin Herrenschmidt 		struct memblock_region *rgn = &type->regions[i];
523784656f9STejun Heo 		phys_addr_t rbase = rgn->base;
524784656f9STejun Heo 		phys_addr_t rend = rbase + rgn->size;
5258f7a6605SBenjamin Herrenschmidt 
526784656f9STejun Heo 		if (rbase >= end)
5278f7a6605SBenjamin Herrenschmidt 			break;
528784656f9STejun Heo 		if (rend <= base)
529784656f9STejun Heo 			continue;
530784656f9STejun Heo 		/*
531784656f9STejun Heo 		 * @rgn overlaps.  If it separates the lower part of new
532784656f9STejun Heo 		 * area, insert that portion.
5338f7a6605SBenjamin Herrenschmidt 		 */
534784656f9STejun Heo 		if (rbase > base) {
535784656f9STejun Heo 			nr_new++;
536784656f9STejun Heo 			if (insert)
537784656f9STejun Heo 				memblock_insert_region(type, i++, base,
53866a20757STang Chen 						       rbase - base, nid,
53966a20757STang Chen 						       flags);
540784656f9STejun Heo 		}
541784656f9STejun Heo 		/* area below @rend is dealt with, forget about it */
542784656f9STejun Heo 		base = min(rend, end);
5438f7a6605SBenjamin Herrenschmidt 	}
5448f7a6605SBenjamin Herrenschmidt 
545784656f9STejun Heo 	/* insert the remaining portion */
546784656f9STejun Heo 	if (base < end) {
547784656f9STejun Heo 		nr_new++;
548784656f9STejun Heo 		if (insert)
54966a20757STang Chen 			memblock_insert_region(type, i, base, end - base,
55066a20757STang Chen 					       nid, flags);
5518f7a6605SBenjamin Herrenschmidt 	}
5528f7a6605SBenjamin Herrenschmidt 
553784656f9STejun Heo 	/*
554784656f9STejun Heo 	 * If this was the first round, resize array and repeat for actual
555784656f9STejun Heo 	 * insertions; otherwise, merge and return.
5568f7a6605SBenjamin Herrenschmidt 	 */
557784656f9STejun Heo 	if (!insert) {
558784656f9STejun Heo 		while (type->cnt + nr_new > type->max)
55948c3b583SGreg Pearson 			if (memblock_double_array(type, obase, size) < 0)
560784656f9STejun Heo 				return -ENOMEM;
561784656f9STejun Heo 		insert = true;
562784656f9STejun Heo 		goto repeat;
56395f72d1eSYinghai Lu 	} else {
564784656f9STejun Heo 		memblock_merge_regions(type);
56595f72d1eSYinghai Lu 		return 0;
56695f72d1eSYinghai Lu 	}
567784656f9STejun Heo }
56895f72d1eSYinghai Lu 
5697fb0bc3fSTejun Heo int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
5707fb0bc3fSTejun Heo 				       int nid)
5717fb0bc3fSTejun Heo {
57266a20757STang Chen 	return memblock_add_region(&memblock.memory, base, size, nid, 0);
5737fb0bc3fSTejun Heo }
5747fb0bc3fSTejun Heo 
575581adcbeSTejun Heo int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
57695f72d1eSYinghai Lu {
57766a20757STang Chen 	return memblock_add_region(&memblock.memory, base, size,
57866a20757STang Chen 				   MAX_NUMNODES, 0);
57995f72d1eSYinghai Lu }
58095f72d1eSYinghai Lu 
5816a9ceb31STejun Heo /**
5826a9ceb31STejun Heo  * memblock_isolate_range - isolate given range into disjoint memblocks
5836a9ceb31STejun Heo  * @type: memblock type to isolate range for
5846a9ceb31STejun Heo  * @base: base of range to isolate
5856a9ceb31STejun Heo  * @size: size of range to isolate
5866a9ceb31STejun Heo  * @start_rgn: out parameter for the start of isolated region
5876a9ceb31STejun Heo  * @end_rgn: out parameter for the end of isolated region
5886a9ceb31STejun Heo  *
5896a9ceb31STejun Heo  * Walk @type and ensure that regions don't cross the boundaries defined by
5906a9ceb31STejun Heo  * [@base,@base+@size).  Crossing regions are split at the boundaries,
5916a9ceb31STejun Heo  * which may create at most two more regions.  The index of the first
5926a9ceb31STejun Heo  * region inside the range is returned in *@start_rgn and end in *@end_rgn.
5936a9ceb31STejun Heo  *
5946a9ceb31STejun Heo  * RETURNS:
5956a9ceb31STejun Heo  * 0 on success, -errno on failure.
5966a9ceb31STejun Heo  */
5976a9ceb31STejun Heo static int __init_memblock memblock_isolate_range(struct memblock_type *type,
5986a9ceb31STejun Heo 					phys_addr_t base, phys_addr_t size,
5996a9ceb31STejun Heo 					int *start_rgn, int *end_rgn)
6006a9ceb31STejun Heo {
601eb18f1b5STejun Heo 	phys_addr_t end = base + memblock_cap_size(base, &size);
6026a9ceb31STejun Heo 	int i;
6036a9ceb31STejun Heo 
6046a9ceb31STejun Heo 	*start_rgn = *end_rgn = 0;
6056a9ceb31STejun Heo 
606b3dc627cSTejun Heo 	if (!size)
607b3dc627cSTejun Heo 		return 0;
608b3dc627cSTejun Heo 
6096a9ceb31STejun Heo 	/* we'll create at most two more regions */
6106a9ceb31STejun Heo 	while (type->cnt + 2 > type->max)
61148c3b583SGreg Pearson 		if (memblock_double_array(type, base, size) < 0)
6126a9ceb31STejun Heo 			return -ENOMEM;
6136a9ceb31STejun Heo 
6146a9ceb31STejun Heo 	for (i = 0; i < type->cnt; i++) {
6156a9ceb31STejun Heo 		struct memblock_region *rgn = &type->regions[i];
6166a9ceb31STejun Heo 		phys_addr_t rbase = rgn->base;
6176a9ceb31STejun Heo 		phys_addr_t rend = rbase + rgn->size;
6186a9ceb31STejun Heo 
6196a9ceb31STejun Heo 		if (rbase >= end)
6206a9ceb31STejun Heo 			break;
6216a9ceb31STejun Heo 		if (rend <= base)
6226a9ceb31STejun Heo 			continue;
6236a9ceb31STejun Heo 
6246a9ceb31STejun Heo 		if (rbase < base) {
6256a9ceb31STejun Heo 			/*
6266a9ceb31STejun Heo 			 * @rgn intersects from below.  Split and continue
6276a9ceb31STejun Heo 			 * to process the next region - the new top half.
6286a9ceb31STejun Heo 			 */
6296a9ceb31STejun Heo 			rgn->base = base;
6301440c4e2STejun Heo 			rgn->size -= base - rbase;
6311440c4e2STejun Heo 			type->total_size -= base - rbase;
6326a9ceb31STejun Heo 			memblock_insert_region(type, i, rbase, base - rbase,
63366a20757STang Chen 					       memblock_get_region_node(rgn),
63466a20757STang Chen 					       rgn->flags);
6356a9ceb31STejun Heo 		} else if (rend > end) {
6366a9ceb31STejun Heo 			/*
6376a9ceb31STejun Heo 			 * @rgn intersects from above.  Split and redo the
6386a9ceb31STejun Heo 			 * current region - the new bottom half.
6396a9ceb31STejun Heo 			 */
6406a9ceb31STejun Heo 			rgn->base = end;
6411440c4e2STejun Heo 			rgn->size -= end - rbase;
6421440c4e2STejun Heo 			type->total_size -= end - rbase;
6436a9ceb31STejun Heo 			memblock_insert_region(type, i--, rbase, end - rbase,
64466a20757STang Chen 					       memblock_get_region_node(rgn),
64566a20757STang Chen 					       rgn->flags);
6466a9ceb31STejun Heo 		} else {
6476a9ceb31STejun Heo 			/* @rgn is fully contained, record it */
6486a9ceb31STejun Heo 			if (!*end_rgn)
6496a9ceb31STejun Heo 				*start_rgn = i;
6506a9ceb31STejun Heo 			*end_rgn = i + 1;
6516a9ceb31STejun Heo 		}
6526a9ceb31STejun Heo 	}
6536a9ceb31STejun Heo 
6546a9ceb31STejun Heo 	return 0;
6556a9ceb31STejun Heo }
6566a9ceb31STejun Heo 
657581adcbeSTejun Heo static int __init_memblock __memblock_remove(struct memblock_type *type,
6588f7a6605SBenjamin Herrenschmidt 					     phys_addr_t base, phys_addr_t size)
65995f72d1eSYinghai Lu {
66071936180STejun Heo 	int start_rgn, end_rgn;
66171936180STejun Heo 	int i, ret;
66295f72d1eSYinghai Lu 
66371936180STejun Heo 	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
66471936180STejun Heo 	if (ret)
66571936180STejun Heo 		return ret;
66695f72d1eSYinghai Lu 
66771936180STejun Heo 	for (i = end_rgn - 1; i >= start_rgn; i--)
66871936180STejun Heo 		memblock_remove_region(type, i);
66995f72d1eSYinghai Lu 	return 0;
67095f72d1eSYinghai Lu }
67195f72d1eSYinghai Lu 
672581adcbeSTejun Heo int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
67395f72d1eSYinghai Lu {
67495f72d1eSYinghai Lu 	return __memblock_remove(&memblock.memory, base, size);
67595f72d1eSYinghai Lu }
67695f72d1eSYinghai Lu 
677581adcbeSTejun Heo int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
67895f72d1eSYinghai Lu {
67924aa0788STejun Heo 	memblock_dbg("   memblock_free: [%#016llx-%#016llx] %pF\n",
680a150439cSH. Peter Anvin 		     (unsigned long long)base,
681931d13f5SGrygorii Strashko 		     (unsigned long long)base + size - 1,
682a150439cSH. Peter Anvin 		     (void *)_RET_IP_);
68324aa0788STejun Heo 
68495f72d1eSYinghai Lu 	return __memblock_remove(&memblock.reserved, base, size);
68595f72d1eSYinghai Lu }
68695f72d1eSYinghai Lu 
68766a20757STang Chen static int __init_memblock memblock_reserve_region(phys_addr_t base,
68866a20757STang Chen 						   phys_addr_t size,
68966a20757STang Chen 						   int nid,
69066a20757STang Chen 						   unsigned long flags)
69195f72d1eSYinghai Lu {
692e3239ff9SBenjamin Herrenschmidt 	struct memblock_type *_rgn = &memblock.reserved;
69395f72d1eSYinghai Lu 
69466a20757STang Chen 	memblock_dbg("memblock_reserve: [%#016llx-%#016llx] flags %#02lx %pF\n",
695a150439cSH. Peter Anvin 		     (unsigned long long)base,
696931d13f5SGrygorii Strashko 		     (unsigned long long)base + size - 1,
69766a20757STang Chen 		     flags, (void *)_RET_IP_);
69895f72d1eSYinghai Lu 
69966a20757STang Chen 	return memblock_add_region(_rgn, base, size, nid, flags);
70066a20757STang Chen }
70166a20757STang Chen 
70266a20757STang Chen int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
70366a20757STang Chen {
70466a20757STang Chen 	return memblock_reserve_region(base, size, MAX_NUMNODES, 0);
70595f72d1eSYinghai Lu }
70695f72d1eSYinghai Lu 
70735fd0808STejun Heo /**
70866b16edfSTang Chen  * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
70966b16edfSTang Chen  * @base: the base phys addr of the region
71066b16edfSTang Chen  * @size: the size of the region
71166b16edfSTang Chen  *
71266b16edfSTang Chen  * This function isolates region [@base, @base + @size), and mark it with flag
71366b16edfSTang Chen  * MEMBLOCK_HOTPLUG.
71466b16edfSTang Chen  *
71566b16edfSTang Chen  * Return 0 on succees, -errno on failure.
71666b16edfSTang Chen  */
71766b16edfSTang Chen int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
71866b16edfSTang Chen {
71966b16edfSTang Chen 	struct memblock_type *type = &memblock.memory;
72066b16edfSTang Chen 	int i, ret, start_rgn, end_rgn;
72166b16edfSTang Chen 
72266b16edfSTang Chen 	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
72366b16edfSTang Chen 	if (ret)
72466b16edfSTang Chen 		return ret;
72566b16edfSTang Chen 
72666b16edfSTang Chen 	for (i = start_rgn; i < end_rgn; i++)
72766b16edfSTang Chen 		memblock_set_region_flags(&type->regions[i], MEMBLOCK_HOTPLUG);
72866b16edfSTang Chen 
72966b16edfSTang Chen 	memblock_merge_regions(type);
73066b16edfSTang Chen 	return 0;
73166b16edfSTang Chen }
73266b16edfSTang Chen 
73366b16edfSTang Chen /**
73466b16edfSTang Chen  * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
73566b16edfSTang Chen  * @base: the base phys addr of the region
73666b16edfSTang Chen  * @size: the size of the region
73766b16edfSTang Chen  *
73866b16edfSTang Chen  * This function isolates region [@base, @base + @size), and clear flag
73966b16edfSTang Chen  * MEMBLOCK_HOTPLUG for the isolated regions.
74066b16edfSTang Chen  *
74166b16edfSTang Chen  * Return 0 on succees, -errno on failure.
74266b16edfSTang Chen  */
74366b16edfSTang Chen int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
74466b16edfSTang Chen {
74566b16edfSTang Chen 	struct memblock_type *type = &memblock.memory;
74666b16edfSTang Chen 	int i, ret, start_rgn, end_rgn;
74766b16edfSTang Chen 
74866b16edfSTang Chen 	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
74966b16edfSTang Chen 	if (ret)
75066b16edfSTang Chen 		return ret;
75166b16edfSTang Chen 
75266b16edfSTang Chen 	for (i = start_rgn; i < end_rgn; i++)
75366b16edfSTang Chen 		memblock_clear_region_flags(&type->regions[i],
75466b16edfSTang Chen 					    MEMBLOCK_HOTPLUG);
75566b16edfSTang Chen 
75666b16edfSTang Chen 	memblock_merge_regions(type);
75766b16edfSTang Chen 	return 0;
75866b16edfSTang Chen }
75966b16edfSTang Chen 
76066b16edfSTang Chen /**
76135fd0808STejun Heo  * __next_free_mem_range - next function for for_each_free_mem_range()
76235fd0808STejun Heo  * @idx: pointer to u64 loop variable
763b1154233SGrygorii Strashko  * @nid: node selector, %NUMA_NO_NODE for all nodes
764dad7557eSWanpeng Li  * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
765dad7557eSWanpeng Li  * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
766dad7557eSWanpeng Li  * @out_nid: ptr to int for nid of the range, can be %NULL
76735fd0808STejun Heo  *
76835fd0808STejun Heo  * Find the first free area from *@idx which matches @nid, fill the out
76935fd0808STejun Heo  * parameters, and update *@idx for the next iteration.  The lower 32bit of
77035fd0808STejun Heo  * *@idx contains index into memory region and the upper 32bit indexes the
77135fd0808STejun Heo  * areas before each reserved region.  For example, if reserved regions
77235fd0808STejun Heo  * look like the following,
77335fd0808STejun Heo  *
77435fd0808STejun Heo  *	0:[0-16), 1:[32-48), 2:[128-130)
77535fd0808STejun Heo  *
77635fd0808STejun Heo  * The upper 32bit indexes the following regions.
77735fd0808STejun Heo  *
77835fd0808STejun Heo  *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
77935fd0808STejun Heo  *
78035fd0808STejun Heo  * As both region arrays are sorted, the function advances the two indices
78135fd0808STejun Heo  * in lockstep and returns each intersection.
78235fd0808STejun Heo  */
78335fd0808STejun Heo void __init_memblock __next_free_mem_range(u64 *idx, int nid,
78435fd0808STejun Heo 					   phys_addr_t *out_start,
78535fd0808STejun Heo 					   phys_addr_t *out_end, int *out_nid)
78635fd0808STejun Heo {
78735fd0808STejun Heo 	struct memblock_type *mem = &memblock.memory;
78835fd0808STejun Heo 	struct memblock_type *rsv = &memblock.reserved;
78935fd0808STejun Heo 	int mi = *idx & 0xffffffff;
79035fd0808STejun Heo 	int ri = *idx >> 32;
791b1154233SGrygorii Strashko 
792560dca27SGrygorii Strashko 	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
793560dca27SGrygorii Strashko 		nid = NUMA_NO_NODE;
79435fd0808STejun Heo 
79535fd0808STejun Heo 	for ( ; mi < mem->cnt; mi++) {
79635fd0808STejun Heo 		struct memblock_region *m = &mem->regions[mi];
79735fd0808STejun Heo 		phys_addr_t m_start = m->base;
79835fd0808STejun Heo 		phys_addr_t m_end = m->base + m->size;
79935fd0808STejun Heo 
80035fd0808STejun Heo 		/* only memory regions are associated with nodes, check it */
801560dca27SGrygorii Strashko 		if (nid != NUMA_NO_NODE && nid != memblock_get_region_node(m))
80235fd0808STejun Heo 			continue;
80335fd0808STejun Heo 
80435fd0808STejun Heo 		/* scan areas before each reservation for intersection */
80535fd0808STejun Heo 		for ( ; ri < rsv->cnt + 1; ri++) {
80635fd0808STejun Heo 			struct memblock_region *r = &rsv->regions[ri];
80735fd0808STejun Heo 			phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0;
80835fd0808STejun Heo 			phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX;
80935fd0808STejun Heo 
81035fd0808STejun Heo 			/* if ri advanced past mi, break out to advance mi */
81135fd0808STejun Heo 			if (r_start >= m_end)
81235fd0808STejun Heo 				break;
81335fd0808STejun Heo 			/* if the two regions intersect, we're done */
81435fd0808STejun Heo 			if (m_start < r_end) {
81535fd0808STejun Heo 				if (out_start)
81635fd0808STejun Heo 					*out_start = max(m_start, r_start);
81735fd0808STejun Heo 				if (out_end)
81835fd0808STejun Heo 					*out_end = min(m_end, r_end);
81935fd0808STejun Heo 				if (out_nid)
82035fd0808STejun Heo 					*out_nid = memblock_get_region_node(m);
82135fd0808STejun Heo 				/*
82235fd0808STejun Heo 				 * The region which ends first is advanced
82335fd0808STejun Heo 				 * for the next iteration.
82435fd0808STejun Heo 				 */
82535fd0808STejun Heo 				if (m_end <= r_end)
82635fd0808STejun Heo 					mi++;
82735fd0808STejun Heo 				else
82835fd0808STejun Heo 					ri++;
82935fd0808STejun Heo 				*idx = (u32)mi | (u64)ri << 32;
83035fd0808STejun Heo 				return;
83135fd0808STejun Heo 			}
83235fd0808STejun Heo 		}
83335fd0808STejun Heo 	}
83435fd0808STejun Heo 
83535fd0808STejun Heo 	/* signal end of iteration */
83635fd0808STejun Heo 	*idx = ULLONG_MAX;
83735fd0808STejun Heo }
83835fd0808STejun Heo 
8397bd0b0f0STejun Heo /**
8407bd0b0f0STejun Heo  * __next_free_mem_range_rev - next function for for_each_free_mem_range_reverse()
8417bd0b0f0STejun Heo  * @idx: pointer to u64 loop variable
842b1154233SGrygorii Strashko  * @nid: nid: node selector, %NUMA_NO_NODE for all nodes
843dad7557eSWanpeng Li  * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
844dad7557eSWanpeng Li  * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
845dad7557eSWanpeng Li  * @out_nid: ptr to int for nid of the range, can be %NULL
8467bd0b0f0STejun Heo  *
8477bd0b0f0STejun Heo  * Reverse of __next_free_mem_range().
84855ac590cSTang Chen  *
84955ac590cSTang Chen  * Linux kernel cannot migrate pages used by itself. Memory hotplug users won't
85055ac590cSTang Chen  * be able to hot-remove hotpluggable memory used by the kernel. So this
85155ac590cSTang Chen  * function skip hotpluggable regions if needed when allocating memory for the
85255ac590cSTang Chen  * kernel.
8537bd0b0f0STejun Heo  */
8547bd0b0f0STejun Heo void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid,
8557bd0b0f0STejun Heo 					   phys_addr_t *out_start,
8567bd0b0f0STejun Heo 					   phys_addr_t *out_end, int *out_nid)
8577bd0b0f0STejun Heo {
8587bd0b0f0STejun Heo 	struct memblock_type *mem = &memblock.memory;
8597bd0b0f0STejun Heo 	struct memblock_type *rsv = &memblock.reserved;
8607bd0b0f0STejun Heo 	int mi = *idx & 0xffffffff;
8617bd0b0f0STejun Heo 	int ri = *idx >> 32;
862b1154233SGrygorii Strashko 
863560dca27SGrygorii Strashko 	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
864560dca27SGrygorii Strashko 		nid = NUMA_NO_NODE;
8657bd0b0f0STejun Heo 
8667bd0b0f0STejun Heo 	if (*idx == (u64)ULLONG_MAX) {
8677bd0b0f0STejun Heo 		mi = mem->cnt - 1;
8687bd0b0f0STejun Heo 		ri = rsv->cnt;
8697bd0b0f0STejun Heo 	}
8707bd0b0f0STejun Heo 
8717bd0b0f0STejun Heo 	for ( ; mi >= 0; mi--) {
8727bd0b0f0STejun Heo 		struct memblock_region *m = &mem->regions[mi];
8737bd0b0f0STejun Heo 		phys_addr_t m_start = m->base;
8747bd0b0f0STejun Heo 		phys_addr_t m_end = m->base + m->size;
8757bd0b0f0STejun Heo 
8767bd0b0f0STejun Heo 		/* only memory regions are associated with nodes, check it */
877560dca27SGrygorii Strashko 		if (nid != NUMA_NO_NODE && nid != memblock_get_region_node(m))
8787bd0b0f0STejun Heo 			continue;
8797bd0b0f0STejun Heo 
88055ac590cSTang Chen 		/* skip hotpluggable memory regions if needed */
88155ac590cSTang Chen 		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
88255ac590cSTang Chen 			continue;
88355ac590cSTang Chen 
8847bd0b0f0STejun Heo 		/* scan areas before each reservation for intersection */
8857bd0b0f0STejun Heo 		for ( ; ri >= 0; ri--) {
8867bd0b0f0STejun Heo 			struct memblock_region *r = &rsv->regions[ri];
8877bd0b0f0STejun Heo 			phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0;
8887bd0b0f0STejun Heo 			phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX;
8897bd0b0f0STejun Heo 
8907bd0b0f0STejun Heo 			/* if ri advanced past mi, break out to advance mi */
8917bd0b0f0STejun Heo 			if (r_end <= m_start)
8927bd0b0f0STejun Heo 				break;
8937bd0b0f0STejun Heo 			/* if the two regions intersect, we're done */
8947bd0b0f0STejun Heo 			if (m_end > r_start) {
8957bd0b0f0STejun Heo 				if (out_start)
8967bd0b0f0STejun Heo 					*out_start = max(m_start, r_start);
8977bd0b0f0STejun Heo 				if (out_end)
8987bd0b0f0STejun Heo 					*out_end = min(m_end, r_end);
8997bd0b0f0STejun Heo 				if (out_nid)
9007bd0b0f0STejun Heo 					*out_nid = memblock_get_region_node(m);
9017bd0b0f0STejun Heo 
9027bd0b0f0STejun Heo 				if (m_start >= r_start)
9037bd0b0f0STejun Heo 					mi--;
9047bd0b0f0STejun Heo 				else
9057bd0b0f0STejun Heo 					ri--;
9067bd0b0f0STejun Heo 				*idx = (u32)mi | (u64)ri << 32;
9077bd0b0f0STejun Heo 				return;
9087bd0b0f0STejun Heo 			}
9097bd0b0f0STejun Heo 		}
9107bd0b0f0STejun Heo 	}
9117bd0b0f0STejun Heo 
9127bd0b0f0STejun Heo 	*idx = ULLONG_MAX;
9137bd0b0f0STejun Heo }
9147bd0b0f0STejun Heo 
9157c0caeb8STejun Heo #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
9167c0caeb8STejun Heo /*
9177c0caeb8STejun Heo  * Common iterator interface used to define for_each_mem_range().
9187c0caeb8STejun Heo  */
9197c0caeb8STejun Heo void __init_memblock __next_mem_pfn_range(int *idx, int nid,
9207c0caeb8STejun Heo 				unsigned long *out_start_pfn,
9217c0caeb8STejun Heo 				unsigned long *out_end_pfn, int *out_nid)
9227c0caeb8STejun Heo {
9237c0caeb8STejun Heo 	struct memblock_type *type = &memblock.memory;
9247c0caeb8STejun Heo 	struct memblock_region *r;
9257c0caeb8STejun Heo 
9267c0caeb8STejun Heo 	while (++*idx < type->cnt) {
9277c0caeb8STejun Heo 		r = &type->regions[*idx];
9287c0caeb8STejun Heo 
9297c0caeb8STejun Heo 		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
9307c0caeb8STejun Heo 			continue;
9317c0caeb8STejun Heo 		if (nid == MAX_NUMNODES || nid == r->nid)
9327c0caeb8STejun Heo 			break;
9337c0caeb8STejun Heo 	}
9347c0caeb8STejun Heo 	if (*idx >= type->cnt) {
9357c0caeb8STejun Heo 		*idx = -1;
9367c0caeb8STejun Heo 		return;
9377c0caeb8STejun Heo 	}
9387c0caeb8STejun Heo 
9397c0caeb8STejun Heo 	if (out_start_pfn)
9407c0caeb8STejun Heo 		*out_start_pfn = PFN_UP(r->base);
9417c0caeb8STejun Heo 	if (out_end_pfn)
9427c0caeb8STejun Heo 		*out_end_pfn = PFN_DOWN(r->base + r->size);
9437c0caeb8STejun Heo 	if (out_nid)
9447c0caeb8STejun Heo 		*out_nid = r->nid;
9457c0caeb8STejun Heo }
9467c0caeb8STejun Heo 
9477c0caeb8STejun Heo /**
9487c0caeb8STejun Heo  * memblock_set_node - set node ID on memblock regions
9497c0caeb8STejun Heo  * @base: base of area to set node ID for
9507c0caeb8STejun Heo  * @size: size of area to set node ID for
951e7e8de59STang Chen  * @type: memblock type to set node ID for
9527c0caeb8STejun Heo  * @nid: node ID to set
9537c0caeb8STejun Heo  *
954e7e8de59STang Chen  * Set the nid of memblock @type regions in [@base,@base+@size) to @nid.
9557c0caeb8STejun Heo  * Regions which cross the area boundaries are split as necessary.
9567c0caeb8STejun Heo  *
9577c0caeb8STejun Heo  * RETURNS:
9587c0caeb8STejun Heo  * 0 on success, -errno on failure.
9597c0caeb8STejun Heo  */
9607c0caeb8STejun Heo int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
961e7e8de59STang Chen 				      struct memblock_type *type, int nid)
9627c0caeb8STejun Heo {
9636a9ceb31STejun Heo 	int start_rgn, end_rgn;
9646a9ceb31STejun Heo 	int i, ret;
9657c0caeb8STejun Heo 
9666a9ceb31STejun Heo 	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
9676a9ceb31STejun Heo 	if (ret)
9686a9ceb31STejun Heo 		return ret;
9697c0caeb8STejun Heo 
9706a9ceb31STejun Heo 	for (i = start_rgn; i < end_rgn; i++)
971e9d24ad3SWanpeng Li 		memblock_set_region_node(&type->regions[i], nid);
9727c0caeb8STejun Heo 
9737c0caeb8STejun Heo 	memblock_merge_regions(type);
9747c0caeb8STejun Heo 	return 0;
9757c0caeb8STejun Heo }
9767c0caeb8STejun Heo #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
9777c0caeb8STejun Heo 
9787bd0b0f0STejun Heo static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
9797bd0b0f0STejun Heo 					phys_addr_t align, phys_addr_t max_addr,
9807bd0b0f0STejun Heo 					int nid)
98195f72d1eSYinghai Lu {
9826ed311b2SBenjamin Herrenschmidt 	phys_addr_t found;
98395f72d1eSYinghai Lu 
98479f40fabSGrygorii Strashko 	if (!align)
98579f40fabSGrygorii Strashko 		align = SMP_CACHE_BYTES;
98694f3d3afSVineet Gupta 
98787029ee9SGrygorii Strashko 	found = memblock_find_in_range_node(size, align, 0, max_addr, nid);
9889c8c27e2STejun Heo 	if (found && !memblock_reserve(found, size))
9896ed311b2SBenjamin Herrenschmidt 		return found;
9906ed311b2SBenjamin Herrenschmidt 
9916ed311b2SBenjamin Herrenschmidt 	return 0;
99295f72d1eSYinghai Lu }
99395f72d1eSYinghai Lu 
9947bd0b0f0STejun Heo phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
9957bd0b0f0STejun Heo {
9967bd0b0f0STejun Heo 	return memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
9977bd0b0f0STejun Heo }
9987bd0b0f0STejun Heo 
9997bd0b0f0STejun Heo phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
10007bd0b0f0STejun Heo {
1001b1154233SGrygorii Strashko 	return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE);
10027bd0b0f0STejun Heo }
10037bd0b0f0STejun Heo 
10046ed311b2SBenjamin Herrenschmidt phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
100595f72d1eSYinghai Lu {
10066ed311b2SBenjamin Herrenschmidt 	phys_addr_t alloc;
10076ed311b2SBenjamin Herrenschmidt 
10086ed311b2SBenjamin Herrenschmidt 	alloc = __memblock_alloc_base(size, align, max_addr);
10096ed311b2SBenjamin Herrenschmidt 
10106ed311b2SBenjamin Herrenschmidt 	if (alloc == 0)
10116ed311b2SBenjamin Herrenschmidt 		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
10126ed311b2SBenjamin Herrenschmidt 		      (unsigned long long) size, (unsigned long long) max_addr);
10136ed311b2SBenjamin Herrenschmidt 
10146ed311b2SBenjamin Herrenschmidt 	return alloc;
101595f72d1eSYinghai Lu }
101695f72d1eSYinghai Lu 
10176ed311b2SBenjamin Herrenschmidt phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
101895f72d1eSYinghai Lu {
10196ed311b2SBenjamin Herrenschmidt 	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
102095f72d1eSYinghai Lu }
102195f72d1eSYinghai Lu 
10229d1e2492SBenjamin Herrenschmidt phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
10239d1e2492SBenjamin Herrenschmidt {
10249d1e2492SBenjamin Herrenschmidt 	phys_addr_t res = memblock_alloc_nid(size, align, nid);
10259d1e2492SBenjamin Herrenschmidt 
10269d1e2492SBenjamin Herrenschmidt 	if (res)
10279d1e2492SBenjamin Herrenschmidt 		return res;
102815fb0972STejun Heo 	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
102995f72d1eSYinghai Lu }
103095f72d1eSYinghai Lu 
103126f09e9bSSantosh Shilimkar /**
103226f09e9bSSantosh Shilimkar  * memblock_virt_alloc_internal - allocate boot memory block
103326f09e9bSSantosh Shilimkar  * @size: size of memory block to be allocated in bytes
103426f09e9bSSantosh Shilimkar  * @align: alignment of the region and block's size
103526f09e9bSSantosh Shilimkar  * @min_addr: the lower bound of the memory region to allocate (phys address)
103626f09e9bSSantosh Shilimkar  * @max_addr: the upper bound of the memory region to allocate (phys address)
103726f09e9bSSantosh Shilimkar  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
103826f09e9bSSantosh Shilimkar  *
103926f09e9bSSantosh Shilimkar  * The @min_addr limit is dropped if it can not be satisfied and the allocation
104026f09e9bSSantosh Shilimkar  * will fall back to memory below @min_addr. Also, allocation may fall back
104126f09e9bSSantosh Shilimkar  * to any node in the system if the specified node can not
104226f09e9bSSantosh Shilimkar  * hold the requested memory.
104326f09e9bSSantosh Shilimkar  *
104426f09e9bSSantosh Shilimkar  * The allocation is performed from memory region limited by
104526f09e9bSSantosh Shilimkar  * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE.
104626f09e9bSSantosh Shilimkar  *
104726f09e9bSSantosh Shilimkar  * The memory block is aligned on SMP_CACHE_BYTES if @align == 0.
104826f09e9bSSantosh Shilimkar  *
104926f09e9bSSantosh Shilimkar  * The phys address of allocated boot memory block is converted to virtual and
105026f09e9bSSantosh Shilimkar  * allocated memory is reset to 0.
105126f09e9bSSantosh Shilimkar  *
105226f09e9bSSantosh Shilimkar  * In addition, function sets the min_count to 0 using kmemleak_alloc for
105326f09e9bSSantosh Shilimkar  * allocated boot memory block, so that it is never reported as leaks.
105426f09e9bSSantosh Shilimkar  *
105526f09e9bSSantosh Shilimkar  * RETURNS:
105626f09e9bSSantosh Shilimkar  * Virtual address of allocated memory block on success, NULL on failure.
105726f09e9bSSantosh Shilimkar  */
105826f09e9bSSantosh Shilimkar static void * __init memblock_virt_alloc_internal(
105926f09e9bSSantosh Shilimkar 				phys_addr_t size, phys_addr_t align,
106026f09e9bSSantosh Shilimkar 				phys_addr_t min_addr, phys_addr_t max_addr,
106126f09e9bSSantosh Shilimkar 				int nid)
106226f09e9bSSantosh Shilimkar {
106326f09e9bSSantosh Shilimkar 	phys_addr_t alloc;
106426f09e9bSSantosh Shilimkar 	void *ptr;
106526f09e9bSSantosh Shilimkar 
1066560dca27SGrygorii Strashko 	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
1067560dca27SGrygorii Strashko 		nid = NUMA_NO_NODE;
106826f09e9bSSantosh Shilimkar 
106926f09e9bSSantosh Shilimkar 	/*
107026f09e9bSSantosh Shilimkar 	 * Detect any accidental use of these APIs after slab is ready, as at
107126f09e9bSSantosh Shilimkar 	 * this moment memblock may be deinitialized already and its
107226f09e9bSSantosh Shilimkar 	 * internal data may be destroyed (after execution of free_all_bootmem)
107326f09e9bSSantosh Shilimkar 	 */
107426f09e9bSSantosh Shilimkar 	if (WARN_ON_ONCE(slab_is_available()))
107526f09e9bSSantosh Shilimkar 		return kzalloc_node(size, GFP_NOWAIT, nid);
107626f09e9bSSantosh Shilimkar 
107726f09e9bSSantosh Shilimkar 	if (!align)
107826f09e9bSSantosh Shilimkar 		align = SMP_CACHE_BYTES;
107926f09e9bSSantosh Shilimkar 
1080f544e14fSYinghai Lu 	if (max_addr > memblock.current_limit)
1081f544e14fSYinghai Lu 		max_addr = memblock.current_limit;
1082f544e14fSYinghai Lu 
108326f09e9bSSantosh Shilimkar again:
108426f09e9bSSantosh Shilimkar 	alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
108526f09e9bSSantosh Shilimkar 					    nid);
108626f09e9bSSantosh Shilimkar 	if (alloc)
108726f09e9bSSantosh Shilimkar 		goto done;
108826f09e9bSSantosh Shilimkar 
108926f09e9bSSantosh Shilimkar 	if (nid != NUMA_NO_NODE) {
109026f09e9bSSantosh Shilimkar 		alloc = memblock_find_in_range_node(size, align, min_addr,
109126f09e9bSSantosh Shilimkar 						    max_addr,  NUMA_NO_NODE);
109226f09e9bSSantosh Shilimkar 		if (alloc)
109326f09e9bSSantosh Shilimkar 			goto done;
109426f09e9bSSantosh Shilimkar 	}
109526f09e9bSSantosh Shilimkar 
109626f09e9bSSantosh Shilimkar 	if (min_addr) {
109726f09e9bSSantosh Shilimkar 		min_addr = 0;
109826f09e9bSSantosh Shilimkar 		goto again;
109926f09e9bSSantosh Shilimkar 	} else {
110026f09e9bSSantosh Shilimkar 		goto error;
110126f09e9bSSantosh Shilimkar 	}
110226f09e9bSSantosh Shilimkar 
110326f09e9bSSantosh Shilimkar done:
110426f09e9bSSantosh Shilimkar 	memblock_reserve(alloc, size);
110526f09e9bSSantosh Shilimkar 	ptr = phys_to_virt(alloc);
110626f09e9bSSantosh Shilimkar 	memset(ptr, 0, size);
110726f09e9bSSantosh Shilimkar 
110826f09e9bSSantosh Shilimkar 	/*
110926f09e9bSSantosh Shilimkar 	 * The min_count is set to 0 so that bootmem allocated blocks
111026f09e9bSSantosh Shilimkar 	 * are never reported as leaks. This is because many of these blocks
111126f09e9bSSantosh Shilimkar 	 * are only referred via the physical address which is not
111226f09e9bSSantosh Shilimkar 	 * looked up by kmemleak.
111326f09e9bSSantosh Shilimkar 	 */
111426f09e9bSSantosh Shilimkar 	kmemleak_alloc(ptr, size, 0, 0);
111526f09e9bSSantosh Shilimkar 
111626f09e9bSSantosh Shilimkar 	return ptr;
111726f09e9bSSantosh Shilimkar 
111826f09e9bSSantosh Shilimkar error:
111926f09e9bSSantosh Shilimkar 	return NULL;
112026f09e9bSSantosh Shilimkar }
112126f09e9bSSantosh Shilimkar 
112226f09e9bSSantosh Shilimkar /**
112326f09e9bSSantosh Shilimkar  * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block
112426f09e9bSSantosh Shilimkar  * @size: size of memory block to be allocated in bytes
112526f09e9bSSantosh Shilimkar  * @align: alignment of the region and block's size
112626f09e9bSSantosh Shilimkar  * @min_addr: the lower bound of the memory region from where the allocation
112726f09e9bSSantosh Shilimkar  *	  is preferred (phys address)
112826f09e9bSSantosh Shilimkar  * @max_addr: the upper bound of the memory region from where the allocation
112926f09e9bSSantosh Shilimkar  *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
113026f09e9bSSantosh Shilimkar  *	      allocate only from memory limited by memblock.current_limit value
113126f09e9bSSantosh Shilimkar  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
113226f09e9bSSantosh Shilimkar  *
113326f09e9bSSantosh Shilimkar  * Public version of _memblock_virt_alloc_try_nid_nopanic() which provides
113426f09e9bSSantosh Shilimkar  * additional debug information (including caller info), if enabled.
113526f09e9bSSantosh Shilimkar  *
113626f09e9bSSantosh Shilimkar  * RETURNS:
113726f09e9bSSantosh Shilimkar  * Virtual address of allocated memory block on success, NULL on failure.
113826f09e9bSSantosh Shilimkar  */
113926f09e9bSSantosh Shilimkar void * __init memblock_virt_alloc_try_nid_nopanic(
114026f09e9bSSantosh Shilimkar 				phys_addr_t size, phys_addr_t align,
114126f09e9bSSantosh Shilimkar 				phys_addr_t min_addr, phys_addr_t max_addr,
114226f09e9bSSantosh Shilimkar 				int nid)
114326f09e9bSSantosh Shilimkar {
114426f09e9bSSantosh Shilimkar 	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
114526f09e9bSSantosh Shilimkar 		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
114626f09e9bSSantosh Shilimkar 		     (u64)max_addr, (void *)_RET_IP_);
114726f09e9bSSantosh Shilimkar 	return memblock_virt_alloc_internal(size, align, min_addr,
114826f09e9bSSantosh Shilimkar 					     max_addr, nid);
114926f09e9bSSantosh Shilimkar }
115026f09e9bSSantosh Shilimkar 
115126f09e9bSSantosh Shilimkar /**
115226f09e9bSSantosh Shilimkar  * memblock_virt_alloc_try_nid - allocate boot memory block with panicking
115326f09e9bSSantosh Shilimkar  * @size: size of memory block to be allocated in bytes
115426f09e9bSSantosh Shilimkar  * @align: alignment of the region and block's size
115526f09e9bSSantosh Shilimkar  * @min_addr: the lower bound of the memory region from where the allocation
115626f09e9bSSantosh Shilimkar  *	  is preferred (phys address)
115726f09e9bSSantosh Shilimkar  * @max_addr: the upper bound of the memory region from where the allocation
115826f09e9bSSantosh Shilimkar  *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
115926f09e9bSSantosh Shilimkar  *	      allocate only from memory limited by memblock.current_limit value
116026f09e9bSSantosh Shilimkar  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
116126f09e9bSSantosh Shilimkar  *
116226f09e9bSSantosh Shilimkar  * Public panicking version of _memblock_virt_alloc_try_nid_nopanic()
116326f09e9bSSantosh Shilimkar  * which provides debug information (including caller info), if enabled,
116426f09e9bSSantosh Shilimkar  * and panics if the request can not be satisfied.
116526f09e9bSSantosh Shilimkar  *
116626f09e9bSSantosh Shilimkar  * RETURNS:
116726f09e9bSSantosh Shilimkar  * Virtual address of allocated memory block on success, NULL on failure.
116826f09e9bSSantosh Shilimkar  */
116926f09e9bSSantosh Shilimkar void * __init memblock_virt_alloc_try_nid(
117026f09e9bSSantosh Shilimkar 			phys_addr_t size, phys_addr_t align,
117126f09e9bSSantosh Shilimkar 			phys_addr_t min_addr, phys_addr_t max_addr,
117226f09e9bSSantosh Shilimkar 			int nid)
117326f09e9bSSantosh Shilimkar {
117426f09e9bSSantosh Shilimkar 	void *ptr;
117526f09e9bSSantosh Shilimkar 
117626f09e9bSSantosh Shilimkar 	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
117726f09e9bSSantosh Shilimkar 		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
117826f09e9bSSantosh Shilimkar 		     (u64)max_addr, (void *)_RET_IP_);
117926f09e9bSSantosh Shilimkar 	ptr = memblock_virt_alloc_internal(size, align,
118026f09e9bSSantosh Shilimkar 					   min_addr, max_addr, nid);
118126f09e9bSSantosh Shilimkar 	if (ptr)
118226f09e9bSSantosh Shilimkar 		return ptr;
118326f09e9bSSantosh Shilimkar 
118426f09e9bSSantosh Shilimkar 	panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx\n",
118526f09e9bSSantosh Shilimkar 	      __func__, (u64)size, (u64)align, nid, (u64)min_addr,
118626f09e9bSSantosh Shilimkar 	      (u64)max_addr);
118726f09e9bSSantosh Shilimkar 	return NULL;
118826f09e9bSSantosh Shilimkar }
118926f09e9bSSantosh Shilimkar 
119026f09e9bSSantosh Shilimkar /**
119126f09e9bSSantosh Shilimkar  * __memblock_free_early - free boot memory block
119226f09e9bSSantosh Shilimkar  * @base: phys starting address of the  boot memory block
119326f09e9bSSantosh Shilimkar  * @size: size of the boot memory block in bytes
119426f09e9bSSantosh Shilimkar  *
119526f09e9bSSantosh Shilimkar  * Free boot memory block previously allocated by memblock_virt_alloc_xx() API.
119626f09e9bSSantosh Shilimkar  * The freeing memory will not be released to the buddy allocator.
119726f09e9bSSantosh Shilimkar  */
119826f09e9bSSantosh Shilimkar void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
119926f09e9bSSantosh Shilimkar {
120026f09e9bSSantosh Shilimkar 	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
120126f09e9bSSantosh Shilimkar 		     __func__, (u64)base, (u64)base + size - 1,
120226f09e9bSSantosh Shilimkar 		     (void *)_RET_IP_);
120326f09e9bSSantosh Shilimkar 	kmemleak_free_part(__va(base), size);
120426f09e9bSSantosh Shilimkar 	__memblock_remove(&memblock.reserved, base, size);
120526f09e9bSSantosh Shilimkar }
120626f09e9bSSantosh Shilimkar 
120726f09e9bSSantosh Shilimkar /*
120826f09e9bSSantosh Shilimkar  * __memblock_free_late - free bootmem block pages directly to buddy allocator
120926f09e9bSSantosh Shilimkar  * @addr: phys starting address of the  boot memory block
121026f09e9bSSantosh Shilimkar  * @size: size of the boot memory block in bytes
121126f09e9bSSantosh Shilimkar  *
121226f09e9bSSantosh Shilimkar  * This is only useful when the bootmem allocator has already been torn
121326f09e9bSSantosh Shilimkar  * down, but we are still initializing the system.  Pages are released directly
121426f09e9bSSantosh Shilimkar  * to the buddy allocator, no bootmem metadata is updated because it is gone.
121526f09e9bSSantosh Shilimkar  */
121626f09e9bSSantosh Shilimkar void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
121726f09e9bSSantosh Shilimkar {
121826f09e9bSSantosh Shilimkar 	u64 cursor, end;
121926f09e9bSSantosh Shilimkar 
122026f09e9bSSantosh Shilimkar 	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
122126f09e9bSSantosh Shilimkar 		     __func__, (u64)base, (u64)base + size - 1,
122226f09e9bSSantosh Shilimkar 		     (void *)_RET_IP_);
122326f09e9bSSantosh Shilimkar 	kmemleak_free_part(__va(base), size);
122426f09e9bSSantosh Shilimkar 	cursor = PFN_UP(base);
122526f09e9bSSantosh Shilimkar 	end = PFN_DOWN(base + size);
122626f09e9bSSantosh Shilimkar 
122726f09e9bSSantosh Shilimkar 	for (; cursor < end; cursor++) {
122826f09e9bSSantosh Shilimkar 		__free_pages_bootmem(pfn_to_page(cursor), 0);
122926f09e9bSSantosh Shilimkar 		totalram_pages++;
123026f09e9bSSantosh Shilimkar 	}
123126f09e9bSSantosh Shilimkar }
12329d1e2492SBenjamin Herrenschmidt 
12339d1e2492SBenjamin Herrenschmidt /*
12349d1e2492SBenjamin Herrenschmidt  * Remaining API functions
12359d1e2492SBenjamin Herrenschmidt  */
12369d1e2492SBenjamin Herrenschmidt 
12372898cc4cSBenjamin Herrenschmidt phys_addr_t __init memblock_phys_mem_size(void)
123895f72d1eSYinghai Lu {
12391440c4e2STejun Heo 	return memblock.memory.total_size;
124095f72d1eSYinghai Lu }
124195f72d1eSYinghai Lu 
1242595ad9afSYinghai Lu phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
1243595ad9afSYinghai Lu {
1244595ad9afSYinghai Lu 	unsigned long pages = 0;
1245595ad9afSYinghai Lu 	struct memblock_region *r;
1246595ad9afSYinghai Lu 	unsigned long start_pfn, end_pfn;
1247595ad9afSYinghai Lu 
1248595ad9afSYinghai Lu 	for_each_memblock(memory, r) {
1249595ad9afSYinghai Lu 		start_pfn = memblock_region_memory_base_pfn(r);
1250595ad9afSYinghai Lu 		end_pfn = memblock_region_memory_end_pfn(r);
1251595ad9afSYinghai Lu 		start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
1252595ad9afSYinghai Lu 		end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
1253595ad9afSYinghai Lu 		pages += end_pfn - start_pfn;
1254595ad9afSYinghai Lu 	}
1255595ad9afSYinghai Lu 
1256*16763230SFabian Frederick 	return PFN_PHYS(pages);
1257595ad9afSYinghai Lu }
1258595ad9afSYinghai Lu 
12590a93ebefSSam Ravnborg /* lowest address */
12600a93ebefSSam Ravnborg phys_addr_t __init_memblock memblock_start_of_DRAM(void)
12610a93ebefSSam Ravnborg {
12620a93ebefSSam Ravnborg 	return memblock.memory.regions[0].base;
12630a93ebefSSam Ravnborg }
12640a93ebefSSam Ravnborg 
126510d06439SYinghai Lu phys_addr_t __init_memblock memblock_end_of_DRAM(void)
126695f72d1eSYinghai Lu {
126795f72d1eSYinghai Lu 	int idx = memblock.memory.cnt - 1;
126895f72d1eSYinghai Lu 
1269e3239ff9SBenjamin Herrenschmidt 	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
127095f72d1eSYinghai Lu }
127195f72d1eSYinghai Lu 
1272c0ce8fefSTejun Heo void __init memblock_enforce_memory_limit(phys_addr_t limit)
127395f72d1eSYinghai Lu {
1274c0ce8fefSTejun Heo 	phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;
1275136199f0SEmil Medve 	struct memblock_region *r;
127695f72d1eSYinghai Lu 
1277c0ce8fefSTejun Heo 	if (!limit)
127895f72d1eSYinghai Lu 		return;
127995f72d1eSYinghai Lu 
1280c0ce8fefSTejun Heo 	/* find out max address */
1281136199f0SEmil Medve 	for_each_memblock(memory, r) {
1282c0ce8fefSTejun Heo 		if (limit <= r->size) {
1283c0ce8fefSTejun Heo 			max_addr = r->base + limit;
128495f72d1eSYinghai Lu 			break;
128595f72d1eSYinghai Lu 		}
1286c0ce8fefSTejun Heo 		limit -= r->size;
128795f72d1eSYinghai Lu 	}
1288c0ce8fefSTejun Heo 
1289c0ce8fefSTejun Heo 	/* truncate both memory and reserved regions */
1290c0ce8fefSTejun Heo 	__memblock_remove(&memblock.memory, max_addr, (phys_addr_t)ULLONG_MAX);
1291c0ce8fefSTejun Heo 	__memblock_remove(&memblock.reserved, max_addr, (phys_addr_t)ULLONG_MAX);
129295f72d1eSYinghai Lu }
129395f72d1eSYinghai Lu 
1294cd79481dSYinghai Lu static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
129572d4b0b4SBenjamin Herrenschmidt {
129672d4b0b4SBenjamin Herrenschmidt 	unsigned int left = 0, right = type->cnt;
129772d4b0b4SBenjamin Herrenschmidt 
129872d4b0b4SBenjamin Herrenschmidt 	do {
129972d4b0b4SBenjamin Herrenschmidt 		unsigned int mid = (right + left) / 2;
130072d4b0b4SBenjamin Herrenschmidt 
130172d4b0b4SBenjamin Herrenschmidt 		if (addr < type->regions[mid].base)
130272d4b0b4SBenjamin Herrenschmidt 			right = mid;
130372d4b0b4SBenjamin Herrenschmidt 		else if (addr >= (type->regions[mid].base +
130472d4b0b4SBenjamin Herrenschmidt 				  type->regions[mid].size))
130572d4b0b4SBenjamin Herrenschmidt 			left = mid + 1;
130672d4b0b4SBenjamin Herrenschmidt 		else
130772d4b0b4SBenjamin Herrenschmidt 			return mid;
130872d4b0b4SBenjamin Herrenschmidt 	} while (left < right);
130972d4b0b4SBenjamin Herrenschmidt 	return -1;
131072d4b0b4SBenjamin Herrenschmidt }
131172d4b0b4SBenjamin Herrenschmidt 
13122898cc4cSBenjamin Herrenschmidt int __init memblock_is_reserved(phys_addr_t addr)
131395f72d1eSYinghai Lu {
131472d4b0b4SBenjamin Herrenschmidt 	return memblock_search(&memblock.reserved, addr) != -1;
131595f72d1eSYinghai Lu }
131672d4b0b4SBenjamin Herrenschmidt 
13173661ca66SYinghai Lu int __init_memblock memblock_is_memory(phys_addr_t addr)
131872d4b0b4SBenjamin Herrenschmidt {
131972d4b0b4SBenjamin Herrenschmidt 	return memblock_search(&memblock.memory, addr) != -1;
132072d4b0b4SBenjamin Herrenschmidt }
132172d4b0b4SBenjamin Herrenschmidt 
1322e76b63f8SYinghai Lu #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
1323e76b63f8SYinghai Lu int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
1324e76b63f8SYinghai Lu 			 unsigned long *start_pfn, unsigned long *end_pfn)
1325e76b63f8SYinghai Lu {
1326e76b63f8SYinghai Lu 	struct memblock_type *type = &memblock.memory;
1327*16763230SFabian Frederick 	int mid = memblock_search(type, PFN_PHYS(pfn));
1328e76b63f8SYinghai Lu 
1329e76b63f8SYinghai Lu 	if (mid == -1)
1330e76b63f8SYinghai Lu 		return -1;
1331e76b63f8SYinghai Lu 
1332e76b63f8SYinghai Lu 	*start_pfn = type->regions[mid].base >> PAGE_SHIFT;
1333e76b63f8SYinghai Lu 	*end_pfn = (type->regions[mid].base + type->regions[mid].size)
1334e76b63f8SYinghai Lu 			>> PAGE_SHIFT;
1335e76b63f8SYinghai Lu 
1336e76b63f8SYinghai Lu 	return type->regions[mid].nid;
1337e76b63f8SYinghai Lu }
1338e76b63f8SYinghai Lu #endif
1339e76b63f8SYinghai Lu 
1340eab30949SStephen Boyd /**
1341eab30949SStephen Boyd  * memblock_is_region_memory - check if a region is a subset of memory
1342eab30949SStephen Boyd  * @base: base of region to check
1343eab30949SStephen Boyd  * @size: size of region to check
1344eab30949SStephen Boyd  *
1345eab30949SStephen Boyd  * Check if the region [@base, @base+@size) is a subset of a memory block.
1346eab30949SStephen Boyd  *
1347eab30949SStephen Boyd  * RETURNS:
1348eab30949SStephen Boyd  * 0 if false, non-zero if true
1349eab30949SStephen Boyd  */
13503661ca66SYinghai Lu int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
135172d4b0b4SBenjamin Herrenschmidt {
1352abb65272STomi Valkeinen 	int idx = memblock_search(&memblock.memory, base);
1353eb18f1b5STejun Heo 	phys_addr_t end = base + memblock_cap_size(base, &size);
135472d4b0b4SBenjamin Herrenschmidt 
135572d4b0b4SBenjamin Herrenschmidt 	if (idx == -1)
135695f72d1eSYinghai Lu 		return 0;
1357abb65272STomi Valkeinen 	return memblock.memory.regions[idx].base <= base &&
1358abb65272STomi Valkeinen 		(memblock.memory.regions[idx].base +
1359eb18f1b5STejun Heo 		 memblock.memory.regions[idx].size) >= end;
136095f72d1eSYinghai Lu }
136195f72d1eSYinghai Lu 
1362eab30949SStephen Boyd /**
1363eab30949SStephen Boyd  * memblock_is_region_reserved - check if a region intersects reserved memory
1364eab30949SStephen Boyd  * @base: base of region to check
1365eab30949SStephen Boyd  * @size: size of region to check
1366eab30949SStephen Boyd  *
1367eab30949SStephen Boyd  * Check if the region [@base, @base+@size) intersects a reserved memory block.
1368eab30949SStephen Boyd  *
1369eab30949SStephen Boyd  * RETURNS:
1370eab30949SStephen Boyd  * 0 if false, non-zero if true
1371eab30949SStephen Boyd  */
137210d06439SYinghai Lu int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
137395f72d1eSYinghai Lu {
1374eb18f1b5STejun Heo 	memblock_cap_size(base, &size);
1375f1c2c19cSBenjamin Herrenschmidt 	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
137695f72d1eSYinghai Lu }
137795f72d1eSYinghai Lu 
13786ede1fd3SYinghai Lu void __init_memblock memblock_trim_memory(phys_addr_t align)
13796ede1fd3SYinghai Lu {
13806ede1fd3SYinghai Lu 	phys_addr_t start, end, orig_start, orig_end;
1381136199f0SEmil Medve 	struct memblock_region *r;
13826ede1fd3SYinghai Lu 
1383136199f0SEmil Medve 	for_each_memblock(memory, r) {
1384136199f0SEmil Medve 		orig_start = r->base;
1385136199f0SEmil Medve 		orig_end = r->base + r->size;
13866ede1fd3SYinghai Lu 		start = round_up(orig_start, align);
13876ede1fd3SYinghai Lu 		end = round_down(orig_end, align);
13886ede1fd3SYinghai Lu 
13896ede1fd3SYinghai Lu 		if (start == orig_start && end == orig_end)
13906ede1fd3SYinghai Lu 			continue;
13916ede1fd3SYinghai Lu 
13926ede1fd3SYinghai Lu 		if (start < end) {
1393136199f0SEmil Medve 			r->base = start;
1394136199f0SEmil Medve 			r->size = end - start;
13956ede1fd3SYinghai Lu 		} else {
1396136199f0SEmil Medve 			memblock_remove_region(&memblock.memory,
1397136199f0SEmil Medve 					       r - memblock.memory.regions);
1398136199f0SEmil Medve 			r--;
13996ede1fd3SYinghai Lu 		}
14006ede1fd3SYinghai Lu 	}
14016ede1fd3SYinghai Lu }
1402e63075a3SBenjamin Herrenschmidt 
14033661ca66SYinghai Lu void __init_memblock memblock_set_current_limit(phys_addr_t limit)
1404e63075a3SBenjamin Herrenschmidt {
1405e63075a3SBenjamin Herrenschmidt 	memblock.current_limit = limit;
1406e63075a3SBenjamin Herrenschmidt }
1407e63075a3SBenjamin Herrenschmidt 
1408fec51014SLaura Abbott phys_addr_t __init_memblock memblock_get_current_limit(void)
1409fec51014SLaura Abbott {
1410fec51014SLaura Abbott 	return memblock.current_limit;
1411fec51014SLaura Abbott }
1412fec51014SLaura Abbott 
14137c0caeb8STejun Heo static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
14146ed311b2SBenjamin Herrenschmidt {
14156ed311b2SBenjamin Herrenschmidt 	unsigned long long base, size;
141666a20757STang Chen 	unsigned long flags;
14176ed311b2SBenjamin Herrenschmidt 	int i;
14186ed311b2SBenjamin Herrenschmidt 
14197c0caeb8STejun Heo 	pr_info(" %s.cnt  = 0x%lx\n", name, type->cnt);
14206ed311b2SBenjamin Herrenschmidt 
14217c0caeb8STejun Heo 	for (i = 0; i < type->cnt; i++) {
14227c0caeb8STejun Heo 		struct memblock_region *rgn = &type->regions[i];
14237c0caeb8STejun Heo 		char nid_buf[32] = "";
14246ed311b2SBenjamin Herrenschmidt 
14257c0caeb8STejun Heo 		base = rgn->base;
14267c0caeb8STejun Heo 		size = rgn->size;
142766a20757STang Chen 		flags = rgn->flags;
14287c0caeb8STejun Heo #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
14297c0caeb8STejun Heo 		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
14307c0caeb8STejun Heo 			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
14317c0caeb8STejun Heo 				 memblock_get_region_node(rgn));
14327c0caeb8STejun Heo #endif
143366a20757STang Chen 		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s flags: %#lx\n",
143466a20757STang Chen 			name, i, base, base + size - 1, size, nid_buf, flags);
14356ed311b2SBenjamin Herrenschmidt 	}
14366ed311b2SBenjamin Herrenschmidt }
14376ed311b2SBenjamin Herrenschmidt 
14384ff7b82fSTejun Heo void __init_memblock __memblock_dump_all(void)
14396ed311b2SBenjamin Herrenschmidt {
14406ed311b2SBenjamin Herrenschmidt 	pr_info("MEMBLOCK configuration:\n");
14411440c4e2STejun Heo 	pr_info(" memory size = %#llx reserved size = %#llx\n",
14421440c4e2STejun Heo 		(unsigned long long)memblock.memory.total_size,
14431440c4e2STejun Heo 		(unsigned long long)memblock.reserved.total_size);
14446ed311b2SBenjamin Herrenschmidt 
14456ed311b2SBenjamin Herrenschmidt 	memblock_dump(&memblock.memory, "memory");
14466ed311b2SBenjamin Herrenschmidt 	memblock_dump(&memblock.reserved, "reserved");
14476ed311b2SBenjamin Herrenschmidt }
14486ed311b2SBenjamin Herrenschmidt 
14491aadc056STejun Heo void __init memblock_allow_resize(void)
14506ed311b2SBenjamin Herrenschmidt {
1451142b45a7SBenjamin Herrenschmidt 	memblock_can_resize = 1;
14526ed311b2SBenjamin Herrenschmidt }
14536ed311b2SBenjamin Herrenschmidt 
14546ed311b2SBenjamin Herrenschmidt static int __init early_memblock(char *p)
14556ed311b2SBenjamin Herrenschmidt {
14566ed311b2SBenjamin Herrenschmidt 	if (p && strstr(p, "debug"))
14576ed311b2SBenjamin Herrenschmidt 		memblock_debug = 1;
14586ed311b2SBenjamin Herrenschmidt 	return 0;
14596ed311b2SBenjamin Herrenschmidt }
14606ed311b2SBenjamin Herrenschmidt early_param("memblock", early_memblock);
14616ed311b2SBenjamin Herrenschmidt 
1462c378ddd5STejun Heo #if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)
14636d03b885SBenjamin Herrenschmidt 
14646d03b885SBenjamin Herrenschmidt static int memblock_debug_show(struct seq_file *m, void *private)
14656d03b885SBenjamin Herrenschmidt {
14666d03b885SBenjamin Herrenschmidt 	struct memblock_type *type = m->private;
14676d03b885SBenjamin Herrenschmidt 	struct memblock_region *reg;
14686d03b885SBenjamin Herrenschmidt 	int i;
14696d03b885SBenjamin Herrenschmidt 
14706d03b885SBenjamin Herrenschmidt 	for (i = 0; i < type->cnt; i++) {
14716d03b885SBenjamin Herrenschmidt 		reg = &type->regions[i];
14726d03b885SBenjamin Herrenschmidt 		seq_printf(m, "%4d: ", i);
14736d03b885SBenjamin Herrenschmidt 		if (sizeof(phys_addr_t) == 4)
14746d03b885SBenjamin Herrenschmidt 			seq_printf(m, "0x%08lx..0x%08lx\n",
14756d03b885SBenjamin Herrenschmidt 				   (unsigned long)reg->base,
14766d03b885SBenjamin Herrenschmidt 				   (unsigned long)(reg->base + reg->size - 1));
14776d03b885SBenjamin Herrenschmidt 		else
14786d03b885SBenjamin Herrenschmidt 			seq_printf(m, "0x%016llx..0x%016llx\n",
14796d03b885SBenjamin Herrenschmidt 				   (unsigned long long)reg->base,
14806d03b885SBenjamin Herrenschmidt 				   (unsigned long long)(reg->base + reg->size - 1));
14816d03b885SBenjamin Herrenschmidt 
14826d03b885SBenjamin Herrenschmidt 	}
14836d03b885SBenjamin Herrenschmidt 	return 0;
14846d03b885SBenjamin Herrenschmidt }
14856d03b885SBenjamin Herrenschmidt 
14866d03b885SBenjamin Herrenschmidt static int memblock_debug_open(struct inode *inode, struct file *file)
14876d03b885SBenjamin Herrenschmidt {
14886d03b885SBenjamin Herrenschmidt 	return single_open(file, memblock_debug_show, inode->i_private);
14896d03b885SBenjamin Herrenschmidt }
14906d03b885SBenjamin Herrenschmidt 
14916d03b885SBenjamin Herrenschmidt static const struct file_operations memblock_debug_fops = {
14926d03b885SBenjamin Herrenschmidt 	.open = memblock_debug_open,
14936d03b885SBenjamin Herrenschmidt 	.read = seq_read,
14946d03b885SBenjamin Herrenschmidt 	.llseek = seq_lseek,
14956d03b885SBenjamin Herrenschmidt 	.release = single_release,
14966d03b885SBenjamin Herrenschmidt };
14976d03b885SBenjamin Herrenschmidt 
14986d03b885SBenjamin Herrenschmidt static int __init memblock_init_debugfs(void)
14996d03b885SBenjamin Herrenschmidt {
15006d03b885SBenjamin Herrenschmidt 	struct dentry *root = debugfs_create_dir("memblock", NULL);
15016d03b885SBenjamin Herrenschmidt 	if (!root)
15026d03b885SBenjamin Herrenschmidt 		return -ENXIO;
15036d03b885SBenjamin Herrenschmidt 	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
15046d03b885SBenjamin Herrenschmidt 	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);
15056d03b885SBenjamin Herrenschmidt 
15066d03b885SBenjamin Herrenschmidt 	return 0;
15076d03b885SBenjamin Herrenschmidt }
15086d03b885SBenjamin Herrenschmidt __initcall(memblock_init_debugfs);
15096d03b885SBenjamin Herrenschmidt 
15106d03b885SBenjamin Herrenschmidt #endif /* CONFIG_DEBUG_FS */
1511