xref: /linux/mm/memblock.c (revision 87029ee9390b2297dae699d5fb135b77992116e5)
195f72d1eSYinghai Lu /*
295f72d1eSYinghai Lu  * Procedures for maintaining information about logical memory blocks.
395f72d1eSYinghai Lu  *
495f72d1eSYinghai Lu  * Peter Bergner, IBM Corp.	June 2001.
595f72d1eSYinghai Lu  * Copyright (C) 2001 Peter Bergner.
695f72d1eSYinghai Lu  *
795f72d1eSYinghai Lu  *      This program is free software; you can redistribute it and/or
895f72d1eSYinghai Lu  *      modify it under the terms of the GNU General Public License
995f72d1eSYinghai Lu  *      as published by the Free Software Foundation; either version
1095f72d1eSYinghai Lu  *      2 of the License, or (at your option) any later version.
1195f72d1eSYinghai Lu  */
1295f72d1eSYinghai Lu 
1395f72d1eSYinghai Lu #include <linux/kernel.h>
14142b45a7SBenjamin Herrenschmidt #include <linux/slab.h>
1595f72d1eSYinghai Lu #include <linux/init.h>
1695f72d1eSYinghai Lu #include <linux/bitops.h>
17449e8df3SBenjamin Herrenschmidt #include <linux/poison.h>
18c196f76fSBenjamin Herrenschmidt #include <linux/pfn.h>
196d03b885SBenjamin Herrenschmidt #include <linux/debugfs.h>
206d03b885SBenjamin Herrenschmidt #include <linux/seq_file.h>
2195f72d1eSYinghai Lu #include <linux/memblock.h>
2295f72d1eSYinghai Lu 
2379442ed1STang Chen #include <asm-generic/sections.h>
2479442ed1STang Chen 
/*
 * Static bootstrap storage for the region arrays.  __initdata_memblock
 * presumably allows the data to be discarded after boot on arches that
 * enable CONFIG_ARCH_DISCARD_MEMBLOCK -- TODO confirm in memblock.h.
 */
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;

/* The single global memblock instance: "memory" (usable RAM) and
 * "reserved" (allocated/forbidden ranges), each starting with one
 * zero-sized dummy entry in the static arrays above. */
struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_REGIONS,

	.bottom_up		= false,	/* default: allocate top-down */
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

/* non-zero enables memblock_dbg() output (see the doubling message below) */
int memblock_debug __initdata_memblock;
#ifdef CONFIG_MOVABLE_NODE
bool movable_node_enabled __initdata_memblock = false;
#endif
/* resizing the region arrays is unsafe until reserved memory is known */
static int memblock_can_resize __initdata_memblock;
/* track whether each region array currently lives in slab (kmalloc) memory */
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;
4895f72d1eSYinghai Lu 
49142b45a7SBenjamin Herrenschmidt /* inline so we don't get a warning when pr_debug is compiled out */
50c2233116SRaghavendra D Prabhu static __init_memblock const char *
51c2233116SRaghavendra D Prabhu memblock_type_name(struct memblock_type *type)
52142b45a7SBenjamin Herrenschmidt {
53142b45a7SBenjamin Herrenschmidt 	if (type == &memblock.memory)
54142b45a7SBenjamin Herrenschmidt 		return "memory";
55142b45a7SBenjamin Herrenschmidt 	else if (type == &memblock.reserved)
56142b45a7SBenjamin Herrenschmidt 		return "reserved";
57142b45a7SBenjamin Herrenschmidt 	else
58142b45a7SBenjamin Herrenschmidt 		return "unknown";
59142b45a7SBenjamin Herrenschmidt }
60142b45a7SBenjamin Herrenschmidt 
61eb18f1b5STejun Heo /* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
62eb18f1b5STejun Heo static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
63eb18f1b5STejun Heo {
64eb18f1b5STejun Heo 	return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
65eb18f1b5STejun Heo }
66eb18f1b5STejun Heo 
676ed311b2SBenjamin Herrenschmidt /*
686ed311b2SBenjamin Herrenschmidt  * Address comparison utilities
696ed311b2SBenjamin Herrenschmidt  */
7010d06439SYinghai Lu static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
712898cc4cSBenjamin Herrenschmidt 				       phys_addr_t base2, phys_addr_t size2)
7295f72d1eSYinghai Lu {
7395f72d1eSYinghai Lu 	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
7495f72d1eSYinghai Lu }
7595f72d1eSYinghai Lu 
762d7d3eb2SH Hartley Sweeten static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
772d7d3eb2SH Hartley Sweeten 					phys_addr_t base, phys_addr_t size)
786ed311b2SBenjamin Herrenschmidt {
796ed311b2SBenjamin Herrenschmidt 	unsigned long i;
806ed311b2SBenjamin Herrenschmidt 
816ed311b2SBenjamin Herrenschmidt 	for (i = 0; i < type->cnt; i++) {
826ed311b2SBenjamin Herrenschmidt 		phys_addr_t rgnbase = type->regions[i].base;
836ed311b2SBenjamin Herrenschmidt 		phys_addr_t rgnsize = type->regions[i].size;
846ed311b2SBenjamin Herrenschmidt 		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
856ed311b2SBenjamin Herrenschmidt 			break;
866ed311b2SBenjamin Herrenschmidt 	}
876ed311b2SBenjamin Herrenschmidt 
886ed311b2SBenjamin Herrenschmidt 	return (i < type->cnt) ? i : -1;
896ed311b2SBenjamin Herrenschmidt }
906ed311b2SBenjamin Herrenschmidt 
9179442ed1STang Chen /*
9279442ed1STang Chen  * __memblock_find_range_bottom_up - find free area utility in bottom-up
9379442ed1STang Chen  * @start: start of candidate range
9479442ed1STang Chen  * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
9579442ed1STang Chen  * @size: size of free area to find
9679442ed1STang Chen  * @align: alignment of free area to find
9779442ed1STang Chen  * @nid: nid of the free area to find, %MAX_NUMNODES for any node
9879442ed1STang Chen  *
9979442ed1STang Chen  * Utility called from memblock_find_in_range_node(), find free area bottom-up.
10079442ed1STang Chen  *
10179442ed1STang Chen  * RETURNS:
10279442ed1STang Chen  * Found address on success, 0 on failure.
10379442ed1STang Chen  */
10479442ed1STang Chen static phys_addr_t __init_memblock
10579442ed1STang Chen __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
10679442ed1STang Chen 				phys_addr_t size, phys_addr_t align, int nid)
10779442ed1STang Chen {
10879442ed1STang Chen 	phys_addr_t this_start, this_end, cand;
10979442ed1STang Chen 	u64 i;
11079442ed1STang Chen 
11179442ed1STang Chen 	for_each_free_mem_range(i, nid, &this_start, &this_end, NULL) {
11279442ed1STang Chen 		this_start = clamp(this_start, start, end);
11379442ed1STang Chen 		this_end = clamp(this_end, start, end);
11479442ed1STang Chen 
11579442ed1STang Chen 		cand = round_up(this_start, align);
11679442ed1STang Chen 		if (cand < this_end && this_end - cand >= size)
11779442ed1STang Chen 			return cand;
11879442ed1STang Chen 	}
11979442ed1STang Chen 
12079442ed1STang Chen 	return 0;
12179442ed1STang Chen }
12279442ed1STang Chen 
1237bd0b0f0STejun Heo /**
1241402899eSTang Chen  * __memblock_find_range_top_down - find free area utility, in top-down
1251402899eSTang Chen  * @start: start of candidate range
1261402899eSTang Chen  * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
1271402899eSTang Chen  * @size: size of free area to find
1281402899eSTang Chen  * @align: alignment of free area to find
1291402899eSTang Chen  * @nid: nid of the free area to find, %MAX_NUMNODES for any node
1301402899eSTang Chen  *
1311402899eSTang Chen  * Utility called from memblock_find_in_range_node(), find free area top-down.
1321402899eSTang Chen  *
1331402899eSTang Chen  * RETURNS:
13479442ed1STang Chen  * Found address on success, 0 on failure.
1351402899eSTang Chen  */
1361402899eSTang Chen static phys_addr_t __init_memblock
1371402899eSTang Chen __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
1381402899eSTang Chen 			       phys_addr_t size, phys_addr_t align, int nid)
1391402899eSTang Chen {
1401402899eSTang Chen 	phys_addr_t this_start, this_end, cand;
1411402899eSTang Chen 	u64 i;
1421402899eSTang Chen 
1431402899eSTang Chen 	for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) {
1441402899eSTang Chen 		this_start = clamp(this_start, start, end);
1451402899eSTang Chen 		this_end = clamp(this_end, start, end);
1461402899eSTang Chen 
1471402899eSTang Chen 		if (this_end < size)
1481402899eSTang Chen 			continue;
1491402899eSTang Chen 
1501402899eSTang Chen 		cand = round_down(this_end - size, align);
1511402899eSTang Chen 		if (cand >= this_start)
1521402899eSTang Chen 			return cand;
1531402899eSTang Chen 	}
1541402899eSTang Chen 
1551402899eSTang Chen 	return 0;
1561402899eSTang Chen }
1571402899eSTang Chen 
1581402899eSTang Chen /**
1597bd0b0f0STejun Heo  * memblock_find_in_range_node - find free area in given range and node
1607bd0b0f0STejun Heo  * @size: size of free area to find
1617bd0b0f0STejun Heo  * @align: alignment of free area to find
162*87029ee9SGrygorii Strashko  * @start: start of candidate range
163*87029ee9SGrygorii Strashko  * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
1647bd0b0f0STejun Heo  * @nid: nid of the free area to find, %MAX_NUMNODES for any node
1657bd0b0f0STejun Heo  *
1667bd0b0f0STejun Heo  * Find @size free area aligned to @align in the specified range and node.
1677bd0b0f0STejun Heo  *
16879442ed1STang Chen  * When allocation direction is bottom-up, the @start should be greater
16979442ed1STang Chen  * than the end of the kernel image. Otherwise, it will be trimmed. The
17079442ed1STang Chen  * reason is that we want the bottom-up allocation just near the kernel
17179442ed1STang Chen  * image so it is highly likely that the allocated memory and the kernel
17279442ed1STang Chen  * will reside in the same node.
17379442ed1STang Chen  *
17479442ed1STang Chen  * If bottom-up allocation failed, will try to allocate memory top-down.
17579442ed1STang Chen  *
1767bd0b0f0STejun Heo  * RETURNS:
17779442ed1STang Chen  * Found address on success, 0 on failure.
1786ed311b2SBenjamin Herrenschmidt  */
179*87029ee9SGrygorii Strashko phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
180*87029ee9SGrygorii Strashko 					phys_addr_t align, phys_addr_t start,
181*87029ee9SGrygorii Strashko 					phys_addr_t end, int nid)
182f7210e6cSTang Chen {
18379442ed1STang Chen 	int ret;
18479442ed1STang Chen 	phys_addr_t kernel_end;
18579442ed1STang Chen 
186f7210e6cSTang Chen 	/* pump up @end */
187f7210e6cSTang Chen 	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
188f7210e6cSTang Chen 		end = memblock.current_limit;
189f7210e6cSTang Chen 
190f7210e6cSTang Chen 	/* avoid allocating the first page */
191f7210e6cSTang Chen 	start = max_t(phys_addr_t, start, PAGE_SIZE);
192f7210e6cSTang Chen 	end = max(start, end);
19379442ed1STang Chen 	kernel_end = __pa_symbol(_end);
19479442ed1STang Chen 
19579442ed1STang Chen 	/*
19679442ed1STang Chen 	 * try bottom-up allocation only when bottom-up mode
19779442ed1STang Chen 	 * is set and @end is above the kernel image.
19879442ed1STang Chen 	 */
19979442ed1STang Chen 	if (memblock_bottom_up() && end > kernel_end) {
20079442ed1STang Chen 		phys_addr_t bottom_up_start;
20179442ed1STang Chen 
20279442ed1STang Chen 		/* make sure we will allocate above the kernel */
20379442ed1STang Chen 		bottom_up_start = max(start, kernel_end);
20479442ed1STang Chen 
20579442ed1STang Chen 		/* ok, try bottom-up allocation first */
20679442ed1STang Chen 		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
20779442ed1STang Chen 						      size, align, nid);
20879442ed1STang Chen 		if (ret)
20979442ed1STang Chen 			return ret;
21079442ed1STang Chen 
21179442ed1STang Chen 		/*
21279442ed1STang Chen 		 * we always limit bottom-up allocation above the kernel,
21379442ed1STang Chen 		 * but top-down allocation doesn't have the limit, so
21479442ed1STang Chen 		 * retrying top-down allocation may succeed when bottom-up
21579442ed1STang Chen 		 * allocation failed.
21679442ed1STang Chen 		 *
21779442ed1STang Chen 		 * bottom-up allocation is expected to be fail very rarely,
21879442ed1STang Chen 		 * so we use WARN_ONCE() here to see the stack trace if
21979442ed1STang Chen 		 * fail happens.
22079442ed1STang Chen 		 */
22179442ed1STang Chen 		WARN_ONCE(1, "memblock: bottom-up allocation failed, "
22279442ed1STang Chen 			     "memory hotunplug may be affected\n");
22379442ed1STang Chen 	}
224f7210e6cSTang Chen 
2251402899eSTang Chen 	return __memblock_find_range_top_down(start, end, size, align, nid);
226f7210e6cSTang Chen }
2276ed311b2SBenjamin Herrenschmidt 
2287bd0b0f0STejun Heo /**
2297bd0b0f0STejun Heo  * memblock_find_in_range - find free area in given range
2307bd0b0f0STejun Heo  * @start: start of candidate range
2317bd0b0f0STejun Heo  * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
2327bd0b0f0STejun Heo  * @size: size of free area to find
2337bd0b0f0STejun Heo  * @align: alignment of free area to find
2347bd0b0f0STejun Heo  *
2357bd0b0f0STejun Heo  * Find @size free area aligned to @align in the specified range.
2367bd0b0f0STejun Heo  *
2377bd0b0f0STejun Heo  * RETURNS:
23879442ed1STang Chen  * Found address on success, 0 on failure.
2397bd0b0f0STejun Heo  */
2407bd0b0f0STejun Heo phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
2417bd0b0f0STejun Heo 					phys_addr_t end, phys_addr_t size,
2427bd0b0f0STejun Heo 					phys_addr_t align)
2437bd0b0f0STejun Heo {
244*87029ee9SGrygorii Strashko 	return memblock_find_in_range_node(size, align, start, end,
2457bd0b0f0STejun Heo 					    MAX_NUMNODES);
2467bd0b0f0STejun Heo }
2477bd0b0f0STejun Heo 
24810d06439SYinghai Lu static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
24995f72d1eSYinghai Lu {
2501440c4e2STejun Heo 	type->total_size -= type->regions[r].size;
2517c0caeb8STejun Heo 	memmove(&type->regions[r], &type->regions[r + 1],
2527c0caeb8STejun Heo 		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
253e3239ff9SBenjamin Herrenschmidt 	type->cnt--;
25495f72d1eSYinghai Lu 
2558f7a6605SBenjamin Herrenschmidt 	/* Special case for empty arrays */
2568f7a6605SBenjamin Herrenschmidt 	if (type->cnt == 0) {
2571440c4e2STejun Heo 		WARN_ON(type->total_size != 0);
2588f7a6605SBenjamin Herrenschmidt 		type->cnt = 1;
2598f7a6605SBenjamin Herrenschmidt 		type->regions[0].base = 0;
2608f7a6605SBenjamin Herrenschmidt 		type->regions[0].size = 0;
26166a20757STang Chen 		type->regions[0].flags = 0;
2627c0caeb8STejun Heo 		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
2638f7a6605SBenjamin Herrenschmidt 	}
26495f72d1eSYinghai Lu }
26595f72d1eSYinghai Lu 
26629f67386SYinghai Lu phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
26729f67386SYinghai Lu 					phys_addr_t *addr)
26829f67386SYinghai Lu {
26929f67386SYinghai Lu 	if (memblock.reserved.regions == memblock_reserved_init_regions)
27029f67386SYinghai Lu 		return 0;
27129f67386SYinghai Lu 
272fd615c4eSGrygorii Strashko 	/*
273fd615c4eSGrygorii Strashko 	 * Don't allow nobootmem allocator to free reserved memory regions
274fd615c4eSGrygorii Strashko 	 * array if
275fd615c4eSGrygorii Strashko 	 *  - CONFIG_DEBUG_FS is enabled;
276fd615c4eSGrygorii Strashko 	 *  - CONFIG_ARCH_DISCARD_MEMBLOCK is not enabled;
277fd615c4eSGrygorii Strashko 	 *  - reserved memory regions array have been resized during boot.
278fd615c4eSGrygorii Strashko 	 * Otherwise debug_fs entry "sys/kernel/debug/memblock/reserved"
279fd615c4eSGrygorii Strashko 	 * will show garbage instead of state of memory reservations.
280fd615c4eSGrygorii Strashko 	 */
281fd615c4eSGrygorii Strashko 	if (IS_ENABLED(CONFIG_DEBUG_FS) &&
282fd615c4eSGrygorii Strashko 	    !IS_ENABLED(CONFIG_ARCH_DISCARD_MEMBLOCK))
283fd615c4eSGrygorii Strashko 		return 0;
284fd615c4eSGrygorii Strashko 
28529f67386SYinghai Lu 	*addr = __pa(memblock.reserved.regions);
28629f67386SYinghai Lu 
28729f67386SYinghai Lu 	return PAGE_ALIGN(sizeof(struct memblock_region) *
28829f67386SYinghai Lu 			  memblock.reserved.max);
28929f67386SYinghai Lu }
29029f67386SYinghai Lu 
29148c3b583SGreg Pearson /**
29248c3b583SGreg Pearson  * memblock_double_array - double the size of the memblock regions array
29348c3b583SGreg Pearson  * @type: memblock type of the regions array being doubled
29448c3b583SGreg Pearson  * @new_area_start: starting address of memory range to avoid overlap with
29548c3b583SGreg Pearson  * @new_area_size: size of memory range to avoid overlap with
29648c3b583SGreg Pearson  *
29748c3b583SGreg Pearson  * Double the size of the @type regions array. If memblock is being used to
29848c3b583SGreg Pearson  * allocate memory for a new reserved regions array and there is a previously
29948c3b583SGreg Pearson  * allocated memory range [@new_area_start,@new_area_start+@new_area_size]
30048c3b583SGreg Pearson  * waiting to be reserved, ensure the memory used by the new array does
30148c3b583SGreg Pearson  * not overlap.
30248c3b583SGreg Pearson  *
30348c3b583SGreg Pearson  * RETURNS:
30448c3b583SGreg Pearson  * 0 on success, -1 on failure.
30548c3b583SGreg Pearson  */
30648c3b583SGreg Pearson static int __init_memblock memblock_double_array(struct memblock_type *type,
30748c3b583SGreg Pearson 						phys_addr_t new_area_start,
30848c3b583SGreg Pearson 						phys_addr_t new_area_size)
309142b45a7SBenjamin Herrenschmidt {
310142b45a7SBenjamin Herrenschmidt 	struct memblock_region *new_array, *old_array;
31129f67386SYinghai Lu 	phys_addr_t old_alloc_size, new_alloc_size;
312142b45a7SBenjamin Herrenschmidt 	phys_addr_t old_size, new_size, addr;
313142b45a7SBenjamin Herrenschmidt 	int use_slab = slab_is_available();
314181eb394SGavin Shan 	int *in_slab;
315142b45a7SBenjamin Herrenschmidt 
316142b45a7SBenjamin Herrenschmidt 	/* We don't allow resizing until we know about the reserved regions
317142b45a7SBenjamin Herrenschmidt 	 * of memory that aren't suitable for allocation
318142b45a7SBenjamin Herrenschmidt 	 */
319142b45a7SBenjamin Herrenschmidt 	if (!memblock_can_resize)
320142b45a7SBenjamin Herrenschmidt 		return -1;
321142b45a7SBenjamin Herrenschmidt 
322142b45a7SBenjamin Herrenschmidt 	/* Calculate new doubled size */
323142b45a7SBenjamin Herrenschmidt 	old_size = type->max * sizeof(struct memblock_region);
324142b45a7SBenjamin Herrenschmidt 	new_size = old_size << 1;
32529f67386SYinghai Lu 	/*
32629f67386SYinghai Lu 	 * We need to allocated new one align to PAGE_SIZE,
32729f67386SYinghai Lu 	 *   so we can free them completely later.
32829f67386SYinghai Lu 	 */
32929f67386SYinghai Lu 	old_alloc_size = PAGE_ALIGN(old_size);
33029f67386SYinghai Lu 	new_alloc_size = PAGE_ALIGN(new_size);
331142b45a7SBenjamin Herrenschmidt 
332181eb394SGavin Shan 	/* Retrieve the slab flag */
333181eb394SGavin Shan 	if (type == &memblock.memory)
334181eb394SGavin Shan 		in_slab = &memblock_memory_in_slab;
335181eb394SGavin Shan 	else
336181eb394SGavin Shan 		in_slab = &memblock_reserved_in_slab;
337181eb394SGavin Shan 
338142b45a7SBenjamin Herrenschmidt 	/* Try to find some space for it.
339142b45a7SBenjamin Herrenschmidt 	 *
340142b45a7SBenjamin Herrenschmidt 	 * WARNING: We assume that either slab_is_available() and we use it or
341fd07383bSAndrew Morton 	 * we use MEMBLOCK for allocations. That means that this is unsafe to
342fd07383bSAndrew Morton 	 * use when bootmem is currently active (unless bootmem itself is
343fd07383bSAndrew Morton 	 * implemented on top of MEMBLOCK which isn't the case yet)
344142b45a7SBenjamin Herrenschmidt 	 *
345142b45a7SBenjamin Herrenschmidt 	 * This should however not be an issue for now, as we currently only
346fd07383bSAndrew Morton 	 * call into MEMBLOCK while it's still active, or much later when slab
347fd07383bSAndrew Morton 	 * is active for memory hotplug operations
348142b45a7SBenjamin Herrenschmidt 	 */
349142b45a7SBenjamin Herrenschmidt 	if (use_slab) {
350142b45a7SBenjamin Herrenschmidt 		new_array = kmalloc(new_size, GFP_KERNEL);
3511f5026a7STejun Heo 		addr = new_array ? __pa(new_array) : 0;
3524e2f0775SGavin Shan 	} else {
35348c3b583SGreg Pearson 		/* only exclude range when trying to double reserved.regions */
35448c3b583SGreg Pearson 		if (type != &memblock.reserved)
35548c3b583SGreg Pearson 			new_area_start = new_area_size = 0;
35648c3b583SGreg Pearson 
35748c3b583SGreg Pearson 		addr = memblock_find_in_range(new_area_start + new_area_size,
35848c3b583SGreg Pearson 						memblock.current_limit,
35929f67386SYinghai Lu 						new_alloc_size, PAGE_SIZE);
36048c3b583SGreg Pearson 		if (!addr && new_area_size)
36148c3b583SGreg Pearson 			addr = memblock_find_in_range(0,
36248c3b583SGreg Pearson 				min(new_area_start, memblock.current_limit),
36329f67386SYinghai Lu 				new_alloc_size, PAGE_SIZE);
36448c3b583SGreg Pearson 
36515674868SSachin Kamat 		new_array = addr ? __va(addr) : NULL;
3664e2f0775SGavin Shan 	}
3671f5026a7STejun Heo 	if (!addr) {
368142b45a7SBenjamin Herrenschmidt 		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
369142b45a7SBenjamin Herrenschmidt 		       memblock_type_name(type), type->max, type->max * 2);
370142b45a7SBenjamin Herrenschmidt 		return -1;
371142b45a7SBenjamin Herrenschmidt 	}
372142b45a7SBenjamin Herrenschmidt 
373fd07383bSAndrew Morton 	memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]",
374fd07383bSAndrew Morton 			memblock_type_name(type), type->max * 2, (u64)addr,
375fd07383bSAndrew Morton 			(u64)addr + new_size - 1);
376ea9e4376SYinghai Lu 
377fd07383bSAndrew Morton 	/*
378fd07383bSAndrew Morton 	 * Found space, we now need to move the array over before we add the
379fd07383bSAndrew Morton 	 * reserved region since it may be our reserved array itself that is
380fd07383bSAndrew Morton 	 * full.
381142b45a7SBenjamin Herrenschmidt 	 */
382142b45a7SBenjamin Herrenschmidt 	memcpy(new_array, type->regions, old_size);
383142b45a7SBenjamin Herrenschmidt 	memset(new_array + type->max, 0, old_size);
384142b45a7SBenjamin Herrenschmidt 	old_array = type->regions;
385142b45a7SBenjamin Herrenschmidt 	type->regions = new_array;
386142b45a7SBenjamin Herrenschmidt 	type->max <<= 1;
387142b45a7SBenjamin Herrenschmidt 
388fd07383bSAndrew Morton 	/* Free old array. We needn't free it if the array is the static one */
389181eb394SGavin Shan 	if (*in_slab)
390181eb394SGavin Shan 		kfree(old_array);
391181eb394SGavin Shan 	else if (old_array != memblock_memory_init_regions &&
392142b45a7SBenjamin Herrenschmidt 		 old_array != memblock_reserved_init_regions)
39329f67386SYinghai Lu 		memblock_free(__pa(old_array), old_alloc_size);
394142b45a7SBenjamin Herrenschmidt 
395fd07383bSAndrew Morton 	/*
396fd07383bSAndrew Morton 	 * Reserve the new array if that comes from the memblock.  Otherwise, we
397fd07383bSAndrew Morton 	 * needn't do it
398181eb394SGavin Shan 	 */
399181eb394SGavin Shan 	if (!use_slab)
40029f67386SYinghai Lu 		BUG_ON(memblock_reserve(addr, new_alloc_size));
401181eb394SGavin Shan 
402181eb394SGavin Shan 	/* Update slab flag */
403181eb394SGavin Shan 	*in_slab = use_slab;
404181eb394SGavin Shan 
405142b45a7SBenjamin Herrenschmidt 	return 0;
406142b45a7SBenjamin Herrenschmidt }
407142b45a7SBenjamin Herrenschmidt 
408784656f9STejun Heo /**
409784656f9STejun Heo  * memblock_merge_regions - merge neighboring compatible regions
410784656f9STejun Heo  * @type: memblock type to scan
411784656f9STejun Heo  *
412784656f9STejun Heo  * Scan @type and merge neighboring compatible regions.
413784656f9STejun Heo  */
414784656f9STejun Heo static void __init_memblock memblock_merge_regions(struct memblock_type *type)
415784656f9STejun Heo {
416784656f9STejun Heo 	int i = 0;
417784656f9STejun Heo 
418784656f9STejun Heo 	/* cnt never goes below 1 */
419784656f9STejun Heo 	while (i < type->cnt - 1) {
420784656f9STejun Heo 		struct memblock_region *this = &type->regions[i];
421784656f9STejun Heo 		struct memblock_region *next = &type->regions[i + 1];
422784656f9STejun Heo 
4237c0caeb8STejun Heo 		if (this->base + this->size != next->base ||
4247c0caeb8STejun Heo 		    memblock_get_region_node(this) !=
42566a20757STang Chen 		    memblock_get_region_node(next) ||
42666a20757STang Chen 		    this->flags != next->flags) {
427784656f9STejun Heo 			BUG_ON(this->base + this->size > next->base);
428784656f9STejun Heo 			i++;
429784656f9STejun Heo 			continue;
430784656f9STejun Heo 		}
431784656f9STejun Heo 
432784656f9STejun Heo 		this->size += next->size;
433c0232ae8SLin Feng 		/* move forward from next + 1, index of which is i + 2 */
434c0232ae8SLin Feng 		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
435784656f9STejun Heo 		type->cnt--;
436784656f9STejun Heo 	}
437784656f9STejun Heo }
438784656f9STejun Heo 
439784656f9STejun Heo /**
440784656f9STejun Heo  * memblock_insert_region - insert new memblock region
441784656f9STejun Heo  * @type:	memblock type to insert into
442784656f9STejun Heo  * @idx:	index for the insertion point
443784656f9STejun Heo  * @base:	base address of the new region
444784656f9STejun Heo  * @size:	size of the new region
445209ff86dSTang Chen  * @nid:	node id of the new region
44666a20757STang Chen  * @flags:	flags of the new region
447784656f9STejun Heo  *
448784656f9STejun Heo  * Insert new memblock region [@base,@base+@size) into @type at @idx.
449784656f9STejun Heo  * @type must already have extra room to accomodate the new region.
450784656f9STejun Heo  */
451784656f9STejun Heo static void __init_memblock memblock_insert_region(struct memblock_type *type,
452784656f9STejun Heo 						   int idx, phys_addr_t base,
45366a20757STang Chen 						   phys_addr_t size,
45466a20757STang Chen 						   int nid, unsigned long flags)
455784656f9STejun Heo {
456784656f9STejun Heo 	struct memblock_region *rgn = &type->regions[idx];
457784656f9STejun Heo 
458784656f9STejun Heo 	BUG_ON(type->cnt >= type->max);
459784656f9STejun Heo 	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
460784656f9STejun Heo 	rgn->base = base;
461784656f9STejun Heo 	rgn->size = size;
46266a20757STang Chen 	rgn->flags = flags;
4637c0caeb8STejun Heo 	memblock_set_region_node(rgn, nid);
464784656f9STejun Heo 	type->cnt++;
4651440c4e2STejun Heo 	type->total_size += size;
466784656f9STejun Heo }
467784656f9STejun Heo 
468784656f9STejun Heo /**
469784656f9STejun Heo  * memblock_add_region - add new memblock region
470784656f9STejun Heo  * @type: memblock type to add new region into
471784656f9STejun Heo  * @base: base address of the new region
472784656f9STejun Heo  * @size: size of the new region
4737fb0bc3fSTejun Heo  * @nid: nid of the new region
47466a20757STang Chen  * @flags: flags of the new region
475784656f9STejun Heo  *
476784656f9STejun Heo  * Add new memblock region [@base,@base+@size) into @type.  The new region
477784656f9STejun Heo  * is allowed to overlap with existing ones - overlaps don't affect already
478784656f9STejun Heo  * existing regions.  @type is guaranteed to be minimal (all neighbouring
479784656f9STejun Heo  * compatible regions are merged) after the addition.
480784656f9STejun Heo  *
481784656f9STejun Heo  * RETURNS:
482784656f9STejun Heo  * 0 on success, -errno on failure.
483784656f9STejun Heo  */
484581adcbeSTejun Heo static int __init_memblock memblock_add_region(struct memblock_type *type,
48566a20757STang Chen 				phys_addr_t base, phys_addr_t size,
48666a20757STang Chen 				int nid, unsigned long flags)
48795f72d1eSYinghai Lu {
488784656f9STejun Heo 	bool insert = false;
489eb18f1b5STejun Heo 	phys_addr_t obase = base;
490eb18f1b5STejun Heo 	phys_addr_t end = base + memblock_cap_size(base, &size);
491784656f9STejun Heo 	int i, nr_new;
49295f72d1eSYinghai Lu 
493b3dc627cSTejun Heo 	if (!size)
494b3dc627cSTejun Heo 		return 0;
495b3dc627cSTejun Heo 
496784656f9STejun Heo 	/* special case for empty array */
497784656f9STejun Heo 	if (type->regions[0].size == 0) {
4981440c4e2STejun Heo 		WARN_ON(type->cnt != 1 || type->total_size);
499784656f9STejun Heo 		type->regions[0].base = base;
500784656f9STejun Heo 		type->regions[0].size = size;
50166a20757STang Chen 		type->regions[0].flags = flags;
5027fb0bc3fSTejun Heo 		memblock_set_region_node(&type->regions[0], nid);
5031440c4e2STejun Heo 		type->total_size = size;
504784656f9STejun Heo 		return 0;
505784656f9STejun Heo 	}
506784656f9STejun Heo repeat:
507784656f9STejun Heo 	/*
508784656f9STejun Heo 	 * The following is executed twice.  Once with %false @insert and
509784656f9STejun Heo 	 * then with %true.  The first counts the number of regions needed
510784656f9STejun Heo 	 * to accomodate the new area.  The second actually inserts them.
511784656f9STejun Heo 	 */
512784656f9STejun Heo 	base = obase;
513784656f9STejun Heo 	nr_new = 0;
514784656f9STejun Heo 
5158f7a6605SBenjamin Herrenschmidt 	for (i = 0; i < type->cnt; i++) {
5168f7a6605SBenjamin Herrenschmidt 		struct memblock_region *rgn = &type->regions[i];
517784656f9STejun Heo 		phys_addr_t rbase = rgn->base;
518784656f9STejun Heo 		phys_addr_t rend = rbase + rgn->size;
5198f7a6605SBenjamin Herrenschmidt 
520784656f9STejun Heo 		if (rbase >= end)
5218f7a6605SBenjamin Herrenschmidt 			break;
522784656f9STejun Heo 		if (rend <= base)
523784656f9STejun Heo 			continue;
524784656f9STejun Heo 		/*
525784656f9STejun Heo 		 * @rgn overlaps.  If it separates the lower part of new
526784656f9STejun Heo 		 * area, insert that portion.
5278f7a6605SBenjamin Herrenschmidt 		 */
528784656f9STejun Heo 		if (rbase > base) {
529784656f9STejun Heo 			nr_new++;
530784656f9STejun Heo 			if (insert)
531784656f9STejun Heo 				memblock_insert_region(type, i++, base,
53266a20757STang Chen 						       rbase - base, nid,
53366a20757STang Chen 						       flags);
534784656f9STejun Heo 		}
535784656f9STejun Heo 		/* area below @rend is dealt with, forget about it */
536784656f9STejun Heo 		base = min(rend, end);
5378f7a6605SBenjamin Herrenschmidt 	}
5388f7a6605SBenjamin Herrenschmidt 
539784656f9STejun Heo 	/* insert the remaining portion */
540784656f9STejun Heo 	if (base < end) {
541784656f9STejun Heo 		nr_new++;
542784656f9STejun Heo 		if (insert)
54366a20757STang Chen 			memblock_insert_region(type, i, base, end - base,
54466a20757STang Chen 					       nid, flags);
5458f7a6605SBenjamin Herrenschmidt 	}
5468f7a6605SBenjamin Herrenschmidt 
547784656f9STejun Heo 	/*
548784656f9STejun Heo 	 * If this was the first round, resize array and repeat for actual
549784656f9STejun Heo 	 * insertions; otherwise, merge and return.
5508f7a6605SBenjamin Herrenschmidt 	 */
551784656f9STejun Heo 	if (!insert) {
552784656f9STejun Heo 		while (type->cnt + nr_new > type->max)
55348c3b583SGreg Pearson 			if (memblock_double_array(type, obase, size) < 0)
554784656f9STejun Heo 				return -ENOMEM;
555784656f9STejun Heo 		insert = true;
556784656f9STejun Heo 		goto repeat;
55795f72d1eSYinghai Lu 	} else {
558784656f9STejun Heo 		memblock_merge_regions(type);
55995f72d1eSYinghai Lu 		return 0;
56095f72d1eSYinghai Lu 	}
561784656f9STejun Heo }
56295f72d1eSYinghai Lu 
5637fb0bc3fSTejun Heo int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
5647fb0bc3fSTejun Heo 				       int nid)
5657fb0bc3fSTejun Heo {
56666a20757STang Chen 	return memblock_add_region(&memblock.memory, base, size, nid, 0);
5677fb0bc3fSTejun Heo }
5687fb0bc3fSTejun Heo 
569581adcbeSTejun Heo int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
57095f72d1eSYinghai Lu {
57166a20757STang Chen 	return memblock_add_region(&memblock.memory, base, size,
57266a20757STang Chen 				   MAX_NUMNODES, 0);
57395f72d1eSYinghai Lu }
57495f72d1eSYinghai Lu 
5756a9ceb31STejun Heo /**
5766a9ceb31STejun Heo  * memblock_isolate_range - isolate given range into disjoint memblocks
5776a9ceb31STejun Heo  * @type: memblock type to isolate range for
5786a9ceb31STejun Heo  * @base: base of range to isolate
5796a9ceb31STejun Heo  * @size: size of range to isolate
5806a9ceb31STejun Heo  * @start_rgn: out parameter for the start of isolated region
5816a9ceb31STejun Heo  * @end_rgn: out parameter for the end of isolated region
5826a9ceb31STejun Heo  *
5836a9ceb31STejun Heo  * Walk @type and ensure that regions don't cross the boundaries defined by
5846a9ceb31STejun Heo  * [@base,@base+@size).  Crossing regions are split at the boundaries,
5856a9ceb31STejun Heo  * which may create at most two more regions.  The index of the first
5866a9ceb31STejun Heo  * region inside the range is returned in *@start_rgn and end in *@end_rgn.
5876a9ceb31STejun Heo  *
5886a9ceb31STejun Heo  * RETURNS:
5896a9ceb31STejun Heo  * 0 on success, -errno on failure.
5906a9ceb31STejun Heo  */
5916a9ceb31STejun Heo static int __init_memblock memblock_isolate_range(struct memblock_type *type,
5926a9ceb31STejun Heo 					phys_addr_t base, phys_addr_t size,
5936a9ceb31STejun Heo 					int *start_rgn, int *end_rgn)
5946a9ceb31STejun Heo {
595eb18f1b5STejun Heo 	phys_addr_t end = base + memblock_cap_size(base, &size);
5966a9ceb31STejun Heo 	int i;
5976a9ceb31STejun Heo 
5986a9ceb31STejun Heo 	*start_rgn = *end_rgn = 0;
5996a9ceb31STejun Heo 
600b3dc627cSTejun Heo 	if (!size)
601b3dc627cSTejun Heo 		return 0;
602b3dc627cSTejun Heo 
6036a9ceb31STejun Heo 	/* we'll create at most two more regions */
6046a9ceb31STejun Heo 	while (type->cnt + 2 > type->max)
60548c3b583SGreg Pearson 		if (memblock_double_array(type, base, size) < 0)
6066a9ceb31STejun Heo 			return -ENOMEM;
6076a9ceb31STejun Heo 
6086a9ceb31STejun Heo 	for (i = 0; i < type->cnt; i++) {
6096a9ceb31STejun Heo 		struct memblock_region *rgn = &type->regions[i];
6106a9ceb31STejun Heo 		phys_addr_t rbase = rgn->base;
6116a9ceb31STejun Heo 		phys_addr_t rend = rbase + rgn->size;
6126a9ceb31STejun Heo 
6136a9ceb31STejun Heo 		if (rbase >= end)
6146a9ceb31STejun Heo 			break;
6156a9ceb31STejun Heo 		if (rend <= base)
6166a9ceb31STejun Heo 			continue;
6176a9ceb31STejun Heo 
6186a9ceb31STejun Heo 		if (rbase < base) {
6196a9ceb31STejun Heo 			/*
6206a9ceb31STejun Heo 			 * @rgn intersects from below.  Split and continue
6216a9ceb31STejun Heo 			 * to process the next region - the new top half.
6226a9ceb31STejun Heo 			 */
6236a9ceb31STejun Heo 			rgn->base = base;
6241440c4e2STejun Heo 			rgn->size -= base - rbase;
6251440c4e2STejun Heo 			type->total_size -= base - rbase;
6266a9ceb31STejun Heo 			memblock_insert_region(type, i, rbase, base - rbase,
62766a20757STang Chen 					       memblock_get_region_node(rgn),
62866a20757STang Chen 					       rgn->flags);
6296a9ceb31STejun Heo 		} else if (rend > end) {
6306a9ceb31STejun Heo 			/*
6316a9ceb31STejun Heo 			 * @rgn intersects from above.  Split and redo the
6326a9ceb31STejun Heo 			 * current region - the new bottom half.
6336a9ceb31STejun Heo 			 */
6346a9ceb31STejun Heo 			rgn->base = end;
6351440c4e2STejun Heo 			rgn->size -= end - rbase;
6361440c4e2STejun Heo 			type->total_size -= end - rbase;
6376a9ceb31STejun Heo 			memblock_insert_region(type, i--, rbase, end - rbase,
63866a20757STang Chen 					       memblock_get_region_node(rgn),
63966a20757STang Chen 					       rgn->flags);
6406a9ceb31STejun Heo 		} else {
6416a9ceb31STejun Heo 			/* @rgn is fully contained, record it */
6426a9ceb31STejun Heo 			if (!*end_rgn)
6436a9ceb31STejun Heo 				*start_rgn = i;
6446a9ceb31STejun Heo 			*end_rgn = i + 1;
6456a9ceb31STejun Heo 		}
6466a9ceb31STejun Heo 	}
6476a9ceb31STejun Heo 
6486a9ceb31STejun Heo 	return 0;
6496a9ceb31STejun Heo }
6506a9ceb31STejun Heo 
651581adcbeSTejun Heo static int __init_memblock __memblock_remove(struct memblock_type *type,
6528f7a6605SBenjamin Herrenschmidt 					     phys_addr_t base, phys_addr_t size)
65395f72d1eSYinghai Lu {
65471936180STejun Heo 	int start_rgn, end_rgn;
65571936180STejun Heo 	int i, ret;
65695f72d1eSYinghai Lu 
65771936180STejun Heo 	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
65871936180STejun Heo 	if (ret)
65971936180STejun Heo 		return ret;
66095f72d1eSYinghai Lu 
66171936180STejun Heo 	for (i = end_rgn - 1; i >= start_rgn; i--)
66271936180STejun Heo 		memblock_remove_region(type, i);
66395f72d1eSYinghai Lu 	return 0;
66495f72d1eSYinghai Lu }
66595f72d1eSYinghai Lu 
666581adcbeSTejun Heo int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
66795f72d1eSYinghai Lu {
66895f72d1eSYinghai Lu 	return __memblock_remove(&memblock.memory, base, size);
66995f72d1eSYinghai Lu }
67095f72d1eSYinghai Lu 
671581adcbeSTejun Heo int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
67295f72d1eSYinghai Lu {
67324aa0788STejun Heo 	memblock_dbg("   memblock_free: [%#016llx-%#016llx] %pF\n",
674a150439cSH. Peter Anvin 		     (unsigned long long)base,
675931d13f5SGrygorii Strashko 		     (unsigned long long)base + size - 1,
676a150439cSH. Peter Anvin 		     (void *)_RET_IP_);
67724aa0788STejun Heo 
67895f72d1eSYinghai Lu 	return __memblock_remove(&memblock.reserved, base, size);
67995f72d1eSYinghai Lu }
68095f72d1eSYinghai Lu 
68166a20757STang Chen static int __init_memblock memblock_reserve_region(phys_addr_t base,
68266a20757STang Chen 						   phys_addr_t size,
68366a20757STang Chen 						   int nid,
68466a20757STang Chen 						   unsigned long flags)
68595f72d1eSYinghai Lu {
686e3239ff9SBenjamin Herrenschmidt 	struct memblock_type *_rgn = &memblock.reserved;
68795f72d1eSYinghai Lu 
68866a20757STang Chen 	memblock_dbg("memblock_reserve: [%#016llx-%#016llx] flags %#02lx %pF\n",
689a150439cSH. Peter Anvin 		     (unsigned long long)base,
690931d13f5SGrygorii Strashko 		     (unsigned long long)base + size - 1,
69166a20757STang Chen 		     flags, (void *)_RET_IP_);
69295f72d1eSYinghai Lu 
69366a20757STang Chen 	return memblock_add_region(_rgn, base, size, nid, flags);
69466a20757STang Chen }
69566a20757STang Chen 
69666a20757STang Chen int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
69766a20757STang Chen {
69866a20757STang Chen 	return memblock_reserve_region(base, size, MAX_NUMNODES, 0);
69995f72d1eSYinghai Lu }
70095f72d1eSYinghai Lu 
70135fd0808STejun Heo /**
70266b16edfSTang Chen  * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
70366b16edfSTang Chen  * @base: the base phys addr of the region
70466b16edfSTang Chen  * @size: the size of the region
70566b16edfSTang Chen  *
70666b16edfSTang Chen  * This function isolates region [@base, @base + @size), and mark it with flag
70766b16edfSTang Chen  * MEMBLOCK_HOTPLUG.
70866b16edfSTang Chen  *
70966b16edfSTang Chen  * Return 0 on succees, -errno on failure.
71066b16edfSTang Chen  */
71166b16edfSTang Chen int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
71266b16edfSTang Chen {
71366b16edfSTang Chen 	struct memblock_type *type = &memblock.memory;
71466b16edfSTang Chen 	int i, ret, start_rgn, end_rgn;
71566b16edfSTang Chen 
71666b16edfSTang Chen 	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
71766b16edfSTang Chen 	if (ret)
71866b16edfSTang Chen 		return ret;
71966b16edfSTang Chen 
72066b16edfSTang Chen 	for (i = start_rgn; i < end_rgn; i++)
72166b16edfSTang Chen 		memblock_set_region_flags(&type->regions[i], MEMBLOCK_HOTPLUG);
72266b16edfSTang Chen 
72366b16edfSTang Chen 	memblock_merge_regions(type);
72466b16edfSTang Chen 	return 0;
72566b16edfSTang Chen }
72666b16edfSTang Chen 
72766b16edfSTang Chen /**
72866b16edfSTang Chen  * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
72966b16edfSTang Chen  * @base: the base phys addr of the region
73066b16edfSTang Chen  * @size: the size of the region
73166b16edfSTang Chen  *
73266b16edfSTang Chen  * This function isolates region [@base, @base + @size), and clear flag
73366b16edfSTang Chen  * MEMBLOCK_HOTPLUG for the isolated regions.
73466b16edfSTang Chen  *
73566b16edfSTang Chen  * Return 0 on succees, -errno on failure.
73666b16edfSTang Chen  */
73766b16edfSTang Chen int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
73866b16edfSTang Chen {
73966b16edfSTang Chen 	struct memblock_type *type = &memblock.memory;
74066b16edfSTang Chen 	int i, ret, start_rgn, end_rgn;
74166b16edfSTang Chen 
74266b16edfSTang Chen 	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
74366b16edfSTang Chen 	if (ret)
74466b16edfSTang Chen 		return ret;
74566b16edfSTang Chen 
74666b16edfSTang Chen 	for (i = start_rgn; i < end_rgn; i++)
74766b16edfSTang Chen 		memblock_clear_region_flags(&type->regions[i],
74866b16edfSTang Chen 					    MEMBLOCK_HOTPLUG);
74966b16edfSTang Chen 
75066b16edfSTang Chen 	memblock_merge_regions(type);
75166b16edfSTang Chen 	return 0;
75266b16edfSTang Chen }
75366b16edfSTang Chen 
75466b16edfSTang Chen /**
75535fd0808STejun Heo  * __next_free_mem_range - next function for for_each_free_mem_range()
75635fd0808STejun Heo  * @idx: pointer to u64 loop variable
757d8bbdd77STang Chen  * @nid: node selector, %MAX_NUMNODES for all nodes
758dad7557eSWanpeng Li  * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
759dad7557eSWanpeng Li  * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
760dad7557eSWanpeng Li  * @out_nid: ptr to int for nid of the range, can be %NULL
76135fd0808STejun Heo  *
76235fd0808STejun Heo  * Find the first free area from *@idx which matches @nid, fill the out
76335fd0808STejun Heo  * parameters, and update *@idx for the next iteration.  The lower 32bit of
76435fd0808STejun Heo  * *@idx contains index into memory region and the upper 32bit indexes the
76535fd0808STejun Heo  * areas before each reserved region.  For example, if reserved regions
76635fd0808STejun Heo  * look like the following,
76735fd0808STejun Heo  *
76835fd0808STejun Heo  *	0:[0-16), 1:[32-48), 2:[128-130)
76935fd0808STejun Heo  *
77035fd0808STejun Heo  * The upper 32bit indexes the following regions.
77135fd0808STejun Heo  *
77235fd0808STejun Heo  *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
77335fd0808STejun Heo  *
77435fd0808STejun Heo  * As both region arrays are sorted, the function advances the two indices
77535fd0808STejun Heo  * in lockstep and returns each intersection.
77635fd0808STejun Heo  */
77735fd0808STejun Heo void __init_memblock __next_free_mem_range(u64 *idx, int nid,
77835fd0808STejun Heo 					   phys_addr_t *out_start,
77935fd0808STejun Heo 					   phys_addr_t *out_end, int *out_nid)
78035fd0808STejun Heo {
78135fd0808STejun Heo 	struct memblock_type *mem = &memblock.memory;
78235fd0808STejun Heo 	struct memblock_type *rsv = &memblock.reserved;
78335fd0808STejun Heo 	int mi = *idx & 0xffffffff;
78435fd0808STejun Heo 	int ri = *idx >> 32;
78535fd0808STejun Heo 
78635fd0808STejun Heo 	for ( ; mi < mem->cnt; mi++) {
78735fd0808STejun Heo 		struct memblock_region *m = &mem->regions[mi];
78835fd0808STejun Heo 		phys_addr_t m_start = m->base;
78935fd0808STejun Heo 		phys_addr_t m_end = m->base + m->size;
79035fd0808STejun Heo 
79135fd0808STejun Heo 		/* only memory regions are associated with nodes, check it */
79235fd0808STejun Heo 		if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
79335fd0808STejun Heo 			continue;
79435fd0808STejun Heo 
79535fd0808STejun Heo 		/* scan areas before each reservation for intersection */
79635fd0808STejun Heo 		for ( ; ri < rsv->cnt + 1; ri++) {
79735fd0808STejun Heo 			struct memblock_region *r = &rsv->regions[ri];
79835fd0808STejun Heo 			phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0;
79935fd0808STejun Heo 			phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX;
80035fd0808STejun Heo 
80135fd0808STejun Heo 			/* if ri advanced past mi, break out to advance mi */
80235fd0808STejun Heo 			if (r_start >= m_end)
80335fd0808STejun Heo 				break;
80435fd0808STejun Heo 			/* if the two regions intersect, we're done */
80535fd0808STejun Heo 			if (m_start < r_end) {
80635fd0808STejun Heo 				if (out_start)
80735fd0808STejun Heo 					*out_start = max(m_start, r_start);
80835fd0808STejun Heo 				if (out_end)
80935fd0808STejun Heo 					*out_end = min(m_end, r_end);
81035fd0808STejun Heo 				if (out_nid)
81135fd0808STejun Heo 					*out_nid = memblock_get_region_node(m);
81235fd0808STejun Heo 				/*
81335fd0808STejun Heo 				 * The region which ends first is advanced
81435fd0808STejun Heo 				 * for the next iteration.
81535fd0808STejun Heo 				 */
81635fd0808STejun Heo 				if (m_end <= r_end)
81735fd0808STejun Heo 					mi++;
81835fd0808STejun Heo 				else
81935fd0808STejun Heo 					ri++;
82035fd0808STejun Heo 				*idx = (u32)mi | (u64)ri << 32;
82135fd0808STejun Heo 				return;
82235fd0808STejun Heo 			}
82335fd0808STejun Heo 		}
82435fd0808STejun Heo 	}
82535fd0808STejun Heo 
82635fd0808STejun Heo 	/* signal end of iteration */
82735fd0808STejun Heo 	*idx = ULLONG_MAX;
82835fd0808STejun Heo }
82935fd0808STejun Heo 
8307bd0b0f0STejun Heo /**
8317bd0b0f0STejun Heo  * __next_free_mem_range_rev - next function for for_each_free_mem_range_reverse()
8327bd0b0f0STejun Heo  * @idx: pointer to u64 loop variable
8337bd0b0f0STejun Heo  * @nid: nid: node selector, %MAX_NUMNODES for all nodes
834dad7557eSWanpeng Li  * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
835dad7557eSWanpeng Li  * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
836dad7557eSWanpeng Li  * @out_nid: ptr to int for nid of the range, can be %NULL
8377bd0b0f0STejun Heo  *
8387bd0b0f0STejun Heo  * Reverse of __next_free_mem_range().
83955ac590cSTang Chen  *
84055ac590cSTang Chen  * Linux kernel cannot migrate pages used by itself. Memory hotplug users won't
84155ac590cSTang Chen  * be able to hot-remove hotpluggable memory used by the kernel. So this
84255ac590cSTang Chen  * function skip hotpluggable regions if needed when allocating memory for the
84355ac590cSTang Chen  * kernel.
8447bd0b0f0STejun Heo  */
8457bd0b0f0STejun Heo void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid,
8467bd0b0f0STejun Heo 					   phys_addr_t *out_start,
8477bd0b0f0STejun Heo 					   phys_addr_t *out_end, int *out_nid)
8487bd0b0f0STejun Heo {
8497bd0b0f0STejun Heo 	struct memblock_type *mem = &memblock.memory;
8507bd0b0f0STejun Heo 	struct memblock_type *rsv = &memblock.reserved;
8517bd0b0f0STejun Heo 	int mi = *idx & 0xffffffff;
8527bd0b0f0STejun Heo 	int ri = *idx >> 32;
8537bd0b0f0STejun Heo 
8547bd0b0f0STejun Heo 	if (*idx == (u64)ULLONG_MAX) {
8557bd0b0f0STejun Heo 		mi = mem->cnt - 1;
8567bd0b0f0STejun Heo 		ri = rsv->cnt;
8577bd0b0f0STejun Heo 	}
8587bd0b0f0STejun Heo 
8597bd0b0f0STejun Heo 	for ( ; mi >= 0; mi--) {
8607bd0b0f0STejun Heo 		struct memblock_region *m = &mem->regions[mi];
8617bd0b0f0STejun Heo 		phys_addr_t m_start = m->base;
8627bd0b0f0STejun Heo 		phys_addr_t m_end = m->base + m->size;
8637bd0b0f0STejun Heo 
8647bd0b0f0STejun Heo 		/* only memory regions are associated with nodes, check it */
8657bd0b0f0STejun Heo 		if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
8667bd0b0f0STejun Heo 			continue;
8677bd0b0f0STejun Heo 
86855ac590cSTang Chen 		/* skip hotpluggable memory regions if needed */
86955ac590cSTang Chen 		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
87055ac590cSTang Chen 			continue;
87155ac590cSTang Chen 
8727bd0b0f0STejun Heo 		/* scan areas before each reservation for intersection */
8737bd0b0f0STejun Heo 		for ( ; ri >= 0; ri--) {
8747bd0b0f0STejun Heo 			struct memblock_region *r = &rsv->regions[ri];
8757bd0b0f0STejun Heo 			phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0;
8767bd0b0f0STejun Heo 			phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX;
8777bd0b0f0STejun Heo 
8787bd0b0f0STejun Heo 			/* if ri advanced past mi, break out to advance mi */
8797bd0b0f0STejun Heo 			if (r_end <= m_start)
8807bd0b0f0STejun Heo 				break;
8817bd0b0f0STejun Heo 			/* if the two regions intersect, we're done */
8827bd0b0f0STejun Heo 			if (m_end > r_start) {
8837bd0b0f0STejun Heo 				if (out_start)
8847bd0b0f0STejun Heo 					*out_start = max(m_start, r_start);
8857bd0b0f0STejun Heo 				if (out_end)
8867bd0b0f0STejun Heo 					*out_end = min(m_end, r_end);
8877bd0b0f0STejun Heo 				if (out_nid)
8887bd0b0f0STejun Heo 					*out_nid = memblock_get_region_node(m);
8897bd0b0f0STejun Heo 
8907bd0b0f0STejun Heo 				if (m_start >= r_start)
8917bd0b0f0STejun Heo 					mi--;
8927bd0b0f0STejun Heo 				else
8937bd0b0f0STejun Heo 					ri--;
8947bd0b0f0STejun Heo 				*idx = (u32)mi | (u64)ri << 32;
8957bd0b0f0STejun Heo 				return;
8967bd0b0f0STejun Heo 			}
8977bd0b0f0STejun Heo 		}
8987bd0b0f0STejun Heo 	}
8997bd0b0f0STejun Heo 
9007bd0b0f0STejun Heo 	*idx = ULLONG_MAX;
9017bd0b0f0STejun Heo }
9027bd0b0f0STejun Heo 
9037c0caeb8STejun Heo #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
9077c0caeb8STejun Heo void __init_memblock __next_mem_pfn_range(int *idx, int nid,
9087c0caeb8STejun Heo 				unsigned long *out_start_pfn,
9097c0caeb8STejun Heo 				unsigned long *out_end_pfn, int *out_nid)
9107c0caeb8STejun Heo {
9117c0caeb8STejun Heo 	struct memblock_type *type = &memblock.memory;
9127c0caeb8STejun Heo 	struct memblock_region *r;
9137c0caeb8STejun Heo 
9147c0caeb8STejun Heo 	while (++*idx < type->cnt) {
9157c0caeb8STejun Heo 		r = &type->regions[*idx];
9167c0caeb8STejun Heo 
9177c0caeb8STejun Heo 		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
9187c0caeb8STejun Heo 			continue;
9197c0caeb8STejun Heo 		if (nid == MAX_NUMNODES || nid == r->nid)
9207c0caeb8STejun Heo 			break;
9217c0caeb8STejun Heo 	}
9227c0caeb8STejun Heo 	if (*idx >= type->cnt) {
9237c0caeb8STejun Heo 		*idx = -1;
9247c0caeb8STejun Heo 		return;
9257c0caeb8STejun Heo 	}
9267c0caeb8STejun Heo 
9277c0caeb8STejun Heo 	if (out_start_pfn)
9287c0caeb8STejun Heo 		*out_start_pfn = PFN_UP(r->base);
9297c0caeb8STejun Heo 	if (out_end_pfn)
9307c0caeb8STejun Heo 		*out_end_pfn = PFN_DOWN(r->base + r->size);
9317c0caeb8STejun Heo 	if (out_nid)
9327c0caeb8STejun Heo 		*out_nid = r->nid;
9337c0caeb8STejun Heo }
9347c0caeb8STejun Heo 
9357c0caeb8STejun Heo /**
9367c0caeb8STejun Heo  * memblock_set_node - set node ID on memblock regions
9377c0caeb8STejun Heo  * @base: base of area to set node ID for
9387c0caeb8STejun Heo  * @size: size of area to set node ID for
939e7e8de59STang Chen  * @type: memblock type to set node ID for
9407c0caeb8STejun Heo  * @nid: node ID to set
9417c0caeb8STejun Heo  *
942e7e8de59STang Chen  * Set the nid of memblock @type regions in [@base,@base+@size) to @nid.
9437c0caeb8STejun Heo  * Regions which cross the area boundaries are split as necessary.
9447c0caeb8STejun Heo  *
9457c0caeb8STejun Heo  * RETURNS:
9467c0caeb8STejun Heo  * 0 on success, -errno on failure.
9477c0caeb8STejun Heo  */
9487c0caeb8STejun Heo int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
949e7e8de59STang Chen 				      struct memblock_type *type, int nid)
9507c0caeb8STejun Heo {
9516a9ceb31STejun Heo 	int start_rgn, end_rgn;
9526a9ceb31STejun Heo 	int i, ret;
9537c0caeb8STejun Heo 
9546a9ceb31STejun Heo 	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
9556a9ceb31STejun Heo 	if (ret)
9566a9ceb31STejun Heo 		return ret;
9577c0caeb8STejun Heo 
9586a9ceb31STejun Heo 	for (i = start_rgn; i < end_rgn; i++)
959e9d24ad3SWanpeng Li 		memblock_set_region_node(&type->regions[i], nid);
9607c0caeb8STejun Heo 
9617c0caeb8STejun Heo 	memblock_merge_regions(type);
9627c0caeb8STejun Heo 	return 0;
9637c0caeb8STejun Heo }
9647c0caeb8STejun Heo #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
9657c0caeb8STejun Heo 
9667bd0b0f0STejun Heo static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
9677bd0b0f0STejun Heo 					phys_addr_t align, phys_addr_t max_addr,
9687bd0b0f0STejun Heo 					int nid)
96995f72d1eSYinghai Lu {
9706ed311b2SBenjamin Herrenschmidt 	phys_addr_t found;
97195f72d1eSYinghai Lu 
97279f40fabSGrygorii Strashko 	if (!align)
97379f40fabSGrygorii Strashko 		align = SMP_CACHE_BYTES;
97494f3d3afSVineet Gupta 
975847854f5STejun Heo 	/* align @size to avoid excessive fragmentation on reserved array */
976847854f5STejun Heo 	size = round_up(size, align);
977847854f5STejun Heo 
978*87029ee9SGrygorii Strashko 	found = memblock_find_in_range_node(size, align, 0, max_addr, nid);
9799c8c27e2STejun Heo 	if (found && !memblock_reserve(found, size))
9806ed311b2SBenjamin Herrenschmidt 		return found;
9816ed311b2SBenjamin Herrenschmidt 
9826ed311b2SBenjamin Herrenschmidt 	return 0;
98395f72d1eSYinghai Lu }
98495f72d1eSYinghai Lu 
9857bd0b0f0STejun Heo phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
9867bd0b0f0STejun Heo {
9877bd0b0f0STejun Heo 	return memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
9887bd0b0f0STejun Heo }
9897bd0b0f0STejun Heo 
9907bd0b0f0STejun Heo phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
9917bd0b0f0STejun Heo {
9927bd0b0f0STejun Heo 	return memblock_alloc_base_nid(size, align, max_addr, MAX_NUMNODES);
9937bd0b0f0STejun Heo }
9947bd0b0f0STejun Heo 
9956ed311b2SBenjamin Herrenschmidt phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
99695f72d1eSYinghai Lu {
9976ed311b2SBenjamin Herrenschmidt 	phys_addr_t alloc;
9986ed311b2SBenjamin Herrenschmidt 
9996ed311b2SBenjamin Herrenschmidt 	alloc = __memblock_alloc_base(size, align, max_addr);
10006ed311b2SBenjamin Herrenschmidt 
10016ed311b2SBenjamin Herrenschmidt 	if (alloc == 0)
10026ed311b2SBenjamin Herrenschmidt 		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
10036ed311b2SBenjamin Herrenschmidt 		      (unsigned long long) size, (unsigned long long) max_addr);
10046ed311b2SBenjamin Herrenschmidt 
10056ed311b2SBenjamin Herrenschmidt 	return alloc;
100695f72d1eSYinghai Lu }
100795f72d1eSYinghai Lu 
10086ed311b2SBenjamin Herrenschmidt phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
100995f72d1eSYinghai Lu {
10106ed311b2SBenjamin Herrenschmidt 	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
101195f72d1eSYinghai Lu }
101295f72d1eSYinghai Lu 
10139d1e2492SBenjamin Herrenschmidt phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
10149d1e2492SBenjamin Herrenschmidt {
10159d1e2492SBenjamin Herrenschmidt 	phys_addr_t res = memblock_alloc_nid(size, align, nid);
10169d1e2492SBenjamin Herrenschmidt 
10179d1e2492SBenjamin Herrenschmidt 	if (res)
10189d1e2492SBenjamin Herrenschmidt 		return res;
101915fb0972STejun Heo 	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
102095f72d1eSYinghai Lu }
102195f72d1eSYinghai Lu 
10229d1e2492SBenjamin Herrenschmidt 
10239d1e2492SBenjamin Herrenschmidt /*
10249d1e2492SBenjamin Herrenschmidt  * Remaining API functions
10259d1e2492SBenjamin Herrenschmidt  */
10269d1e2492SBenjamin Herrenschmidt 
10272898cc4cSBenjamin Herrenschmidt phys_addr_t __init memblock_phys_mem_size(void)
102895f72d1eSYinghai Lu {
10291440c4e2STejun Heo 	return memblock.memory.total_size;
103095f72d1eSYinghai Lu }
103195f72d1eSYinghai Lu 
1032595ad9afSYinghai Lu phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
1033595ad9afSYinghai Lu {
1034595ad9afSYinghai Lu 	unsigned long pages = 0;
1035595ad9afSYinghai Lu 	struct memblock_region *r;
1036595ad9afSYinghai Lu 	unsigned long start_pfn, end_pfn;
1037595ad9afSYinghai Lu 
1038595ad9afSYinghai Lu 	for_each_memblock(memory, r) {
1039595ad9afSYinghai Lu 		start_pfn = memblock_region_memory_base_pfn(r);
1040595ad9afSYinghai Lu 		end_pfn = memblock_region_memory_end_pfn(r);
1041595ad9afSYinghai Lu 		start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
1042595ad9afSYinghai Lu 		end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
1043595ad9afSYinghai Lu 		pages += end_pfn - start_pfn;
1044595ad9afSYinghai Lu 	}
1045595ad9afSYinghai Lu 
1046595ad9afSYinghai Lu 	return (phys_addr_t)pages << PAGE_SHIFT;
1047595ad9afSYinghai Lu }
1048595ad9afSYinghai Lu 
10490a93ebefSSam Ravnborg /* lowest address */
10500a93ebefSSam Ravnborg phys_addr_t __init_memblock memblock_start_of_DRAM(void)
10510a93ebefSSam Ravnborg {
10520a93ebefSSam Ravnborg 	return memblock.memory.regions[0].base;
10530a93ebefSSam Ravnborg }
10540a93ebefSSam Ravnborg 
105510d06439SYinghai Lu phys_addr_t __init_memblock memblock_end_of_DRAM(void)
105695f72d1eSYinghai Lu {
105795f72d1eSYinghai Lu 	int idx = memblock.memory.cnt - 1;
105895f72d1eSYinghai Lu 
1059e3239ff9SBenjamin Herrenschmidt 	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
106095f72d1eSYinghai Lu }
106195f72d1eSYinghai Lu 
1062c0ce8fefSTejun Heo void __init memblock_enforce_memory_limit(phys_addr_t limit)
106395f72d1eSYinghai Lu {
106495f72d1eSYinghai Lu 	unsigned long i;
1065c0ce8fefSTejun Heo 	phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;
106695f72d1eSYinghai Lu 
1067c0ce8fefSTejun Heo 	if (!limit)
106895f72d1eSYinghai Lu 		return;
106995f72d1eSYinghai Lu 
1070c0ce8fefSTejun Heo 	/* find out max address */
107195f72d1eSYinghai Lu 	for (i = 0; i < memblock.memory.cnt; i++) {
1072c0ce8fefSTejun Heo 		struct memblock_region *r = &memblock.memory.regions[i];
107395f72d1eSYinghai Lu 
1074c0ce8fefSTejun Heo 		if (limit <= r->size) {
1075c0ce8fefSTejun Heo 			max_addr = r->base + limit;
107695f72d1eSYinghai Lu 			break;
107795f72d1eSYinghai Lu 		}
1078c0ce8fefSTejun Heo 		limit -= r->size;
107995f72d1eSYinghai Lu 	}
1080c0ce8fefSTejun Heo 
1081c0ce8fefSTejun Heo 	/* truncate both memory and reserved regions */
1082c0ce8fefSTejun Heo 	__memblock_remove(&memblock.memory, max_addr, (phys_addr_t)ULLONG_MAX);
1083c0ce8fefSTejun Heo 	__memblock_remove(&memblock.reserved, max_addr, (phys_addr_t)ULLONG_MAX);
108495f72d1eSYinghai Lu }
108595f72d1eSYinghai Lu 
1086cd79481dSYinghai Lu static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
108772d4b0b4SBenjamin Herrenschmidt {
108872d4b0b4SBenjamin Herrenschmidt 	unsigned int left = 0, right = type->cnt;
108972d4b0b4SBenjamin Herrenschmidt 
109072d4b0b4SBenjamin Herrenschmidt 	do {
109172d4b0b4SBenjamin Herrenschmidt 		unsigned int mid = (right + left) / 2;
109272d4b0b4SBenjamin Herrenschmidt 
109372d4b0b4SBenjamin Herrenschmidt 		if (addr < type->regions[mid].base)
109472d4b0b4SBenjamin Herrenschmidt 			right = mid;
109572d4b0b4SBenjamin Herrenschmidt 		else if (addr >= (type->regions[mid].base +
109672d4b0b4SBenjamin Herrenschmidt 				  type->regions[mid].size))
109772d4b0b4SBenjamin Herrenschmidt 			left = mid + 1;
109872d4b0b4SBenjamin Herrenschmidt 		else
109972d4b0b4SBenjamin Herrenschmidt 			return mid;
110072d4b0b4SBenjamin Herrenschmidt 	} while (left < right);
110172d4b0b4SBenjamin Herrenschmidt 	return -1;
110272d4b0b4SBenjamin Herrenschmidt }
110372d4b0b4SBenjamin Herrenschmidt 
11042898cc4cSBenjamin Herrenschmidt int __init memblock_is_reserved(phys_addr_t addr)
110595f72d1eSYinghai Lu {
110672d4b0b4SBenjamin Herrenschmidt 	return memblock_search(&memblock.reserved, addr) != -1;
110795f72d1eSYinghai Lu }
110872d4b0b4SBenjamin Herrenschmidt 
11093661ca66SYinghai Lu int __init_memblock memblock_is_memory(phys_addr_t addr)
111072d4b0b4SBenjamin Herrenschmidt {
111172d4b0b4SBenjamin Herrenschmidt 	return memblock_search(&memblock.memory, addr) != -1;
111272d4b0b4SBenjamin Herrenschmidt }
111372d4b0b4SBenjamin Herrenschmidt 
1114e76b63f8SYinghai Lu #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
1115e76b63f8SYinghai Lu int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
1116e76b63f8SYinghai Lu 			 unsigned long *start_pfn, unsigned long *end_pfn)
1117e76b63f8SYinghai Lu {
1118e76b63f8SYinghai Lu 	struct memblock_type *type = &memblock.memory;
1119e76b63f8SYinghai Lu 	int mid = memblock_search(type, (phys_addr_t)pfn << PAGE_SHIFT);
1120e76b63f8SYinghai Lu 
1121e76b63f8SYinghai Lu 	if (mid == -1)
1122e76b63f8SYinghai Lu 		return -1;
1123e76b63f8SYinghai Lu 
1124e76b63f8SYinghai Lu 	*start_pfn = type->regions[mid].base >> PAGE_SHIFT;
1125e76b63f8SYinghai Lu 	*end_pfn = (type->regions[mid].base + type->regions[mid].size)
1126e76b63f8SYinghai Lu 			>> PAGE_SHIFT;
1127e76b63f8SYinghai Lu 
1128e76b63f8SYinghai Lu 	return type->regions[mid].nid;
1129e76b63f8SYinghai Lu }
1130e76b63f8SYinghai Lu #endif
1131e76b63f8SYinghai Lu 
1132eab30949SStephen Boyd /**
1133eab30949SStephen Boyd  * memblock_is_region_memory - check if a region is a subset of memory
1134eab30949SStephen Boyd  * @base: base of region to check
1135eab30949SStephen Boyd  * @size: size of region to check
1136eab30949SStephen Boyd  *
1137eab30949SStephen Boyd  * Check if the region [@base, @base+@size) is a subset of a memory block.
1138eab30949SStephen Boyd  *
1139eab30949SStephen Boyd  * RETURNS:
1140eab30949SStephen Boyd  * 0 if false, non-zero if true
1141eab30949SStephen Boyd  */
11423661ca66SYinghai Lu int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
114372d4b0b4SBenjamin Herrenschmidt {
1144abb65272STomi Valkeinen 	int idx = memblock_search(&memblock.memory, base);
1145eb18f1b5STejun Heo 	phys_addr_t end = base + memblock_cap_size(base, &size);
114672d4b0b4SBenjamin Herrenschmidt 
114772d4b0b4SBenjamin Herrenschmidt 	if (idx == -1)
114895f72d1eSYinghai Lu 		return 0;
1149abb65272STomi Valkeinen 	return memblock.memory.regions[idx].base <= base &&
1150abb65272STomi Valkeinen 		(memblock.memory.regions[idx].base +
1151eb18f1b5STejun Heo 		 memblock.memory.regions[idx].size) >= end;
115295f72d1eSYinghai Lu }
115395f72d1eSYinghai Lu 
1154eab30949SStephen Boyd /**
1155eab30949SStephen Boyd  * memblock_is_region_reserved - check if a region intersects reserved memory
1156eab30949SStephen Boyd  * @base: base of region to check
1157eab30949SStephen Boyd  * @size: size of region to check
1158eab30949SStephen Boyd  *
1159eab30949SStephen Boyd  * Check if the region [@base, @base+@size) intersects a reserved memory block.
1160eab30949SStephen Boyd  *
1161eab30949SStephen Boyd  * RETURNS:
1162eab30949SStephen Boyd  * 0 if false, non-zero if true
1163eab30949SStephen Boyd  */
116410d06439SYinghai Lu int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
116595f72d1eSYinghai Lu {
1166eb18f1b5STejun Heo 	memblock_cap_size(base, &size);
1167f1c2c19cSBenjamin Herrenschmidt 	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
116895f72d1eSYinghai Lu }
116995f72d1eSYinghai Lu 
11706ede1fd3SYinghai Lu void __init_memblock memblock_trim_memory(phys_addr_t align)
11716ede1fd3SYinghai Lu {
11726ede1fd3SYinghai Lu 	int i;
11736ede1fd3SYinghai Lu 	phys_addr_t start, end, orig_start, orig_end;
11746ede1fd3SYinghai Lu 	struct memblock_type *mem = &memblock.memory;
11756ede1fd3SYinghai Lu 
11766ede1fd3SYinghai Lu 	for (i = 0; i < mem->cnt; i++) {
11776ede1fd3SYinghai Lu 		orig_start = mem->regions[i].base;
11786ede1fd3SYinghai Lu 		orig_end = mem->regions[i].base + mem->regions[i].size;
11796ede1fd3SYinghai Lu 		start = round_up(orig_start, align);
11806ede1fd3SYinghai Lu 		end = round_down(orig_end, align);
11816ede1fd3SYinghai Lu 
11826ede1fd3SYinghai Lu 		if (start == orig_start && end == orig_end)
11836ede1fd3SYinghai Lu 			continue;
11846ede1fd3SYinghai Lu 
11856ede1fd3SYinghai Lu 		if (start < end) {
11866ede1fd3SYinghai Lu 			mem->regions[i].base = start;
11876ede1fd3SYinghai Lu 			mem->regions[i].size = end - start;
11886ede1fd3SYinghai Lu 		} else {
11896ede1fd3SYinghai Lu 			memblock_remove_region(mem, i);
11906ede1fd3SYinghai Lu 			i--;
11916ede1fd3SYinghai Lu 		}
11926ede1fd3SYinghai Lu 	}
11936ede1fd3SYinghai Lu }
1194e63075a3SBenjamin Herrenschmidt 
11953661ca66SYinghai Lu void __init_memblock memblock_set_current_limit(phys_addr_t limit)
1196e63075a3SBenjamin Herrenschmidt {
1197e63075a3SBenjamin Herrenschmidt 	memblock.current_limit = limit;
1198e63075a3SBenjamin Herrenschmidt }
1199e63075a3SBenjamin Herrenschmidt 
12007c0caeb8STejun Heo static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
12016ed311b2SBenjamin Herrenschmidt {
12026ed311b2SBenjamin Herrenschmidt 	unsigned long long base, size;
120366a20757STang Chen 	unsigned long flags;
12046ed311b2SBenjamin Herrenschmidt 	int i;
12056ed311b2SBenjamin Herrenschmidt 
12067c0caeb8STejun Heo 	pr_info(" %s.cnt  = 0x%lx\n", name, type->cnt);
12076ed311b2SBenjamin Herrenschmidt 
12087c0caeb8STejun Heo 	for (i = 0; i < type->cnt; i++) {
12097c0caeb8STejun Heo 		struct memblock_region *rgn = &type->regions[i];
12107c0caeb8STejun Heo 		char nid_buf[32] = "";
12116ed311b2SBenjamin Herrenschmidt 
12127c0caeb8STejun Heo 		base = rgn->base;
12137c0caeb8STejun Heo 		size = rgn->size;
121466a20757STang Chen 		flags = rgn->flags;
12157c0caeb8STejun Heo #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
12167c0caeb8STejun Heo 		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
12177c0caeb8STejun Heo 			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
12187c0caeb8STejun Heo 				 memblock_get_region_node(rgn));
12197c0caeb8STejun Heo #endif
122066a20757STang Chen 		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s flags: %#lx\n",
122166a20757STang Chen 			name, i, base, base + size - 1, size, nid_buf, flags);
12226ed311b2SBenjamin Herrenschmidt 	}
12236ed311b2SBenjamin Herrenschmidt }
12246ed311b2SBenjamin Herrenschmidt 
12254ff7b82fSTejun Heo void __init_memblock __memblock_dump_all(void)
12266ed311b2SBenjamin Herrenschmidt {
12276ed311b2SBenjamin Herrenschmidt 	pr_info("MEMBLOCK configuration:\n");
12281440c4e2STejun Heo 	pr_info(" memory size = %#llx reserved size = %#llx\n",
12291440c4e2STejun Heo 		(unsigned long long)memblock.memory.total_size,
12301440c4e2STejun Heo 		(unsigned long long)memblock.reserved.total_size);
12316ed311b2SBenjamin Herrenschmidt 
12326ed311b2SBenjamin Herrenschmidt 	memblock_dump(&memblock.memory, "memory");
12336ed311b2SBenjamin Herrenschmidt 	memblock_dump(&memblock.reserved, "reserved");
12346ed311b2SBenjamin Herrenschmidt }
12356ed311b2SBenjamin Herrenschmidt 
/*
 * Allow the region arrays to be resized; called once slab/bootmem is far
 * enough along that memblock_double_array() can allocate replacements.
 */
void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}
12406ed311b2SBenjamin Herrenschmidt 
12416ed311b2SBenjamin Herrenschmidt static int __init early_memblock(char *p)
12426ed311b2SBenjamin Herrenschmidt {
12436ed311b2SBenjamin Herrenschmidt 	if (p && strstr(p, "debug"))
12446ed311b2SBenjamin Herrenschmidt 		memblock_debug = 1;
12456ed311b2SBenjamin Herrenschmidt 	return 0;
12466ed311b2SBenjamin Herrenschmidt }
12476ed311b2SBenjamin Herrenschmidt early_param("memblock", early_memblock);
12486ed311b2SBenjamin Herrenschmidt 
1249c378ddd5STejun Heo #if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)
12506d03b885SBenjamin Herrenschmidt 
12516d03b885SBenjamin Herrenschmidt static int memblock_debug_show(struct seq_file *m, void *private)
12526d03b885SBenjamin Herrenschmidt {
12536d03b885SBenjamin Herrenschmidt 	struct memblock_type *type = m->private;
12546d03b885SBenjamin Herrenschmidt 	struct memblock_region *reg;
12556d03b885SBenjamin Herrenschmidt 	int i;
12566d03b885SBenjamin Herrenschmidt 
12576d03b885SBenjamin Herrenschmidt 	for (i = 0; i < type->cnt; i++) {
12586d03b885SBenjamin Herrenschmidt 		reg = &type->regions[i];
12596d03b885SBenjamin Herrenschmidt 		seq_printf(m, "%4d: ", i);
12606d03b885SBenjamin Herrenschmidt 		if (sizeof(phys_addr_t) == 4)
12616d03b885SBenjamin Herrenschmidt 			seq_printf(m, "0x%08lx..0x%08lx\n",
12626d03b885SBenjamin Herrenschmidt 				   (unsigned long)reg->base,
12636d03b885SBenjamin Herrenschmidt 				   (unsigned long)(reg->base + reg->size - 1));
12646d03b885SBenjamin Herrenschmidt 		else
12656d03b885SBenjamin Herrenschmidt 			seq_printf(m, "0x%016llx..0x%016llx\n",
12666d03b885SBenjamin Herrenschmidt 				   (unsigned long long)reg->base,
12676d03b885SBenjamin Herrenschmidt 				   (unsigned long long)(reg->base + reg->size - 1));
12686d03b885SBenjamin Herrenschmidt 
12696d03b885SBenjamin Herrenschmidt 	}
12706d03b885SBenjamin Herrenschmidt 	return 0;
12716d03b885SBenjamin Herrenschmidt }
12726d03b885SBenjamin Herrenschmidt 
/* debugfs open callback: bind the seq_file show routine to this node. */
static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}
12776d03b885SBenjamin Herrenschmidt 
/* Read-only seq_file operations for the memblock debugfs entries. */
static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
12846d03b885SBenjamin Herrenschmidt 
12856d03b885SBenjamin Herrenschmidt static int __init memblock_init_debugfs(void)
12866d03b885SBenjamin Herrenschmidt {
12876d03b885SBenjamin Herrenschmidt 	struct dentry *root = debugfs_create_dir("memblock", NULL);
12886d03b885SBenjamin Herrenschmidt 	if (!root)
12896d03b885SBenjamin Herrenschmidt 		return -ENXIO;
12906d03b885SBenjamin Herrenschmidt 	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
12916d03b885SBenjamin Herrenschmidt 	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);
12926d03b885SBenjamin Herrenschmidt 
12936d03b885SBenjamin Herrenschmidt 	return 0;
12946d03b885SBenjamin Herrenschmidt }
12956d03b885SBenjamin Herrenschmidt __initcall(memblock_init_debugfs);
12966d03b885SBenjamin Herrenschmidt 
12976d03b885SBenjamin Herrenschmidt #endif /* CONFIG_DEBUG_FS */
1298