xref: /linux/mm/memblock.c (revision 8b7b85384fad6e21e8a28628e7ebacb5a6329de4)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>
#include <linux/mutex.h>
#include <linux/string_helpers.h>

#ifdef CONFIG_KEXEC_HANDOVER
#include <linux/libfdt.h>
#include <linux/kexec_handover.h>
#include <linux/kho/abi/memblock.h>
#endif /* CONFIG_KEXEC_HANDOVER */

#include <asm/sections.h>
#include <linux/io.h>

#include "internal.h"

#define INIT_MEMBLOCK_REGIONS			128
#define INIT_PHYSMEM_REGIONS			4

#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
# define INIT_MEMBLOCK_RESERVED_REGIONS		INIT_MEMBLOCK_REGIONS
#endif

#ifndef INIT_MEMBLOCK_MEMORY_REGIONS
#define INIT_MEMBLOCK_MEMORY_REGIONS		INIT_MEMBLOCK_REGIONS
#endif

/**
 * DOC: memblock overview
 *
 * Memblock is a method of managing memory regions during the early
 * boot period when the usual kernel memory allocators are not up and
 * running.
 *
 * Memblock views the system memory as collections of contiguous
 * regions. There are several types of these collections:
 *
 * * ``memory`` - describes the physical memory available to the
 *   kernel; this may differ from the actual physical memory installed
 *   in the system, for instance when the memory is restricted with
 *   the ``mem=`` command line parameter
 * * ``reserved`` - describes the regions that were allocated
 * * ``physmem`` - describes the actual physical memory available during
 *   boot regardless of the possible restrictions and memory hot(un)plug;
 *   the ``physmem`` type is only available on some architectures.
 *
 * Each region is represented by struct memblock_region that
 * defines the region extents, its attributes and NUMA node id on NUMA
 * systems. Every memory type is described by the struct memblock_type
 * which contains an array of memory regions along with
 * the allocator metadata. The "memory" and "reserved" types are nicely
 * wrapped with struct memblock. This structure is statically
 * initialized at build time. The region arrays are initially sized to
 * %INIT_MEMBLOCK_MEMORY_REGIONS for "memory" and
 * %INIT_MEMBLOCK_RESERVED_REGIONS for "reserved". The region array
 * for "physmem" is initially sized to %INIT_PHYSMEM_REGIONS.
 * memblock_allow_resize() enables automatic resizing of the region
 * arrays during addition of new regions. This feature should be used
 * with care so that memory allocated for the region array will not
 * overlap with areas that should be reserved, for example initrd.
 *
 * The early architecture setup should tell memblock what the physical
 * memory layout is by using the memblock_add() or memblock_add_node()
 * functions. The first function does not assign the region to a NUMA
 * node and it is appropriate for UMA systems. Yet, it is possible to
 * use it on NUMA systems as well and assign the region to a NUMA node
 * later in the setup process using memblock_set_node().
 * memblock_add_node() performs such an assignment directly.
 *
 * Once memblock is set up, the memory can be allocated using one of the
 * API variants:
 *
 * * memblock_phys_alloc*() - these functions return the **physical**
 *   address of the allocated memory
 * * memblock_alloc*() - these functions return the **virtual** address
 *   of the allocated memory.
 *
 * Note that both API variants use implicit assumptions about allowed
 * memory ranges and the fallback methods. Consult the documentation
 * of the memblock_alloc_internal() and memblock_alloc_range_nid()
 * functions for a more elaborate description.
 *
 * As the system boot progresses, the architecture specific mem_init()
 * function frees all the memory to the buddy page allocator.
 *
 * Unless an architecture enables %CONFIG_ARCH_KEEP_MEMBLOCK, the
 * memblock data structures (except "physmem") will be discarded after the
 * system initialization completes.
 */
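
/*
 * A minimal usage sketch (kept as a comment, not part of this file's
 * logic): how early architecture setup might register memory and then
 * allocate from it. The base addresses and sizes are hypothetical
 * placeholders.
 *
 *	static void __init example_early_setup(void)
 *	{
 *		void *buf;
 *
 *		// Describe the physical memory layout.
 *		memblock_add(0x80000000, SZ_256M);
 *		// Carve out a firmware-reserved range.
 *		memblock_reserve(0x80000000, SZ_1M);
 *
 *		// Allocate 1MiB; returns a zeroed virtual address or NULL.
 *		buf = memblock_alloc(SZ_1M, SMP_CACHE_BYTES);
 *		if (!buf)
 *			panic("example: allocation failed");
 *	}
 */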

#ifndef CONFIG_NUMA
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;

#ifdef CONFIG_MEMBLOCK_KHO_SCRATCH
/* When set to true, only allocate from MEMBLOCK_KHO_SCRATCH ranges */
static bool kho_scratch_only;
#else
#define kho_scratch_only false
#endif

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_MEMORY_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS];
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.max		= INIT_MEMBLOCK_MEMORY_REGIONS,
	.memory.name		= "memory",

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.max		= INIT_MEMBLOCK_RESERVED_REGIONS,
	.reserved.name		= "reserved",

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
struct memblock_type physmem = {
	.regions		= memblock_physmem_init_regions,
	.max			= INIT_PHYSMEM_REGIONS,
	.name			= "physmem",
};
#endif

/*
 * Keep a pointer to &memblock.memory in the text section to use it in
 * __next_mem_range() and its helpers.
 * For architectures that do not keep memblock data after init, this
 * pointer will be reset to NULL at memblock_discard().
 */
static __refdata struct memblock_type *memblock_memory = &memblock.memory;

#define for_each_memblock_type(i, memblock_type, rgn)			\
	for (i = 0, rgn = &memblock_type->regions[0];			\
	     i < memblock_type->cnt;					\
	     i++, rgn = &memblock_type->regions[i])

#define memblock_dbg(fmt, ...)						\
	do {								\
		if (memblock_debug)					\
			pr_info(fmt, ##__VA_ARGS__);			\
	} while (0)

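/*
 * memblock_debug gates the memblock_dbg() messages above; it is
 * enabled by passing "memblock=debug" on the kernel command line.
 */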
static int memblock_debug __initdata_memblock;
static bool system_has_some_mirror __initdata_memblock;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock;
static int memblock_reserved_in_slab __initdata_memblock;

bool __init_memblock memblock_has_mirror(void)
{
	return system_has_some_mirror;
}

static enum memblock_flags __init_memblock choose_memblock_flags(void)
{
	/* skip non-scratch memory for KHO early boot allocations */
	if (kho_scratch_only)
		return MEMBLOCK_KHO_SCRATCH;

	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, PHYS_ADDR_MAX - base);
}

/*
 * Address comparison utilities
 */
unsigned long __init_memblock
memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1, phys_addr_t base2,
		       phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	memblock_cap_size(base, &size);

	for (i = 0; i < type->cnt; i++)
		if (memblock_addrs_overlap(base, size, type->regions[i].base,
					   type->regions[i].size))
			return true;
	return false;
}

/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(); finds a free area
 * scanning bottom-up.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(); finds a free area
 * scanning top-down.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					enum memblock_flags flags)
{
	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
	    end == MEMBLOCK_ALLOC_NOLEAKTRACE)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);

	if (memblock_bottom_up())
		return __memblock_find_range_bottom_up(start, end, size, align,
						       nid, flags);
	else
		return __memblock_find_range_top_down(start, end, size, align,
						      nid, flags);
}

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	enum memblock_flags flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					    NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn_ratelimited("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
/**
 * memblock_discard - discard memory and reserved arrays if they were allocated
 */
void __init memblock_discard(void)
{
	phys_addr_t addr, size;

	if (memblock.reserved.regions != memblock_reserved_init_regions) {
		addr = __pa(memblock.reserved.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.reserved.max);
		if (memblock_reserved_in_slab)
			kfree(memblock.reserved.regions);
		else
			memblock_free_late(addr, size);
	}

	if (memblock.memory.regions != memblock_memory_init_regions) {
		addr = __pa(memblock.memory.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.memory.max);
		if (memblock_memory_in_slab)
			kfree(memblock.memory.regions);
		else
			memblock_free_late(addr, size);
	}

	memblock_memory = NULL;
}
#endif

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start, @new_area_start + @new_area_size)
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * Return:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr, new_end;
	int use_slab = slab_is_available();
	int *in_slab;

	/*
	 * We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		panic("memblock: cannot resize %s array\n", type->name);

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new array aligned to PAGE_SIZE,
	 * so that we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		if (addr) {
			/* The memory may not have been accepted, yet. */
			accept_memory(addr, new_alloc_size);

			new_array = __va(addr);
		} else {
			new_array = NULL;
		}
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries!\n",
		       type->name, type->max, type->max * 2);
		return -1;
	}

	new_end = addr + new_size - 1;
	memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]\n",
			type->name, type->max * 2, &addr, &new_end);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(old_array, old_alloc_size);

	/*
	 * Reserve the new array if it comes from memblock.  Otherwise, we
	 * needn't do it.
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve_kern(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 * @start_rgn: start scanning from (@start_rgn - 1)
 * @end_rgn: end scanning at (@end_rgn - 1)
 *
 * Scan @type and merge neighboring compatible regions in
 * [@start_rgn - 1, @end_rgn).
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type,
						   unsigned long start_rgn,
						   unsigned long end_rgn)
{
	int i = 0;

	if (start_rgn)
		i = start_rgn - 1;
	end_rgn = min(end_rgn, type->cnt - 1);
	while (i < end_rgn) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
		end_rgn--;
	}
}

/**
 * memblock_insert_region - insert new memblock region
 * @type:	memblock type to insert into
 * @idx:	index for the insertion point
 * @base:	base address of the new region
 * @size:	size of the new region
 * @nid:	node id of the new region
 * @flags:	flags of the new region
 *
 * Insert new memblock region [@base, @base + @size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid,
						   enum memblock_flags flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, enum memblock_flags flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx, nr_new, start_rgn = -1, end_rgn;
	struct memblock_region *rgn;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 0 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		type->cnt = 1;
		return 0;
	}

	/*
	 * The worst case is when the new range overlaps all existing regions,
	 * then we'll need type->cnt + 1 empty regions in @type. So if
	 * type->cnt * 2 + 1 is less than or equal to type->max, we know
	 * that there are enough empty regions in @type, and we can insert
	 * regions directly.
	 */
	if (type->cnt * 2 + 1 <= type->max)
		insert = true;

repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
#ifdef CONFIG_NUMA
			WARN_ON(nid != memblock_get_region_node(rgn));
#endif
			WARN_ON(flags != MEMBLOCK_NONE && flags != rgn->flags);
			nr_new++;
			if (insert) {
				if (start_rgn == -1)
					start_rgn = idx;
				end_rgn = idx + 1;
				memblock_insert_region(type, idx++, base,
						       rbase - base, nid,
						       flags);
			}
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert) {
			if (start_rgn == -1)
				start_rgn = idx;
			end_rgn = idx + 1;
			memblock_insert_region(type, idx, base, end - base,
					       nid, flags);
		}
	}

	if (!nr_new)
		return 0;

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type, start_rgn, end_rgn);
		return 0;
	}
}
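
/*
 * Illustrative sketch (kept as a comment): overlapping adds are merged,
 * per the semantics above. The addresses are hypothetical placeholders.
 *
 *	memblock_add(0x1000, 0x1000);	// memory: [0x1000, 0x2000)
 *	memblock_add(0x1800, 0x1000);	// overlap is fine; memory is now
 *					// the single region [0x1000, 0x2800)
 */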

/**
 * memblock_add_node - add new memblock region within a NUMA node
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				      int nid, enum memblock_flags flags)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] nid=%d flags=%x %pS\n", __func__,
		     &base, &end, nid, flags, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, nid, flags);
}

/**
 * memblock_add - add new memblock region
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_validate_numa_coverage - check if the amount of memory with
 * no node ID assigned is less than a threshold
 * @threshold_bytes: maximal memory size that can have unassigned node
 * ID (in bytes).
 *
 * A buggy firmware may report memory that does not belong to any node.
 * Check if the amount of such memory is below @threshold_bytes.
 *
 * Return: true on success, false on failure.
 */
bool __init_memblock memblock_validate_numa_coverage(unsigned long threshold_bytes)
{
	unsigned long nr_pages = 0;
	unsigned long start_pfn, end_pfn, mem_size_mb;
	int nid, i;

	/* calculate lost pages */
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		if (!numa_valid_node(nid))
			nr_pages += end_pfn - start_pfn;
	}

	if ((nr_pages << PAGE_SHIFT) > threshold_bytes) {
		mem_size_mb = memblock_phys_mem_size() / SZ_1M;
		pr_err("NUMA: no nodes coverage for %luMB of %luMB RAM\n",
		       (nr_pages << PAGE_SHIFT) / SZ_1M, mem_size_mb);
		return false;
	}

	return true;
}

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base, @base + @size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and the index of the
 * first region after the range is returned in *@end_rgn.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx;
	struct memblock_region *rgn;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, idx, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, idx--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = idx;
			*end_rgn = idx + 1;
		}
	}

	return 0;
}
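
/*
 * Worked example for memblock_isolate_range() (hypothetical values):
 * given a single region [0x0, 0x4000) in @type, isolating the range
 * [0x1000, 0x3000) splits it into [0x0, 0x1000), [0x1000, 0x3000) and
 * [0x3000, 0x4000), returning *@start_rgn = 1 and *@end_rgn = 2 so that
 * callers can operate on the middle region alone.
 */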

static int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_remove_range(&memblock.memory, base, size);
}

/**
 * memblock_free - free boot memory allocation
 * @ptr: starting address of the boot memory allocation
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
void __init_memblock memblock_free(void *ptr, size_t size)
{
	if (ptr)
		memblock_phys_free(__pa(ptr), size);
}

/**
 * memblock_phys_free - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_phys_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
int __init_memblock memblock_phys_free(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	kmemleak_free_part_phys(base, size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

int __init_memblock __memblock_reserve(phys_addr_t base, phys_addr_t size,
				       int nid, enum memblock_flags flags)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] nid=%d flags=%x %pS\n", __func__,
		     &base, &end, nid, flags, (void *)_RET_IP_);

	return memblock_add_range(&memblock.reserved, base, size, nid, flags);
}

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int __init_memblock memblock_physmem_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&physmem, base, size, MAX_NUMNODES, 0);
}
#endif

#ifdef CONFIG_MEMBLOCK_KHO_SCRATCH
__init void memblock_set_kho_scratch_only(void)
{
	kho_scratch_only = true;
}

__init void memblock_clear_kho_scratch_only(void)
{
	kho_scratch_only = false;
}

__init void memmap_init_kho_scratch_pages(void)
{
	phys_addr_t start, end;
	unsigned long pfn;
	int nid;
	u64 i;

	if (!IS_ENABLED(CONFIG_DEFERRED_STRUCT_PAGE_INIT))
		return;

	/*
	 * Initialize struct pages for free scratch memory.
	 * The struct pages for reserved scratch memory will be set up in
	 * memmap_init_reserved_pages()
	 */
	__for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
			     MEMBLOCK_KHO_SCRATCH, &start, &end, &nid) {
		for (pfn = PFN_UP(start); pfn < PFN_DOWN(end); pfn++)
			init_deferred_page(pfn, nid);
	}
}
#endif

/**
 * memblock_setclr_flag - set or clear flag for a memory region
 * @type: memblock type to set/clear flag for
 * @base: base address of the region
 * @size: size of the region
 * @set: set or clear the flag
 * @flag: the flag to update
 *
 * This function isolates region [@base, @base + @size), and sets/clears @flag.
 *
 * Return: 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size, int set, int flag)
{
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++) {
		struct memblock_region *r = &type->regions[i];

		if (set)
			r->flags |= flag;
		else
			r->flags &= ~flag;
	}

	memblock_merge_regions(type, start_rgn, end_rgn);
	return 0;
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(&memblock.memory, base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(&memblock.memory, base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
	if (!mirrored_kernelcore)
		return 0;

	system_has_some_mirror = true;

	return memblock_setclr_flag(&memblock.memory, base, size, 1, MEMBLOCK_MIRROR);
}

/**
 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * The memory regions marked with %MEMBLOCK_NOMAP will not be added to the
 * direct mapping of the physical memory. These regions will still be
 * covered by the memory map. The struct page representing NOMAP memory
 * frames in the memory map will be PageReserved().
 *
 * Note: if the memory being marked %MEMBLOCK_NOMAP was allocated from
 * memblock, the caller must inform kmemleak to ignore that memory.
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(&memblock.memory, base, size, 1, MEMBLOCK_NOMAP);
}

/**
 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(&memblock.memory, base, size, 0, MEMBLOCK_NOMAP);
}

/**
 * memblock_reserved_mark_noinit - Mark a reserved memory region with flag
 * MEMBLOCK_RSRV_NOINIT
 *
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * The struct pages for the reserved regions marked %MEMBLOCK_RSRV_NOINIT will
 * not be fully initialized to allow the caller to optimize their
 * initialization.
 *
 * When %CONFIG_DEFERRED_STRUCT_PAGE_INIT is enabled, setting this flag
 * completely bypasses the initialization of struct pages for such region.
 *
 * When %CONFIG_DEFERRED_STRUCT_PAGE_INIT is disabled, struct pages in this
 * region will be initialized with default values but won't be marked as
 * reserved.
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_reserved_mark_noinit(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(&memblock.reserved, base, size, 1,
				    MEMBLOCK_RSRV_NOINIT);
}

/**
 * memblock_mark_kho_scratch - Mark a memory region as MEMBLOCK_KHO_SCRATCH.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Only memory regions marked with %MEMBLOCK_KHO_SCRATCH will be considered
 * for allocations during early boot with kexec handover.
 *
 * Return: 0 on success, -errno on failure.
 */
__init int memblock_mark_kho_scratch(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(&memblock.memory, base, size, 1,
				    MEMBLOCK_KHO_SCRATCH);
}

/**
 * memblock_clear_kho_scratch - Clear MEMBLOCK_KHO_SCRATCH flag for a
 * specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
__init int memblock_clear_kho_scratch(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(&memblock.memory, base, size, 0,
				    MEMBLOCK_KHO_SCRATCH);
}

static bool should_skip_region(struct memblock_type *type,
			       struct memblock_region *m,
			       int nid, int flags)
{
	int m_nid = memblock_get_region_node(m);

	/* we never skip regions when iterating memblock.reserved or physmem */
	if (type != memblock_memory)
		return false;

	/* only memory regions are associated with nodes, check it */
	if (numa_valid_node(nid) && nid != m_nid)
		return true;

	/* skip hotpluggable memory regions if needed */
	if (movable_node_is_enabled() && memblock_is_hotpluggable(m) &&
	    !(flags & MEMBLOCK_HOTPLUG))
		return true;

	/* if we want mirror memory skip non-mirror memory regions */
	if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
		return true;

	/* skip nomap memory unless we were asked for it explicitly */
	if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
		return true;

	/* skip driver-managed memory unless we were asked for it explicitly */
	if (!(flags & MEMBLOCK_DRIVER_MANAGED) && memblock_is_driver_managed(m))
		return true;

	/*
	 * In early alloc during kexec handover, we can only consider
	 * MEMBLOCK_KHO_SCRATCH regions for the allocations
	 */
	if ((flags & MEMBLOCK_KHO_SCRATCH) && !memblock_is_kho_scratch(m))
		return true;

	return false;
}

/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains the index into @type_a and the upper 32bit indexes the
 * areas before each region in @type_b.  For example, if @type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int	    m_nid = memblock_get_region_node(m);

		if (should_skip_region(type_a, m, nid, flags))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
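
/*
 * Illustrative sketch (kept as a comment): typical use of the iterator
 * above through the for_each_free_mem_range() wrapper, which yields the
 * memory ranges not covered by reserved regions.
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		pr_info("free: [%pa-%pa]\n", &start, &end);
 */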

/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
					  enum memblock_flags flags,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		if (type_b != NULL)
			idx_b = type_b->cnt;
		else
			idx_b = 0;
	}

	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		if (should_skip_region(type_a, m, nid, flags))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}
	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;
	int r_nid;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];
		r_nid = memblock_get_region_node(r);

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (!numa_valid_node(nid) || nid == r_nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r_nid;
}

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base, @base + @size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
#ifdef CONFIG_NUMA
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type, start_rgn, end_rgn);
#endif
	return 0;
}

/**
 * memblock_alloc_range_nid - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @start: the lower bound of the memory region to allocate (phys address)
 * @end: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @exact_nid: control the allocation fall back to other nodes
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @end == %MEMBLOCK_ALLOC_ACCESSIBLE.
 *
 * If the specified node can not hold the requested memory and @exact_nid
 * is false, the allocation falls back to any node in the system.
 *
 * For systems with memory mirroring, the allocation is attempted first
 * from the regions with mirroring enabled and then retried from any
 * memory region.
 *
 * In addition, the function registers the allocated boot memory block with
 * kmemleak_alloc_phys(), so it is never reported as a leak.
 *
 * Return:
 * Physical address of allocated memory block on success, %0 on failure.
 */
phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					bool exact_nid)
{
	enum memblock_flags flags = choose_memblock_flags();
	phys_addr_t found;

	/*
	 * Detect any accidental use of these APIs after slab is ready, as at
	 * this moment memblock may be deinitialized already and its
	 * internal data may be destroyed (after execution of memblock_free_all)
	 */
	if (WARN_ON_ONCE(slab_is_available())) {
		void *vaddr = kzalloc_node(size, GFP_NOWAIT, nid);

		return vaddr ? virt_to_phys(vaddr) : 0;
	}

	if (!align) {
		/* Can't use WARNs this early in boot on powerpc */
		dump_stack();
		align = SMP_CACHE_BYTES;
	}

again:
	found = memblock_find_in_range_node(size, align, start, end, nid,
					    flags);
	if (found && !__memblock_reserve(found, size, nid, MEMBLOCK_RSRV_KERN))
		goto done;

	if (numa_valid_node(nid) && !exact_nid) {
		found = memblock_find_in_range_node(size, align, start,
						    end, NUMA_NO_NODE,
						    flags);
		if (found && !memblock_reserve_kern(found, size))
			goto done;
	}

	if (flags & MEMBLOCK_MIRROR) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn_ratelimited("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		goto again;
	}

	return 0;

done:
	/*
	 * Skip kmemleak for those places like kasan_init() and
	 * early_pgtable_alloc() due to high volume.
	 */
	if (end != MEMBLOCK_ALLOC_NOLEAKTRACE)
		/*
		 * Memblock allocated blocks are never reported as
		 * leaks. This is because many of these blocks are
		 * only referred via the physical address which is
		 * not looked up by kmemleak.
		 */
		kmemleak_alloc_phys(found, size, 0);

	/*
	 * Some Virtual Machine platforms, such as Intel TDX or AMD SEV-SNP,
	 * require memory to be accepted before it can be used by the
	 * guest.
	 *
	 * Accept the memory of the allocated buffer.
	 */
	accept_memory(found, size);

	return found;
}
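
/*
 * Illustrative sketch (kept as a comment): a node-preferring physical
 * allocation that tolerates fallback to other nodes. The size and node
 * id are hypothetical placeholders.
 *
 *	phys_addr_t pa;
 *
 *	// Prefer node 1, but accept any node if node 1 has no free memory.
 *	pa = memblock_alloc_range_nid(SZ_64K, SMP_CACHE_BYTES, 0,
 *				      MEMBLOCK_ALLOC_ACCESSIBLE, 1, false);
 *	if (!pa)
 *		pr_warn("example: allocation failed\n");
 */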

/**
 * memblock_phys_alloc_range - allocate a memory block inside specified range
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @start: the lower bound of the memory region to allocate (physical address)
 * @end: the upper bound of the memory region to allocate (physical address)
 *
 * Allocate @size bytes in the range between @start and @end.
 *
 * Return: physical address of the allocated memory block on success,
 * %0 on failure.
 */
phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
					     phys_addr_t align,
					     phys_addr_t start,
					     phys_addr_t end)
{
	memblock_dbg("%s: %llu bytes align=0x%llx from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, &start, &end,
		     (void *)_RET_IP_);
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
					false);
}

/**
 * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Allocates memory block from the specified NUMA node. If the node
 * has no available memory, attempts to allocate from any node in the
 * system.
 *
 * Return: physical address of the allocated memory block on success,
 * %0 on failure.
 */
phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	return memblock_alloc_range_nid(size, align, 0,
					MEMBLOCK_ALLOC_ACCESSIBLE, nid, false);
}

/**
 * memblock_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @exact_nid: control the allocation fall back to other nodes
 *
 * Allocates memory block using memblock_alloc_range_nid() and
 * converts the returned physical address to virtual.
 *
 * The @min_addr limit is dropped if it cannot be satisfied and the allocation
 * will fall back to memory below @min_addr. Other constraints, such
 * as node and mirrored memory, will be handled again in
 * memblock_alloc_range_nid().
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
static void * __init memblock_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid, bool exact_nid)
{
	phys_addr_t alloc;

	if (max_addr > memblock.current_limit)
		max_addr = memblock.current_limit;

	alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid,
					exact_nid);

	/* retry allocation without lower limit */
	if (!alloc && min_addr)
		alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid,
						exact_nid);

	if (!alloc)
		return NULL;

	return phys_to_virt(alloc);
}

1637 /**
1638  * memblock_alloc_exact_nid_raw - allocate boot memory block on the exact node
1639  * without zeroing memory
1640  * @size: size of memory block to be allocated in bytes
1641  * @align: alignment of the region and block's size
1642  * @min_addr: the lower bound of the memory region from where the allocation
1643  *	  is preferred (phys address)
1644  * @max_addr: the upper bound of the memory region from where the allocation
1645  *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
1646  *	      allocate only from memory limited by memblock.current_limit value
1647  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1648  *
1649  * Public function, provides additional debug information (including caller
1650  * info), if enabled. Does not zero allocated memory.
1651  *
1652  * Return:
1653  * Virtual address of allocated memory block on success, NULL on failure.
1654  */
1655 void * __init memblock_alloc_exact_nid_raw(
1656 			phys_addr_t size, phys_addr_t align,
1657 			phys_addr_t min_addr, phys_addr_t max_addr,
1658 			int nid)
1659 {
1660 	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
1661 		     __func__, (u64)size, (u64)align, nid, &min_addr,
1662 		     &max_addr, (void *)_RET_IP_);
1663 
1664 	return memblock_alloc_internal(size, align, min_addr, max_addr, nid,
1665 				       true);
1666 }
1667 
1668 /**
1669  * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing
1670  * memory and without panicking
1671  * @size: size of memory block to be allocated in bytes
1672  * @align: alignment of the region and block's size
1673  * @min_addr: the lower bound of the memory region from where the allocation
1674  *	  is preferred (phys address)
1675  * @max_addr: the upper bound of the memory region from where the allocation
1676  *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
1677  *	      allocate only from memory limited by memblock.current_limit value
1678  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1679  *
1680  * Public function, provides additional debug information (including caller
1681  * info), if enabled. Does not zero allocated memory, does not panic if request
1682  * cannot be satisfied.
1683  *
1684  * Return:
1685  * Virtual address of allocated memory block on success, NULL on failure.
1686  */
1687 void * __init memblock_alloc_try_nid_raw(
1688 			phys_addr_t size, phys_addr_t align,
1689 			phys_addr_t min_addr, phys_addr_t max_addr,
1690 			int nid)
1691 {
1692 	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
1693 		     __func__, (u64)size, (u64)align, nid, &min_addr,
1694 		     &max_addr, (void *)_RET_IP_);
1695 
1696 	return memblock_alloc_internal(size, align, min_addr, max_addr, nid,
1697 				       false);
1698 }
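
/*
 * A minimal usage sketch (illustrative only; names are made up). The _raw
 * variants do not zero memory, so the caller must initialize every byte it
 * will read back, e.g. when the buffer is fully overwritten anyway:
 *
 *	map = memblock_alloc_try_nid_raw(size, SMP_CACHE_BYTES, 0,
 *					 MEMBLOCK_ALLOC_ACCESSIBLE, nid);
 *	if (map)
 *		memset(map, 0xff, size);
 */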
1699 
1700 /**
1701  * memblock_alloc_try_nid - allocate boot memory block
1702  * @size: size of memory block to be allocated in bytes
1703  * @align: alignment of the region and block's size
1704  * @min_addr: the lower bound of the memory region from where the allocation
1705  *	  is preferred (phys address)
1706  * @max_addr: the upper bound of the memory region from where the allocation
1707  *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
1708  *	      allocate only from memory limited by memblock.current_limit value
1709  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1710  *
1711  * Public function, provides additional debug information (including caller
1712  * info), if enabled. This function zeroes the allocated memory.
1713  *
1714  * Return:
1715  * Virtual address of allocated memory block on success, NULL on failure.
1716  */
1717 void * __init memblock_alloc_try_nid(
1718 			phys_addr_t size, phys_addr_t align,
1719 			phys_addr_t min_addr, phys_addr_t max_addr,
1720 			int nid)
1721 {
1722 	void *ptr;
1723 
1724 	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
1725 		     __func__, (u64)size, (u64)align, nid, &min_addr,
1726 		     &max_addr, (void *)_RET_IP_);
1727 	ptr = memblock_alloc_internal(size, align,
1728 					   min_addr, max_addr, nid, false);
1729 	if (ptr)
1730 		memset(ptr, 0, size);
1731 
1732 	return ptr;
1733 }
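
/*
 * A minimal usage sketch (illustrative only): a zeroed allocation that must
 * land below 4G, e.g. for a device limited to 32-bit DMA addressing:
 *
 *	table = memblock_alloc_try_nid(size, PAGE_SIZE, 0, SZ_4G,
 *				       NUMA_NO_NODE);
 */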
1734 
1735 /**
1736  * __memblock_alloc_or_panic - Try to allocate memory and panic on failure
1737  * @size: size of memory block to be allocated in bytes
1738  * @align: alignment of the region and block's size
1739  * @func: caller func name
1740  *
1741  * This function attempts to allocate memory using memblock_alloc()
1742  * and, in case of failure, calls panic() with a formatted message.
1743  * Do not use this function directly; use the memblock_alloc_or_panic() macro instead.
1744  */
1745 void *__init __memblock_alloc_or_panic(phys_addr_t size, phys_addr_t align,
1746 				       const char *func)
1747 {
1748 	void *addr = memblock_alloc(size, align);
1749 
1750 	if (unlikely(!addr))
1751 		panic("%s: Failed to allocate %pap bytes\n", func, &size);
1752 	return addr;
1753 }
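
/*
 * A minimal usage sketch (illustrative only); the memblock_alloc_or_panic()
 * macro passes the caller's function name for the panic message:
 *
 *	ptr = memblock_alloc_or_panic(size, SMP_CACHE_BYTES);
 */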
1754 
1755 /**
1756  * memblock_free_late - free pages directly to buddy allocator
1757  * @base: phys starting address of the boot memory block
1758  * @size: size of the boot memory block in bytes
1759  *
1760  * This is only useful when the memblock allocator has already been torn
1761  * down, but we are still initializing the system.  Pages are released directly
1762  * to the buddy allocator.
1763  */
1764 void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
1765 {
1766 	phys_addr_t cursor, end;
1767 
1768 	end = base + size - 1;
1769 	memblock_dbg("%s: [%pa-%pa] %pS\n",
1770 		     __func__, &base, &end, (void *)_RET_IP_);
1771 	kmemleak_free_part_phys(base, size);
1772 	cursor = PFN_UP(base);
1773 	end = PFN_DOWN(base + size);
1774 
1775 	for (; cursor < end; cursor++) {
1776 		memblock_free_pages(cursor, 0);
1777 		totalram_pages_inc();
1778 	}
1779 }
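
/*
 * A minimal usage sketch (illustrative only): return an early buffer that
 * is no longer needed once the buddy allocator is up:
 *
 *	memblock_free_late(__pa(buf), buf_size);
 */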
1780 
1781 /*
1782  * Remaining API functions
1783  */
1784 
1785 phys_addr_t __init_memblock memblock_phys_mem_size(void)
1786 {
1787 	return memblock.memory.total_size;
1788 }
1789 
1790 phys_addr_t __init_memblock memblock_reserved_size(void)
1791 {
1792 	return memblock.reserved.total_size;
1793 }
1794 
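/*
 * Total size of regions reserved with MEMBLOCK_RSRV_KERN below @limit,
 * restricted to node @nid, or counted across all nodes when @nid is
 * NUMA_NO_NODE.
 */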
1795 phys_addr_t __init_memblock memblock_reserved_kern_size(phys_addr_t limit, int nid)
1796 {
1797 	struct memblock_region *r;
1798 	phys_addr_t total = 0;
1799 
1800 	for_each_reserved_mem_region(r) {
1801 		phys_addr_t size = r->size;
1802 
1803 		if (r->base > limit)
1804 			break;
1805 
1806 		if (r->base + r->size > limit)
1807 			size = limit - r->base;
1808 
1809 		if (nid == memblock_get_region_node(r) || !numa_valid_node(nid))
1810 			if (r->flags & MEMBLOCK_RSRV_KERN)
1811 				total += size;
1812 	}
1813 
1814 	return total;
1815 }
1816 
1817 /**
1818  * memblock_estimated_nr_free_pages - return estimated number of free pages
1819  * from the memblock point of view
1820  *
1821  * During bootup, subsystems might need a rough estimate of the number of free
1822  * pages in the whole system, before precise numbers are available from the
1823  * buddy. Especially with CONFIG_DEFERRED_STRUCT_PAGE_INIT, the numbers
1824  * obtained from the buddy might be very imprecise during bootup.
1825  *
1826  * Return:
1827  * An estimated number of free pages from the memblock point of view.
1828  */
1829 unsigned long __init memblock_estimated_nr_free_pages(void)
1830 {
1831 	return PHYS_PFN(memblock_phys_mem_size() -
1832 			memblock_reserved_kern_size(MEMBLOCK_ALLOC_ANYWHERE, NUMA_NO_NODE));
1833 }
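
/*
 * A minimal usage sketch (illustrative only): size an early table from the
 * estimate instead of the not-yet-reliable buddy counters:
 *
 *	nr_entries = min(memblock_estimated_nr_free_pages() / 16, max_entries);
 */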
1834 
1835 /* lowest address */
1836 phys_addr_t __init_memblock memblock_start_of_DRAM(void)
1837 {
1838 	return memblock.memory.regions[0].base;
1839 }
1840 
1841 phys_addr_t __init_memblock memblock_end_of_DRAM(void)
1842 {
1843 	int idx = memblock.memory.cnt - 1;
1844 
1845 	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
1846 }
1847 
1848 static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
1849 {
1850 	phys_addr_t max_addr = PHYS_ADDR_MAX;
1851 	struct memblock_region *r;
1852 
1853 	/*
1854 	 * Translate the memory @limit size into the max address within one of
1855 	 * the memory memblock regions. If @limit exceeds the total size of
1856 	 * those regions, max_addr keeps its original value PHYS_ADDR_MAX.
1857 	 */
1858 	for_each_mem_region(r) {
1859 		if (limit <= r->size) {
1860 			max_addr = r->base + limit;
1861 			break;
1862 		}
1863 		limit -= r->size;
1864 	}
1865 
1866 	return max_addr;
1867 }
1868 
1869 void __init memblock_enforce_memory_limit(phys_addr_t limit)
1870 {
1871 	phys_addr_t max_addr;
1872 
1873 	if (!limit)
1874 		return;
1875 
1876 	max_addr = __find_max_addr(limit);
1877 
1878 	/* @limit exceeds the total size of the memory, do nothing */
1879 	if (max_addr == PHYS_ADDR_MAX)
1880 		return;
1881 
1882 	/* truncate both memory and reserved regions */
1883 	memblock_remove_range(&memblock.memory, max_addr,
1884 			      PHYS_ADDR_MAX);
1885 	memblock_remove_range(&memblock.reserved, max_addr,
1886 			      PHYS_ADDR_MAX);
1887 }
1888 
1889 void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
1890 {
1891 	int start_rgn, end_rgn;
1892 	int i, ret;
1893 
1894 	if (!size)
1895 		return;
1896 
1897 	if (!memblock_memory->total_size) {
1898 		pr_warn("%s: No memory registered yet\n", __func__);
1899 		return;
1900 	}
1901 
1902 	ret = memblock_isolate_range(&memblock.memory, base, size,
1903 						&start_rgn, &end_rgn);
1904 	if (ret)
1905 		return;
1906 
1907 	/* remove all mapped (non-NOMAP) regions outside the isolated range */
1908 	for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
1909 		if (!memblock_is_nomap(&memblock.memory.regions[i]))
1910 			memblock_remove_region(&memblock.memory, i);
1911 
1912 	for (i = start_rgn - 1; i >= 0; i--)
1913 		if (!memblock_is_nomap(&memblock.memory.regions[i]))
1914 			memblock_remove_region(&memblock.memory, i);
1915 
1916 	/* truncate the reserved regions */
1917 	memblock_remove_range(&memblock.reserved, 0, base);
1918 	memblock_remove_range(&memblock.reserved,
1919 			base + size, PHYS_ADDR_MAX);
1920 }
1921 
1922 void __init memblock_mem_limit_remove_map(phys_addr_t limit)
1923 {
1924 	phys_addr_t max_addr;
1925 
1926 	if (!limit)
1927 		return;
1928 
1929 	max_addr = __find_max_addr(limit);
1930 
1931 	/* @limit exceeds the total size of the memory, do nothing */
1932 	if (max_addr == PHYS_ADDR_MAX)
1933 		return;
1934 
1935 	memblock_cap_memory_range(0, max_addr);
1936 }
1937 
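/*
 * Binary search over the sorted, non-overlapping regions of @type.
 * Returns the index of the region containing @addr, or -1 if @addr is
 * not covered by any region.
 */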
1938 static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
1939 {
1940 	unsigned int left = 0, right = type->cnt;
1941 
1942 	do {
1943 		unsigned int mid = (right + left) / 2;
1944 
1945 		if (addr < type->regions[mid].base)
1946 			right = mid;
1947 		else if (addr >= (type->regions[mid].base +
1948 				  type->regions[mid].size))
1949 			left = mid + 1;
1950 		else
1951 			return mid;
1952 	} while (left < right);
1953 	return -1;
1954 }
1955 
1956 bool __init_memblock memblock_is_reserved(phys_addr_t addr)
1957 {
1958 	return memblock_search(&memblock.reserved, addr) != -1;
1959 }
1960 
1961 bool __init_memblock memblock_is_memory(phys_addr_t addr)
1962 {
1963 	return memblock_search(&memblock.memory, addr) != -1;
1964 }
1965 
1966 bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
1967 {
1968 	int i = memblock_search(&memblock.memory, addr);
1969 
1970 	if (i == -1)
1971 		return false;
1972 	return !memblock_is_nomap(&memblock.memory.regions[i]);
1973 }
1974 
1975 int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
1976 			 unsigned long *start_pfn, unsigned long *end_pfn)
1977 {
1978 	struct memblock_type *type = &memblock.memory;
1979 	int mid = memblock_search(type, PFN_PHYS(pfn));
1980 
1981 	if (mid == -1)
1982 		return NUMA_NO_NODE;
1983 
1984 	*start_pfn = PFN_DOWN(type->regions[mid].base);
1985 	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);
1986 
1987 	return memblock_get_region_node(&type->regions[mid]);
1988 }
1989 
1990 /**
1991  * memblock_is_region_memory - check if a region is a subset of memory
1992  * @base: base of region to check
1993  * @size: size of region to check
1994  *
1995  * Check if the region [@base, @base + @size) is a subset of a memory block.
1996  *
1997  * Return:
1998  * %true if the region is a subset of a memory block, %false otherwise.
1999  */
2000 bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
2001 {
2002 	int idx = memblock_search(&memblock.memory, base);
2003 	phys_addr_t end = base + memblock_cap_size(base, &size);
2004 
2005 	if (idx == -1)
2006 		return false;
2007 	return (memblock.memory.regions[idx].base +
2008 		 memblock.memory.regions[idx].size) >= end;
2009 }
2010 
2011 /**
2012  * memblock_is_region_reserved - check if a region intersects reserved memory
2013  * @base: base of region to check
2014  * @size: size of region to check
2015  *
2016  * Check if the region [@base, @base + @size) intersects a reserved
2017  * memory block.
2018  *
2019  * Return:
2020  * True if they intersect, false if not.
2021  */
2022 bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
2023 {
2024 	return memblock_overlaps_region(&memblock.reserved, base, size);
2025 }
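
/*
 * A minimal usage sketch (illustrative only): probe for a conflict before
 * claiming a fixed physical window:
 *
 *	if (!memblock_is_region_reserved(base, size))
 *		memblock_reserve(base, size);
 */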
2026 
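/*
 * Trim every memory region so that it starts and ends on an @align
 * boundary; regions that would become empty are removed.
 */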
2027 void __init_memblock memblock_trim_memory(phys_addr_t align)
2028 {
2029 	phys_addr_t start, end, orig_start, orig_end;
2030 	struct memblock_region *r;
2031 
2032 	for_each_mem_region(r) {
2033 		orig_start = r->base;
2034 		orig_end = r->base + r->size;
2035 		start = round_up(orig_start, align);
2036 		end = round_down(orig_end, align);
2037 
2038 		if (start == orig_start && end == orig_end)
2039 			continue;
2040 
2041 		if (start < end) {
2042 			r->base = start;
2043 			r->size = end - start;
2044 		} else {
2045 			memblock_remove_region(&memblock.memory,
2046 					       r - memblock.memory.regions);
2047 			r--;
2048 		}
2049 	}
2050 }
2051 
2052 void __init_memblock memblock_set_current_limit(phys_addr_t limit)
2053 {
2054 	memblock.current_limit = limit;
2055 }
2056 
2057 phys_addr_t __init_memblock memblock_get_current_limit(void)
2058 {
2059 	return memblock.current_limit;
2060 }
2061 
2062 static void __init_memblock memblock_dump(struct memblock_type *type)
2063 {
2064 	phys_addr_t base, end, size;
2065 	enum memblock_flags flags;
2066 	int idx;
2067 	struct memblock_region *rgn;
2068 
2069 	pr_info(" %s.cnt  = 0x%lx\n", type->name, type->cnt);
2070 
2071 	for_each_memblock_type(idx, type, rgn) {
2072 		char nid_buf[32] = "";
2073 
2074 		base = rgn->base;
2075 		size = rgn->size;
2076 		end = base + size - 1;
2077 		flags = rgn->flags;
2078 #ifdef CONFIG_NUMA
2079 		if (numa_valid_node(memblock_get_region_node(rgn)))
2080 			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
2081 				 memblock_get_region_node(rgn));
2082 #endif
2083 		pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n",
2084 			type->name, idx, &base, &end, &size, nid_buf, flags);
2085 	}
2086 }
2087 
2088 static void __init_memblock __memblock_dump_all(void)
2089 {
2090 	pr_info("MEMBLOCK configuration:\n");
2091 	pr_info(" memory size = %pa reserved size = %pa\n",
2092 		&memblock.memory.total_size,
2093 		&memblock.reserved.total_size);
2094 
2095 	memblock_dump(&memblock.memory);
2096 	memblock_dump(&memblock.reserved);
2097 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
2098 	memblock_dump(&physmem);
2099 #endif
2100 }
2101 
2102 void __init_memblock memblock_dump_all(void)
2103 {
2104 	if (memblock_debug)
2105 		__memblock_dump_all();
2106 }
2107 
2108 void __init memblock_allow_resize(void)
2109 {
2110 	memblock_can_resize = 1;
2111 }
2112 
2113 static int __init early_memblock(char *p)
2114 {
2115 	if (p && strstr(p, "debug"))
2116 		memblock_debug = 1;
2117 	return 0;
2118 }
2119 early_param("memblock", early_memblock);
2120 
2121 static void __init free_memmap(unsigned long start_pfn, unsigned long end_pfn)
2122 {
2123 	struct page *start_pg, *end_pg;
2124 	phys_addr_t pg, pgend;
2125 
2126 	/*
2127 	 * Convert start_pfn/end_pfn to struct page pointers.
2128 	 */
2129 	start_pg = pfn_to_page(start_pfn - 1) + 1;
2130 	end_pg = pfn_to_page(end_pfn - 1) + 1;
2131 
2132 	/*
2133 	 * Convert to physical addresses, and round start upwards and end
2134 	 * downwards.
2135 	 */
2136 	pg = PAGE_ALIGN(__pa(start_pg));
2137 	pgend = PAGE_ALIGN_DOWN(__pa(end_pg));
2138 
2139 	/*
2140 	 * If there are free pages between these, free the section of the
2141 	 * memmap array.
2142 	 */
2143 	if (pg < pgend)
2144 		memblock_phys_free(pg, pgend - pg);
2145 }
2146 
2147 /*
2148  * The mem_map array can get very big.  Free the unused area of the memory map.
2149  */
2150 static void __init free_unused_memmap(void)
2151 {
2152 	unsigned long start, end, prev_end = 0;
2153 	int i;
2154 
2155 	if (!IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) ||
2156 	    IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
2157 		return;
2158 
2159 	/*
2160 	 * This relies on each bank being in address order.
2161 	 * The banks are sorted previously in bootmem_init().
2162 	 */
2163 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
2164 #ifdef CONFIG_SPARSEMEM
2165 		/*
2166 		 * Take care not to free memmap entries that don't exist
2167 		 * due to SPARSEMEM sections which aren't present.
2168 		 */
2169 		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
2170 #endif
2171 		/*
2172 		 * Align down here since many operations in the VM subsystem
2173 		 * presume that there are no holes in the memory map inside
2174 		 * a pageblock
2175 		 */
2176 		start = pageblock_start_pfn(start);
2177 
2178 		/*
2179 		 * If we had a previous bank, and there is a space
2180 		 * between the current bank and the previous, free it.
2181 		 */
2182 		if (prev_end && prev_end < start)
2183 			free_memmap(prev_end, start);
2184 
2185 		/*
2186 		 * Align up here since many operations in the VM subsystem
2187 		 * presume that there are no holes in the memory map inside
2188 		 * a pageblock
2189 		 */
2190 		prev_end = pageblock_align(end);
2191 	}
2192 
2193 #ifdef CONFIG_SPARSEMEM
2194 	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) {
2195 		prev_end = pageblock_align(end);
2196 		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
2197 	}
2198 #endif
2199 }
2200 
2201 static void __init __free_pages_memory(unsigned long start, unsigned long end)
2202 {
2203 	int order;
2204 
2205 	while (start < end) {
2206 		/*
2207 		 * Free the pages in the largest chunks alignment allows.
2208 		 *
2209 		 * __ffs() behaviour is undefined for 0. start == 0 is
2210 		 * MAX_PAGE_ORDER-aligned, set order to MAX_PAGE_ORDER for
2211 		 * the case.
2212 		 */
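		/*
		 * For example (assuming MAX_PAGE_ORDER >= 4), freeing pfns
		 * [5, 32) proceeds as 5 (order 0), 6-7 (order 1),
		 * 8-15 (order 3) and finally 16-31 (order 4).
		 */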
2213 		if (start)
2214 			order = min_t(int, MAX_PAGE_ORDER, __ffs(start));
2215 		else
2216 			order = MAX_PAGE_ORDER;
2217 
2218 		while (start + (1UL << order) > end)
2219 			order--;
2220 
2221 		memblock_free_pages(start, order);
2222 
2223 		start += (1UL << order);
2224 	}
2225 }
2226 
2227 static unsigned long __init __free_memory_core(phys_addr_t start,
2228 				 phys_addr_t end)
2229 {
2230 	unsigned long start_pfn = PFN_UP(start);
2231 	unsigned long end_pfn = PFN_DOWN(end);
2232 
2233 	if (!IS_ENABLED(CONFIG_HIGHMEM) && end_pfn > max_low_pfn)
2234 		end_pfn = max_low_pfn;
2235 
2236 	if (start_pfn >= end_pfn)
2237 		return 0;
2238 
2239 	__free_pages_memory(start_pfn, end_pfn);
2240 
2241 	return end_pfn - start_pfn;
2242 }
2243 
2244 /*
2245  * Initialised pages do not have PageReserved set. This function is called
2246  * for each reserved range and marks the pages PageReserved.
2247  * When deferred initialization of struct pages is enabled it also ensures
2248  * that struct pages are properly initialised.
2249  */
2250 static void __init memmap_init_reserved_range(phys_addr_t start,
2251 					      phys_addr_t end, int nid)
2252 {
2253 	unsigned long pfn;
2254 
2255 	for_each_valid_pfn(pfn, PFN_DOWN(start), PFN_UP(end)) {
2256 		struct page *page = pfn_to_page(pfn);
2257 
2258 		init_deferred_page(pfn, nid);
2259 
2260 		/*
2261 		 * no need for atomic set_bit because the struct
2262 		 * page is not visible yet so nobody should
2263 		 * access it yet.
2264 		 */
2265 		__SetPageReserved(page);
2266 	}
2267 }
2268 
2269 static void __init memmap_init_reserved_pages(void)
2270 {
2271 	struct memblock_region *region;
2272 	phys_addr_t start, end;
2273 	int nid;
2274 	unsigned long max_reserved;
2275 
2276 	/*
2277 	 * set nid on all reserved pages and also treat struct
2278 	 * pages for the NOMAP regions as PageReserved
2279 	 */
2280 repeat:
2281 	max_reserved = memblock.reserved.max;
2282 	for_each_mem_region(region) {
2283 		nid = memblock_get_region_node(region);
2284 		start = region->base;
2285 		end = start + region->size;
2286 
2287 		if (memblock_is_nomap(region))
2288 			memmap_init_reserved_range(start, end, nid);
2289 
2290 		memblock_set_node(start, region->size, &memblock.reserved, nid);
2291 	}
2292 	/*
2293 	 * If 'max' changed, the memblock.reserved array has been doubled,
2294 	 * which may have created a new reserved region before the current
2295 	 * 'start'. Repeat the procedure to set the node id for it.
2296 	 */
2297 	if (max_reserved != memblock.reserved.max)
2298 		goto repeat;
2299 
2300 	/*
2301 	 * initialize struct pages for reserved regions that don't have
2302 	 * the MEMBLOCK_RSRV_NOINIT flag set
2303 	 */
2304 	for_each_reserved_mem_region(region) {
2305 		if (!memblock_is_reserved_noinit(region)) {
2306 			nid = memblock_get_region_node(region);
2307 			start = region->base;
2308 			end = start + region->size;
2309 
2310 			if (!numa_valid_node(nid))
2311 				nid = early_pfn_to_nid(PFN_DOWN(start));
2312 
2313 			memmap_init_reserved_range(start, end, nid);
2314 		}
2315 	}
2316 }
2317 
2318 static unsigned long __init free_low_memory_core_early(void)
2319 {
2320 	unsigned long count = 0;
2321 	phys_addr_t start, end;
2322 	u64 i;
2323 
2324 	memblock_clear_hotplug(0, -1);
2325 
2326 	memmap_init_reserved_pages();
2327 
2328 	/*
2329 	 * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
2330 	 * because in some cases, e.g. when node 0 has no RAM installed,
2331 	 * low memory will be on node 1.
2332 	 */
2333 	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
2334 				NULL)
2335 		count += __free_memory_core(start, end);
2336 
2337 	return count;
2338 }
2339 
2340 static int reset_managed_pages_done __initdata;
2341 
2342 static void __init reset_node_managed_pages(pg_data_t *pgdat)
2343 {
2344 	struct zone *z;
2345 
2346 	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
2347 		atomic_long_set(&z->managed_pages, 0);
2348 }
2349 
2350 void __init reset_all_zones_managed_pages(void)
2351 {
2352 	struct pglist_data *pgdat;
2353 
2354 	if (reset_managed_pages_done)
2355 		return;
2356 
2357 	for_each_online_pgdat(pgdat)
2358 		reset_node_managed_pages(pgdat);
2359 
2360 	reset_managed_pages_done = 1;
2361 }
2362 
2363 /**
2364  * memblock_free_all - release free pages to the buddy allocator
2365  */
2366 void __init memblock_free_all(void)
2367 {
2368 	unsigned long pages;
2369 
2370 	free_unused_memmap();
2371 	reset_all_zones_managed_pages();
2372 
2373 	memblock_clear_kho_scratch_only();
2374 	pages = free_low_memory_core_early();
2375 	totalram_pages_add(pages);
2376 }
2377 
2378 /* Keep a table to reserve named memory */
2379 #define RESERVE_MEM_MAX_ENTRIES		8
2380 #define RESERVE_MEM_NAME_SIZE		16
2381 struct reserve_mem_table {
2382 	char			name[RESERVE_MEM_NAME_SIZE];
2383 	phys_addr_t		start;
2384 	phys_addr_t		size;
2385 };
2386 static struct reserve_mem_table reserved_mem_table[RESERVE_MEM_MAX_ENTRIES];
2387 static int reserved_mem_count;
2388 static DEFINE_MUTEX(reserve_mem_lock);
2389 
2390 /* Add wildcard region with a lookup name */
2391 static void __init reserved_mem_add(phys_addr_t start, phys_addr_t size,
2392 				   const char *name)
2393 {
2394 	struct reserve_mem_table *map;
2395 
2396 	map = &reserved_mem_table[reserved_mem_count++];
2397 	map->start = start;
2398 	map->size = size;
2399 	strscpy(map->name, name);
2400 }
2401 
2402 static struct reserve_mem_table *reserve_mem_find_by_name_nolock(const char *name)
2403 {
2404 	struct reserve_mem_table *map;
2405 	int i;
2406 
2407 	for (i = 0; i < reserved_mem_count; i++) {
2408 		map = &reserved_mem_table[i];
2409 		if (!map->size)
2410 			continue;
2411 		if (strcmp(name, map->name) == 0)
2412 			return map;
2413 	}
2414 	return NULL;
2415 }
2416 
2417 /**
2418  * reserve_mem_find_by_name - Find reserved memory region with a given name
2419  * @name: The name that is attached to a reserved memory region
2420  * @start: If found, holds the start address
2421  * @size: If found, holds the size of the region.
2422  *
2423  * @start and @size are only updated if @name is found.
2424  *
2425  * Returns: 1 if found or 0 if not found.
2426  */
2427 int reserve_mem_find_by_name(const char *name, phys_addr_t *start, phys_addr_t *size)
2428 {
2429 	struct reserve_mem_table *map;
2430 
2431 	guard(mutex)(&reserve_mem_lock);
2432 	map = reserve_mem_find_by_name_nolock(name);
2433 	if (!map)
2434 		return 0;
2435 
2436 	*start = map->start;
2437 	*size = map->size;
2438 	return 1;
2439 }
2440 EXPORT_SYMBOL_GPL(reserve_mem_find_by_name);
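
/*
 * A minimal usage sketch (illustrative only): a driver picking up a region
 * that was set aside with, say, reserve_mem=2M:4096:trace on the command
 * line:
 *
 *	phys_addr_t start, size;
 *	void *buf;
 *
 *	if (reserve_mem_find_by_name("trace", &start, &size))
 *		buf = memremap(start, size, MEMREMAP_WB);
 */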
2441 
2442 /**
2443  * reserve_mem_release_by_name - Release reserved memory region with a given name
2444  * @name: The name that is attached to a reserved memory region
2445  *
2446  * Forcibly release the pages in the reserved memory region so that they can
2447  * be used as free memory. After the release, the reserved region size becomes 0.
2448  *
2449  * Returns: 1 if released or 0 if not found.
2450  */
2451 int reserve_mem_release_by_name(const char *name)
2452 {
2453 	char buf[RESERVE_MEM_NAME_SIZE + 12];
2454 	struct reserve_mem_table *map;
2455 	void *start, *end;
2456 
2457 	guard(mutex)(&reserve_mem_lock);
2458 	map = reserve_mem_find_by_name_nolock(name);
2459 	if (!map)
2460 		return 0;
2461 
2462 	start = phys_to_virt(map->start);
2463 	end = start + map->size - 1;
2464 	snprintf(buf, sizeof(buf), "reserve_mem:%s", name);
2465 	free_reserved_area(start, end, 0, buf);
2466 	map->size = 0;
2467 
2468 	return 1;
2469 }
2470 
2471 #ifdef CONFIG_KEXEC_HANDOVER
2472 
2473 static int __init reserved_mem_preserve(void)
2474 {
2475 	unsigned int nr_preserved = 0;
2476 	int err;
2477 
2478 	for (unsigned int i = 0; i < reserved_mem_count; i++, nr_preserved++) {
2479 		struct reserve_mem_table *map = &reserved_mem_table[i];
2480 		struct page *page = phys_to_page(map->start);
2481 		unsigned int nr_pages = map->size >> PAGE_SHIFT;
2482 
2483 		err = kho_preserve_pages(page, nr_pages);
2484 		if (err)
2485 			goto err_unpreserve;
2486 	}
2487 
2488 	return 0;
2489 
2490 err_unpreserve:
2491 	for (unsigned int i = 0; i < nr_preserved; i++) {
2492 		struct reserve_mem_table *map = &reserved_mem_table[i];
2493 		struct page *page = phys_to_page(map->start);
2494 		unsigned int nr_pages = map->size >> PAGE_SHIFT;
2495 
2496 		kho_unpreserve_pages(page, nr_pages);
2497 	}
2498 
2499 	return err;
2500 }
2501 
2502 static int __init prepare_kho_fdt(void)
2503 {
2504 	struct page *fdt_page;
2505 	void *fdt;
2506 	int err;
2507 
2508 	fdt_page = alloc_page(GFP_KERNEL);
2509 	if (!fdt_page) {
2510 		err = -ENOMEM;
2511 		goto err_report;
2512 	}
2513 
2514 	fdt = page_to_virt(fdt_page);
2515 	err = kho_preserve_pages(fdt_page, 1);
2516 	if (err)
2517 		goto err_free_fdt;
2518 
2519 	err |= fdt_create(fdt, PAGE_SIZE);
2520 	err |= fdt_finish_reservemap(fdt);
2521 	err |= fdt_begin_node(fdt, "");
2522 	err |= fdt_property_string(fdt, "compatible", MEMBLOCK_KHO_NODE_COMPATIBLE);
2523 
2524 	for (unsigned int i = 0; !err && i < reserved_mem_count; i++) {
2525 		struct reserve_mem_table *map = &reserved_mem_table[i];
2526 
2527 		err |= fdt_begin_node(fdt, map->name);
2528 		err |= fdt_property_string(fdt, "compatible", RESERVE_MEM_KHO_NODE_COMPATIBLE);
2529 		err |= fdt_property(fdt, "start", &map->start, sizeof(map->start));
2530 		err |= fdt_property(fdt, "size", &map->size, sizeof(map->size));
2531 		err |= fdt_end_node(fdt);
2532 	}
2533 	err |= fdt_end_node(fdt);
2534 	err |= fdt_finish(fdt);
2535 
2536 	if (err)
2537 		goto err_unpreserve_fdt;
2538 
2539 	err = kho_add_subtree(MEMBLOCK_KHO_FDT, fdt);
2540 	if (err)
2541 		goto err_unpreserve_fdt;
2542 
2543 	err = reserved_mem_preserve();
2544 	if (err)
2545 		goto err_remove_subtree;
2546 
2547 	return 0;
2548 
2549 err_remove_subtree:
2550 	kho_remove_subtree(fdt);
2551 err_unpreserve_fdt:
2552 	kho_unpreserve_pages(fdt_page, 1);
2553 err_free_fdt:
2554 	put_page(fdt_page);
2555 err_report:
2556 	pr_err("failed to prepare memblock FDT for KHO: %d\n", err);
2557 
2558 	return err;
2559 }
2560 
2561 static int __init reserve_mem_init(void)
2562 {
2563 	if (!kho_is_enabled() || !reserved_mem_count)
2564 		return 0;
2565 
2566 	return prepare_kho_fdt();
2572 }
2573 late_initcall(reserve_mem_init);
2574 
2575 static void *__init reserve_mem_kho_retrieve_fdt(void)
2576 {
2577 	phys_addr_t fdt_phys;
2578 	static void *fdt;
2579 	int err;
2580 
2581 	if (fdt)
2582 		return fdt;
2583 
2584 	err = kho_retrieve_subtree(MEMBLOCK_KHO_FDT, &fdt_phys);
2585 	if (err) {
2586 		if (err != -ENOENT)
2587 			pr_warn("failed to retrieve FDT '%s' from KHO: %d\n",
2588 				MEMBLOCK_KHO_FDT, err);
2589 		return NULL;
2590 	}
2591 
2592 	fdt = phys_to_virt(fdt_phys);
2593 
2594 	err = fdt_node_check_compatible(fdt, 0, MEMBLOCK_KHO_NODE_COMPATIBLE);
2595 	if (err) {
2596 		pr_warn("FDT '%s' is incompatible with '%s': %d\n",
2597 			MEMBLOCK_KHO_FDT, MEMBLOCK_KHO_NODE_COMPATIBLE, err);
2598 		fdt = NULL;
2599 	}
2600 
2601 	return fdt;
2602 }
2603 
2604 static bool __init reserve_mem_kho_revive(const char *name, phys_addr_t size,
2605 					  phys_addr_t align)
2606 {
2607 	int err, len_start, len_size, offset;
2608 	const phys_addr_t *p_start, *p_size;
2609 	const void *fdt;
2610 
2611 	fdt = reserve_mem_kho_retrieve_fdt();
2612 	if (!fdt)
2613 		return false;
2614 
2615 	offset = fdt_subnode_offset(fdt, 0, name);
2616 	if (offset < 0) {
2617 		pr_warn("FDT '%s' has no child '%s': %d\n",
2618 			MEMBLOCK_KHO_FDT, name, offset);
2619 		return false;
2620 	}
2621 	err = fdt_node_check_compatible(fdt, offset, RESERVE_MEM_KHO_NODE_COMPATIBLE);
2622 	if (err) {
2623 		pr_warn("Node '%s' is incompatible with '%s': %d\n",
2624 			name, RESERVE_MEM_KHO_NODE_COMPATIBLE, err);
2625 		return false;
2626 	}
2627 
2628 	p_start = fdt_getprop(fdt, offset, "start", &len_start);
2629 	p_size = fdt_getprop(fdt, offset, "size", &len_size);
2630 	if (!p_start || len_start != sizeof(*p_start) || !p_size ||
2631 	    len_size != sizeof(*p_size)) {
2632 		return false;
2633 	}
2634 
2635 	if (*p_start & (align - 1)) {
2636 		pr_warn("KHO reserve-mem '%s' has wrong alignment (0x%lx, 0x%lx)\n",
2637 			name, (long)align, (long)*p_start);
2638 		return false;
2639 	}
2640 
2641 	if (*p_size != size) {
2642 		pr_warn("KHO reserve-mem '%s' has wrong size (0x%lx != 0x%lx)\n",
2643 			name, (long)*p_size, (long)size);
2644 		return false;
2645 	}
2646 
2647 	reserved_mem_add(*p_start, size, name);
2648 	pr_info("Revived memory reservation '%s' from KHO\n", name);
2649 
2650 	return true;
2651 }
2652 #else
2653 static bool __init reserve_mem_kho_revive(const char *name, phys_addr_t size,
2654 					  phys_addr_t align)
2655 {
2656 	return false;
2657 }
2658 #endif /* CONFIG_KEXEC_HANDOVER */
2659 
2660 /*
2661  * Parse reserve_mem=nn:align:name
2662  */
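/*
 * For example (hypothetical values), "reserve_mem=1M:4096:oops_buf" sets
 * aside 1M aligned to 4096 bytes, which can later be found by the name
 * "oops_buf" via reserve_mem_find_by_name().
 */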
2663 static int __init reserve_mem(char *p)
2664 {
2665 	phys_addr_t start, size, align, tmp;
2666 	char *name;
2667 	char *oldp;
2668 	int len;
2669 
2670 	if (!p)
2671 		goto err_param;
2672 
2673 	/* Check if there's room for more reserved memory */
2674 	if (reserved_mem_count >= RESERVE_MEM_MAX_ENTRIES) {
2675 		pr_err("reserve_mem: no more room for reserved memory\n");
2676 		return -EBUSY;
2677 	}
2678 
2679 	oldp = p;
2680 	size = memparse(p, &p);
2681 	if (!size || p == oldp)
2682 		goto err_param;
2683 
2684 	if (*p != ':')
2685 		goto err_param;
2686 
2687 	align = memparse(p+1, &p);
2688 	if (*p != ':')
2689 		goto err_param;
2690 
2691 	/*
2692 	 * memblock_phys_alloc() doesn't like a zero size align,
2693 	 * but it is OK for this command to have it.
2694 	 */
2695 	if (align < SMP_CACHE_BYTES)
2696 		align = SMP_CACHE_BYTES;
2697 
2698 	name = p + 1;
2699 	len = strlen(name);
2700 
2701 	/* name needs to have length but not too big */
2702 	if (!len || len >= RESERVE_MEM_NAME_SIZE)
2703 		goto err_param;
2704 
2705 	/* Make sure that name has text */
2706 	for (p = name; *p; p++) {
2707 		if (!isspace(*p))
2708 			break;
2709 	}
2710 	if (!*p)
2711 		goto err_param;
2712 
2713 	/* Make sure the name is not already used */
2714 	if (reserve_mem_find_by_name(name, &start, &tmp)) {
2715 		pr_err("reserve_mem: name \"%s\" was already used\n", name);
2716 		return -EBUSY;
2717 	}
2718 
2719 	/* Pick previous allocations up from KHO if available */
2720 	if (reserve_mem_kho_revive(name, size, align))
2721 		return 1;
2722 
2723 	/* TODO: Allocation must be outside of scratch region */
2724 	start = memblock_phys_alloc(size, align);
2725 	if (!start) {
2726 		pr_err("reserve_mem: memblock allocation failed\n");
2727 		return -ENOMEM;
2728 	}
2729 
2730 	reserved_mem_add(start, size, name);
2731 
2732 	return 1;
2733 err_param:
2734 	pr_err("reserve_mem: empty or malformed parameter\n");
2735 	return -EINVAL;
2736 }
2737 __setup("reserve_mem=", reserve_mem);
2738 
2739 #ifdef CONFIG_DEBUG_FS
2740 #ifdef CONFIG_ARCH_KEEP_MEMBLOCK
2741 static const char * const flagname[] = {
2742 	[ilog2(MEMBLOCK_HOTPLUG)] = "HOTPLUG",
2743 	[ilog2(MEMBLOCK_MIRROR)] = "MIRROR",
2744 	[ilog2(MEMBLOCK_NOMAP)] = "NOMAP",
2745 	[ilog2(MEMBLOCK_DRIVER_MANAGED)] = "DRV_MNG",
2746 	[ilog2(MEMBLOCK_RSRV_NOINIT)] = "RSV_NIT",
2747 	[ilog2(MEMBLOCK_RSRV_KERN)] = "RSV_KERN",
2748 	[ilog2(MEMBLOCK_KHO_SCRATCH)] = "KHO_SCRATCH",
2749 };
2750 
2751 static int memblock_debug_show(struct seq_file *m, void *private)
2752 {
2753 	struct memblock_type *type = m->private;
2754 	struct memblock_region *reg;
2755 	int i, j, nid;
2756 	unsigned int count = ARRAY_SIZE(flagname);
2757 	phys_addr_t end;
2758 
2759 	for (i = 0; i < type->cnt; i++) {
2760 		reg = &type->regions[i];
2761 		end = reg->base + reg->size - 1;
2762 		nid = memblock_get_region_node(reg);
2763 
2764 		seq_printf(m, "%4d: ", i);
2765 		seq_printf(m, "%pa..%pa ", &reg->base, &end);
2766 		if (numa_valid_node(nid))
2767 			seq_printf(m, "%4d ", nid);
2768 		else
2769 			seq_printf(m, "%4c ", 'x');
2770 		if (reg->flags) {
2771 			for (j = 0; j < count; j++) {
2772 				if (reg->flags & (1U << j)) {
2773 					seq_printf(m, "%s\n", flagname[j]);
2774 					break;
2775 				}
2776 			}
2777 			if (j == count)
2778 				seq_printf(m, "%s\n", "UNKNOWN");
2779 		} else {
2780 			seq_printf(m, "%s\n", "NONE");
2781 		}
2782 	}
2783 	return 0;
2784 }
2785 DEFINE_SHOW_ATTRIBUTE(memblock_debug);
2786 
2787 static inline void memblock_debugfs_expose_arrays(struct dentry *root)
2788 {
2789 	debugfs_create_file("memory", 0444, root,
2790 			    &memblock.memory, &memblock_debug_fops);
2791 	debugfs_create_file("reserved", 0444, root,
2792 			    &memblock.reserved, &memblock_debug_fops);
2793 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
2794 	debugfs_create_file("physmem", 0444, root, &physmem,
2795 			    &memblock_debug_fops);
2796 #endif
2797 }
2798 
2799 #else
2800 
2801 static inline void memblock_debugfs_expose_arrays(struct dentry *root) { }
2802 
2803 #endif /* CONFIG_ARCH_KEEP_MEMBLOCK */
2804 
2805 static int memblock_reserve_mem_show(struct seq_file *m, void *private)
2806 {
2807 	struct reserve_mem_table *map;
2808 	char txtsz[16];
2809 
2810 	guard(mutex)(&reserve_mem_lock);
2811 	for (int i = 0; i < reserved_mem_count; i++) {
2812 		map = &reserved_mem_table[i];
2813 		if (!map->size)
2814 			continue;
2815 
2816 		memset(txtsz, 0, sizeof(txtsz));
2817 		string_get_size(map->size, 1, STRING_UNITS_2, txtsz, sizeof(txtsz));
2818 		seq_printf(m, "%s\t\t(%s)\n", map->name, txtsz);
2819 	}
2820 
2821 	return 0;
2822 }
2823 DEFINE_SHOW_ATTRIBUTE(memblock_reserve_mem);
2824 
2825 static int __init memblock_init_debugfs(void)
2826 {
2827 	struct dentry *root;
2828 
2829 	if (!IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !reserved_mem_count)
2830 		return 0;
2831 
2832 	root = debugfs_create_dir("memblock", NULL);
2833 
2834 	if (reserved_mem_count)
2835 		debugfs_create_file("reserve_mem_param", 0444, root, NULL,
2836 				    &memblock_reserve_mem_fops);
2837 
2838 	memblock_debugfs_expose_arrays(root);
2839 	return 0;
2840 }
2841 __initcall(memblock_init_debugfs);
2842 
2843 #endif /* CONFIG_DEBUG_FS */
2844