1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Procedures for maintaining information about logical memory blocks.
4  *
5  * Peter Bergner, IBM Corp.	June 2001.
6  * Copyright (C) 2001 Peter Bergner.
7  */
8 
9 #include <linux/kernel.h>
10 #include <linux/slab.h>
11 #include <linux/init.h>
12 #include <linux/bitops.h>
13 #include <linux/poison.h>
14 #include <linux/pfn.h>
15 #include <linux/debugfs.h>
16 #include <linux/kmemleak.h>
17 #include <linux/seq_file.h>
18 #include <linux/memblock.h>
19 #include <linux/mutex.h>
20 #include <linux/string_helpers.h>
21 
22 #ifdef CONFIG_KEXEC_HANDOVER
23 #include <linux/libfdt.h>
24 #include <linux/kexec_handover.h>
25 #include <linux/kho/abi/memblock.h>
26 #endif /* CONFIG_KEXEC_HANDOVER */
27 
28 #include <asm/sections.h>
29 #include <linux/io.h>
30 
31 #include "internal.h"
32 
33 #define INIT_MEMBLOCK_REGIONS			128
34 #define INIT_PHYSMEM_REGIONS			4
35 
36 #ifndef INIT_MEMBLOCK_RESERVED_REGIONS
37 # define INIT_MEMBLOCK_RESERVED_REGIONS		INIT_MEMBLOCK_REGIONS
38 #endif
39 
40 #ifndef INIT_MEMBLOCK_MEMORY_REGIONS
41 #define INIT_MEMBLOCK_MEMORY_REGIONS		INIT_MEMBLOCK_REGIONS
42 #endif
43 
44 /**
45  * DOC: memblock overview
46  *
47  * Memblock is a method of managing memory regions during the early
48  * boot period when the usual kernel memory allocators are not up and
49  * running.
50  *
51  * Memblock views the system memory as collections of contiguous
52  * regions. There are several types of these collections:
53  *
54  * * ``memory`` - describes the physical memory available to the
55  *   kernel; this may differ from the actual physical memory installed
56  *   in the system, for instance when the memory is restricted with
57  *   ``mem=`` command line parameter
58  * * ``reserved`` - describes the regions that were allocated
59  * * ``physmem`` - describes the actual physical memory available during
60  *   boot regardless of the possible restrictions and memory hot(un)plug;
61  *   the ``physmem`` type is only available on some architectures.
62  *
63  * Each region is represented by struct memblock_region that
64  * defines the region extents, its attributes and NUMA node id on NUMA
65  * systems. Every memory type is described by the struct memblock_type
66  * which contains an array of memory regions along with
67  * the allocator metadata. The "memory" and "reserved" types are nicely
68  * wrapped with struct memblock. This structure is statically
69  * initialized at build time. The region arrays are initially sized to
70  * %INIT_MEMBLOCK_MEMORY_REGIONS for "memory" and
71  * %INIT_MEMBLOCK_RESERVED_REGIONS for "reserved". The region array
72  * for "physmem" is initially sized to %INIT_PHYSMEM_REGIONS.
73  * memblock_allow_resize() enables automatic resizing of the region
74  * arrays as new regions are added. This feature should be used with
75  * care so that memory allocated for the region arrays does not
76  * overlap with areas that should be reserved, for example initrd.
77  *
78  * The early architecture setup should tell memblock what the physical
79  * memory layout is by using memblock_add() or memblock_add_node()
80  * functions. The first function does not assign the region to a NUMA
81  * node and is appropriate for UMA systems. It can still be used on
82  * NUMA systems, with the region assigned to a NUMA node later in the
83  * setup process using memblock_set_node(). memblock_add_node()
84  * performs such an assignment directly.
85  *
86  * Once memblock is set up, memory can be allocated using one of the
87  * API variants:
88  *
89  * * memblock_phys_alloc*() - these functions return the **physical**
90  *   address of the allocated memory
91  * * memblock_alloc*() - these functions return the **virtual** address
92  *   of the allocated memory.
93  *
94  * Note that both API variants use implicit assumptions about allowed
95  * memory ranges and the fallback methods. Consult the documentation
96  * of memblock_alloc_internal() and memblock_alloc_range_nid()
97  * functions for more elaborate description.
98  *
99  * As the system boot progresses, the architecture specific mem_init()
100  * function frees all the memory to the buddy page allocator.
101  *
102  * Unless an architecture enables %CONFIG_ARCH_KEEP_MEMBLOCK, the
103  * memblock data structures (except "physmem") will be discarded after the
104  * system initialization completes.
105  */
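
/*
 * Example (editor's sketch, not part of the original sources): typical
 * early-boot use of the memblock API by architecture setup code. All
 * addresses, sizes and variable names below are made up for illustration.
 *
 *	// describe the physical memory reported by firmware
 *	memblock_add(0x80000000, SZ_1G);
 *
 *	// keep the initrd away from early allocations
 *	memblock_reserve(initrd_start_phys, initrd_size);
 *
 *	// allocate zeroed early memory, returned as a virtual address
 *	table = memblock_alloc(SZ_16K, SMP_CACHE_BYTES);
 *	if (!table)
 *		panic("%s: failed to allocate early table\n", __func__);
 */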
106 
107 #ifndef CONFIG_NUMA
108 struct pglist_data __refdata contig_page_data;
109 EXPORT_SYMBOL(contig_page_data);
110 #endif
111 
112 unsigned long max_low_pfn;
113 unsigned long min_low_pfn;
114 unsigned long max_pfn;
115 unsigned long long max_possible_pfn;
116 
117 #ifdef CONFIG_MEMBLOCK_KHO_SCRATCH
118 /* When set to true, only allocate from MEMBLOCK_KHO_SCRATCH ranges */
119 static bool kho_scratch_only;
120 #else
121 #define kho_scratch_only false
122 #endif
123 
124 static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_MEMORY_REGIONS] __initdata_memblock;
125 static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
126 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
127 static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS];
128 #endif
129 
130 struct memblock memblock __initdata_memblock = {
131 	.memory.regions		= memblock_memory_init_regions,
132 	.memory.max		= INIT_MEMBLOCK_MEMORY_REGIONS,
133 	.memory.name		= "memory",
134 
135 	.reserved.regions	= memblock_reserved_init_regions,
136 	.reserved.max		= INIT_MEMBLOCK_RESERVED_REGIONS,
137 	.reserved.name		= "reserved",
138 
139 	.bottom_up		= false,
140 	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
141 };
142 
143 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
144 struct memblock_type physmem = {
145 	.regions		= memblock_physmem_init_regions,
146 	.max			= INIT_PHYSMEM_REGIONS,
147 	.name			= "physmem",
148 };
149 #endif
150 
151 /*
152  * Keep a pointer to &memblock.memory in the text section to use it in
153  * __next_mem_range() and its helpers.
154  * For architectures that do not keep memblock data after init, this
155  * pointer will be reset to NULL at memblock_discard().
156  */
157 static __refdata struct memblock_type *memblock_memory = &memblock.memory;
158 
159 #define for_each_memblock_type(i, memblock_type, rgn)			\
160 	for (i = 0, rgn = &memblock_type->regions[0];			\
161 	     i < memblock_type->cnt;					\
162 	     i++, rgn = &memblock_type->regions[i])
163 
164 #define memblock_dbg(fmt, ...)						\
165 	do {								\
166 		if (memblock_debug)					\
167 			pr_info(fmt, ##__VA_ARGS__);			\
168 	} while (0)
169 
170 static int memblock_debug __initdata_memblock;
171 static bool system_has_some_mirror __initdata_memblock;
172 static int memblock_can_resize __initdata_memblock;
173 static int memblock_memory_in_slab __initdata_memblock;
174 static int memblock_reserved_in_slab __initdata_memblock;
175 
176 bool __init_memblock memblock_has_mirror(void)
177 {
178 	return system_has_some_mirror;
179 }
180 
181 static enum memblock_flags __init_memblock choose_memblock_flags(void)
182 {
183 	/* skip non-scratch memory for kho early boot allocations */
184 	if (kho_scratch_only)
185 		return MEMBLOCK_KHO_SCRATCH;
186 
187 	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
188 }
189 
190 /* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
191 static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
192 {
193 	return *size = min(*size, PHYS_ADDR_MAX - base);
194 }
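
/*
 * Editor's note: a small worked example of the clamping above. With a
 * region that starts 4K below PHYS_ADDR_MAX, a requested size of 16K is
 * capped so that @base + @size cannot wrap around:
 *
 *	phys_addr_t base = PHYS_ADDR_MAX - SZ_4K;
 *	phys_addr_t size = SZ_16K;
 *
 *	memblock_cap_size(base, &size);	// size is now SZ_4K
 */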
195 
196 /*
197  * Address comparison utilities
198  */
199 unsigned long __init_memblock
200 memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1, phys_addr_t base2,
201 		       phys_addr_t size2)
202 {
203 	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
204 }
205 
206 bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
207 					phys_addr_t base, phys_addr_t size)
208 {
209 	unsigned long i;
210 
211 	memblock_cap_size(base, &size);
212 
213 	for (i = 0; i < type->cnt; i++)
214 		if (memblock_addrs_overlap(base, size, type->regions[i].base,
215 					   type->regions[i].size))
216 			return true;
217 	return false;
218 }
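
/*
 * Editor's note: regions are treated as half-open intervals
 * [base, base + size), so ranges that merely touch do not overlap. A
 * hedged sketch using the public wrapper built on this helper:
 *
 *	// true if any part of [base, base + size) is already reserved
 *	if (memblock_is_region_reserved(base, size))
 *		pr_warn("range %pa (+%pa) collides with a reservation\n",
 *			&base, &size);
 */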
219 
220 /**
221  * __memblock_find_range_bottom_up - find free area utility in bottom-up
222  * @start: start of candidate range
223  * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
224  *       %MEMBLOCK_ALLOC_ACCESSIBLE
225  * @size: size of free area to find
226  * @align: alignment of free area to find
227  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
228  * @flags: pick from blocks based on memory attributes
229  *
230  * Utility called from memblock_find_in_range_node(), find free area bottom-up.
231  *
232  * Return:
233  * Found address on success, 0 on failure.
234  */
235 static phys_addr_t __init_memblock
236 __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
237 				phys_addr_t size, phys_addr_t align, int nid,
238 				enum memblock_flags flags)
239 {
240 	phys_addr_t this_start, this_end, cand;
241 	u64 i;
242 
243 	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
244 		this_start = clamp(this_start, start, end);
245 		this_end = clamp(this_end, start, end);
246 
247 		cand = round_up(this_start, align);
248 		if (cand < this_end && this_end - cand >= size)
249 			return cand;
250 	}
251 
252 	return 0;
253 }
254 
255 /**
256  * __memblock_find_range_top_down - find free area utility, in top-down
257  * @start: start of candidate range
258  * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
259  *       %MEMBLOCK_ALLOC_ACCESSIBLE
260  * @size: size of free area to find
261  * @align: alignment of free area to find
262  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
263  * @flags: pick from blocks based on memory attributes
264  *
265  * Utility called from memblock_find_in_range_node(); finds a free area top-down.
266  *
267  * Return:
268  * Found address on success, 0 on failure.
269  */
270 static phys_addr_t __init_memblock
271 __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
272 			       phys_addr_t size, phys_addr_t align, int nid,
273 			       enum memblock_flags flags)
274 {
275 	phys_addr_t this_start, this_end, cand;
276 	u64 i;
277 
278 	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
279 					NULL) {
280 		this_start = clamp(this_start, start, end);
281 		this_end = clamp(this_end, start, end);
282 
283 		if (this_end < size)
284 			continue;
285 
286 		cand = round_down(this_end - size, align);
287 		if (cand >= this_start)
288 			return cand;
289 	}
290 
291 	return 0;
292 }
293 
294 /**
295  * memblock_find_in_range_node - find free area in given range and node
296  * @size: size of free area to find
297  * @align: alignment of free area to find
298  * @start: start of candidate range
299  * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
300  *       %MEMBLOCK_ALLOC_ACCESSIBLE
301  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
302  * @flags: pick from blocks based on memory attributes
303  *
304  * Find @size free area aligned to @align in the specified range and node.
305  *
306  * Return:
307  * Found address on success, 0 on failure.
308  */
309 static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
310 					phys_addr_t align, phys_addr_t start,
311 					phys_addr_t end, int nid,
312 					enum memblock_flags flags)
313 {
314 	/* pump up @end */
315 	if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
316 	    end == MEMBLOCK_ALLOC_NOLEAKTRACE)
317 		end = memblock.current_limit;
318 
319 	/* avoid allocating the first page */
320 	start = max_t(phys_addr_t, start, PAGE_SIZE);
321 	end = max(start, end);
322 
323 	if (memblock_bottom_up())
324 		return __memblock_find_range_bottom_up(start, end, size, align,
325 						       nid, flags);
326 	else
327 		return __memblock_find_range_top_down(start, end, size, align,
328 						      nid, flags);
329 }
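
/*
 * Editor's note: the search direction above follows memblock_bottom_up().
 * Most architectures allocate top-down; a sketch of temporarily switching
 * direction, as some platforms do during early NUMA setup, might look
 * like this:
 *
 *	bool prev = memblock_bottom_up();
 *
 *	memblock_set_bottom_up(true);
 *	// ... early allocations that prefer low memory ...
 *	memblock_set_bottom_up(prev);
 */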
330 
331 /**
332  * memblock_find_in_range - find free area in given range
333  * @start: start of candidate range
334  * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
335  *       %MEMBLOCK_ALLOC_ACCESSIBLE
336  * @size: size of free area to find
337  * @align: alignment of free area to find
338  *
339  * Find @size free area aligned to @align in the specified range.
340  *
341  * Return:
342  * Found address on success, 0 on failure.
343  */
344 static phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
345 					phys_addr_t end, phys_addr_t size,
346 					phys_addr_t align)
347 {
348 	phys_addr_t ret;
349 	enum memblock_flags flags = choose_memblock_flags();
350 
351 again:
352 	ret = memblock_find_in_range_node(size, align, start, end,
353 					    NUMA_NO_NODE, flags);
354 
355 	if (!ret && (flags & MEMBLOCK_MIRROR)) {
356 		pr_warn_ratelimited("Could not allocate %pap bytes of mirrored memory\n",
357 			&size);
358 		flags &= ~MEMBLOCK_MIRROR;
359 		goto again;
360 	}
361 
362 	return ret;
363 }
364 
365 static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
366 {
367 	type->total_size -= type->regions[r].size;
368 	memmove(&type->regions[r], &type->regions[r + 1],
369 		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
370 	type->cnt--;
371 
372 	/* Special case for empty arrays */
373 	if (type->cnt == 0) {
374 		WARN_ON(type->total_size != 0);
375 		type->regions[0].base = 0;
376 		type->regions[0].size = 0;
377 		type->regions[0].flags = 0;
378 		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
379 	}
380 }
381 
382 #ifndef CONFIG_ARCH_KEEP_MEMBLOCK
383 /**
384  * memblock_discard - discard memory and reserved arrays if they were allocated
385  */
386 void __init memblock_discard(void)
387 {
388 	phys_addr_t addr, size;
389 
390 	if (memblock.reserved.regions != memblock_reserved_init_regions) {
391 		addr = __pa(memblock.reserved.regions);
392 		size = PAGE_ALIGN(sizeof(struct memblock_region) *
393 				  memblock.reserved.max);
394 		if (memblock_reserved_in_slab)
395 			kfree(memblock.reserved.regions);
396 		else
397 			memblock_free_late(addr, size);
398 	}
399 
400 	if (memblock.memory.regions != memblock_memory_init_regions) {
401 		addr = __pa(memblock.memory.regions);
402 		size = PAGE_ALIGN(sizeof(struct memblock_region) *
403 				  memblock.memory.max);
404 		if (memblock_memory_in_slab)
405 			kfree(memblock.memory.regions);
406 		else
407 			memblock_free_late(addr, size);
408 	}
409 
410 	memblock_memory = NULL;
411 }
412 #endif
413 
414 /**
415  * memblock_double_array - double the size of the memblock regions array
416  * @type: memblock type of the regions array being doubled
417  * @new_area_start: starting address of memory range to avoid overlap with
418  * @new_area_size: size of memory range to avoid overlap with
419  *
420  * Double the size of the @type regions array. If memblock is being used to
421  * allocate memory for a new reserved regions array and there is a previously
422  * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
423  * waiting to be reserved, ensure the memory used by the new array does
424  * not overlap.
425  *
426  * Return:
427  * 0 on success, -1 on failure.
428  */
429 static int __init_memblock memblock_double_array(struct memblock_type *type,
430 						phys_addr_t new_area_start,
431 						phys_addr_t new_area_size)
432 {
433 	struct memblock_region *new_array, *old_array;
434 	phys_addr_t old_alloc_size, new_alloc_size;
435 	phys_addr_t old_size, new_size, addr, new_end;
436 	int use_slab = slab_is_available();
437 	int *in_slab;
438 
439 	/* We don't allow resizing until we know about the reserved regions
440 	 * of memory that aren't suitable for allocation
441 	 */
442 	if (!memblock_can_resize)
443 		panic("memblock: cannot resize %s array\n", type->name);
444 
445 	/* Calculate new doubled size */
446 	old_size = type->max * sizeof(struct memblock_region);
447 	new_size = old_size << 1;
448 	/*
449 	 * We need to allocate the new array aligned to PAGE_SIZE,
450 	 * so we can free it completely later.
451 	 */
452 	old_alloc_size = PAGE_ALIGN(old_size);
453 	new_alloc_size = PAGE_ALIGN(new_size);
454 
455 	/* Retrieve the slab flag */
456 	if (type == &memblock.memory)
457 		in_slab = &memblock_memory_in_slab;
458 	else
459 		in_slab = &memblock_reserved_in_slab;
460 
461 	/* Try to find some space for it */
462 	if (use_slab) {
463 		new_array = kmalloc(new_size, GFP_KERNEL);
464 		addr = new_array ? __pa(new_array) : 0;
465 	} else {
466 		/* only exclude range when trying to double reserved.regions */
467 		if (type != &memblock.reserved)
468 			new_area_start = new_area_size = 0;
469 
470 		addr = memblock_find_in_range(new_area_start + new_area_size,
471 						memblock.current_limit,
472 						new_alloc_size, PAGE_SIZE);
473 		if (!addr && new_area_size)
474 			addr = memblock_find_in_range(0,
475 				min(new_area_start, memblock.current_limit),
476 				new_alloc_size, PAGE_SIZE);
477 
478 		if (addr) {
479 			/* The memory may not have been accepted, yet. */
480 			accept_memory(addr, new_alloc_size);
481 
482 			new_array = __va(addr);
483 		} else {
484 			new_array = NULL;
485 		}
486 	}
487 	if (!addr) {
488 		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
489 		       type->name, type->max, type->max * 2);
490 		return -1;
491 	}
492 
493 	new_end = addr + new_size - 1;
494 	memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]",
495 			type->name, type->max * 2, &addr, &new_end);
496 
497 	/*
498 	 * Found space, we now need to move the array over before we add the
499 	 * reserved region since it may be our reserved array itself that is
500 	 * full.
501 	 */
502 	memcpy(new_array, type->regions, old_size);
503 	memset(new_array + type->max, 0, old_size);
504 	old_array = type->regions;
505 	type->regions = new_array;
506 	type->max <<= 1;
507 
508 	/* Free old array. We needn't free it if the array is the static one */
509 	if (*in_slab)
510 		kfree(old_array);
511 	else if (old_array != memblock_memory_init_regions &&
512 		 old_array != memblock_reserved_init_regions)
513 		memblock_free(old_array, old_alloc_size);
514 
515 	/*
516 	 * Reserve the new array if that comes from the memblock.  Otherwise, we
517 	 * needn't do it
518 	 */
519 	if (!use_slab)
520 		BUG_ON(memblock_reserve_kern(addr, new_alloc_size));
521 
522 	/* Update slab flag */
523 	*in_slab = use_slab;
524 
525 	return 0;
526 }
527 
528 /**
529  * memblock_merge_regions - merge neighboring compatible regions
530  * @type: memblock type to scan
531  * @start_rgn: start scanning from (@start_rgn - 1)
532  * @end_rgn: end scanning at (@end_rgn - 1)
533  * Scan @type and merge neighboring compatible regions in [@start_rgn - 1, @end_rgn)
534  */
535 static void __init_memblock memblock_merge_regions(struct memblock_type *type,
536 						   unsigned long start_rgn,
537 						   unsigned long end_rgn)
538 {
539 	int i = 0;
540 	if (start_rgn)
541 		i = start_rgn - 1;
542 	end_rgn = min(end_rgn, type->cnt - 1);
543 	while (i < end_rgn) {
544 		struct memblock_region *this = &type->regions[i];
545 		struct memblock_region *next = &type->regions[i + 1];
546 
547 		if (this->base + this->size != next->base ||
548 		    memblock_get_region_node(this) !=
549 		    memblock_get_region_node(next) ||
550 		    this->flags != next->flags) {
551 			BUG_ON(this->base + this->size > next->base);
552 			i++;
553 			continue;
554 		}
555 
556 		this->size += next->size;
557 		/* move forward from next + 1, index of which is i + 2 */
558 		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
559 		type->cnt--;
560 		end_rgn--;
561 	}
562 }
563 
564 /**
565  * memblock_insert_region - insert new memblock region
566  * @type:	memblock type to insert into
567  * @idx:	index for the insertion point
568  * @base:	base address of the new region
569  * @size:	size of the new region
570  * @nid:	node id of the new region
571  * @flags:	flags of the new region
572  *
573  * Insert new memblock region [@base, @base + @size) into @type at @idx.
574  * @type must already have extra room to accommodate the new region.
575  */
576 static void __init_memblock memblock_insert_region(struct memblock_type *type,
577 						   int idx, phys_addr_t base,
578 						   phys_addr_t size,
579 						   int nid,
580 						   enum memblock_flags flags)
581 {
582 	struct memblock_region *rgn = &type->regions[idx];
583 
584 	BUG_ON(type->cnt >= type->max);
585 	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
586 	rgn->base = base;
587 	rgn->size = size;
588 	rgn->flags = flags;
589 	memblock_set_region_node(rgn, nid);
590 	type->cnt++;
591 	type->total_size += size;
592 }
593 
594 /**
595  * memblock_add_range - add new memblock region
596  * @type: memblock type to add new region into
597  * @base: base address of the new region
598  * @size: size of the new region
599  * @nid: nid of the new region
600  * @flags: flags of the new region
601  *
602  * Add new memblock region [@base, @base + @size) into @type.  The new region
603  * is allowed to overlap with existing ones - overlaps don't affect already
604  * existing regions.  @type is guaranteed to be minimal (all neighbouring
605  * compatible regions are merged) after the addition.
606  *
607  * Return:
608  * 0 on success, -errno on failure.
609  */
610 static int __init_memblock memblock_add_range(struct memblock_type *type,
611 				phys_addr_t base, phys_addr_t size,
612 				int nid, enum memblock_flags flags)
613 {
614 	bool insert = false;
615 	phys_addr_t obase = base;
616 	phys_addr_t end = base + memblock_cap_size(base, &size);
617 	int idx, nr_new, start_rgn = -1, end_rgn;
618 	struct memblock_region *rgn;
619 
620 	if (!size)
621 		return 0;
622 
623 	/* special case for empty array */
624 	if (type->regions[0].size == 0) {
625 		WARN_ON(type->cnt != 0 || type->total_size);
626 		type->regions[0].base = base;
627 		type->regions[0].size = size;
628 		type->regions[0].flags = flags;
629 		memblock_set_region_node(&type->regions[0], nid);
630 		type->total_size = size;
631 		type->cnt = 1;
632 		return 0;
633 	}
634 
635 	/*
636 	 * The worst case is when new range overlaps all existing regions,
637 	 * then we'll need type->cnt + 1 empty regions in @type. So if
638 	 * type->cnt * 2 + 1 is less than or equal to type->max, we know
639 	 * that there is enough empty regions in @type, and we can insert
640 	 * regions directly.
641 	 */
642 	if (type->cnt * 2 + 1 <= type->max)
643 		insert = true;
644 
645 repeat:
646 	/*
647 	 * The following is executed twice.  Once with %false @insert and
648 	 * then with %true.  The first counts the number of regions needed
649 	 * to accommodate the new area.  The second actually inserts them.
650 	 */
651 	base = obase;
652 	nr_new = 0;
653 
654 	for_each_memblock_type(idx, type, rgn) {
655 		phys_addr_t rbase = rgn->base;
656 		phys_addr_t rend = rbase + rgn->size;
657 
658 		if (rbase >= end)
659 			break;
660 		if (rend <= base)
661 			continue;
662 		/*
663 		 * @rgn overlaps.  If it separates the lower part of new
664 		 * area, insert that portion.
665 		 */
666 		if (rbase > base) {
667 #ifdef CONFIG_NUMA
668 			WARN_ON(nid != memblock_get_region_node(rgn));
669 #endif
670 			WARN_ON(flags != MEMBLOCK_NONE && flags != rgn->flags);
671 			nr_new++;
672 			if (insert) {
673 				if (start_rgn == -1)
674 					start_rgn = idx;
675 				end_rgn = idx + 1;
676 				memblock_insert_region(type, idx++, base,
677 						       rbase - base, nid,
678 						       flags);
679 			}
680 		}
681 		/* area below @rend is dealt with, forget about it */
682 		base = min(rend, end);
683 	}
684 
685 	/* insert the remaining portion */
686 	if (base < end) {
687 		nr_new++;
688 		if (insert) {
689 			if (start_rgn == -1)
690 				start_rgn = idx;
691 			end_rgn = idx + 1;
692 			memblock_insert_region(type, idx, base, end - base,
693 					       nid, flags);
694 		}
695 	}
696 
697 	if (!nr_new)
698 		return 0;
699 
700 	/*
701 	 * If this was the first round, resize array and repeat for actual
702 	 * insertions; otherwise, merge and return.
703 	 */
704 	if (!insert) {
705 		while (type->cnt + nr_new > type->max)
706 			if (memblock_double_array(type, obase, size) < 0)
707 				return -ENOMEM;
708 		insert = true;
709 		goto repeat;
710 	} else {
711 		memblock_merge_regions(type, start_rgn, end_rgn);
712 		return 0;
713 	}
714 }
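
/*
 * Editor's note: because overlaps are allowed and compatible neighbours
 * are merged, repeated or overlapping registrations collapse into a
 * single region. An illustrative sequence (addresses are made up):
 *
 *	memblock_add(0x1000000, SZ_4M);
 *	memblock_add(0x1200000, SZ_4M);	// overlaps the range above
 *
 *	// memblock.memory now holds one region: [0x1000000, 0x1600000)
 */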
715 
716 /**
717  * memblock_add_node - add new memblock region within a NUMA node
718  * @base: base address of the new region
719  * @size: size of the new region
720  * @nid: nid of the new region
721  * @flags: flags of the new region
722  *
723  * Add new memblock region [@base, @base + @size) to the "memory"
724  * type. See memblock_add_range() description for more details.
725  *
726  * Return:
727  * 0 on success, -errno on failure.
728  */
729 int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
730 				      int nid, enum memblock_flags flags)
731 {
732 	phys_addr_t end = base + size - 1;
733 
734 	memblock_dbg("%s: [%pa-%pa] nid=%d flags=%x %pS\n", __func__,
735 		     &base, &end, nid, flags, (void *)_RET_IP_);
736 
737 	return memblock_add_range(&memblock.memory, base, size, nid, flags);
738 }
739 
740 /**
741  * memblock_add - add new memblock region
742  * @base: base address of the new region
743  * @size: size of the new region
744  *
745  * Add new memblock region [@base, @base + @size) to the "memory"
746  * type. See memblock_add_range() description for more details.
747  *
748  * Return:
749  * 0 on success, -errno on failure.
750  */
751 int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
752 {
753 	phys_addr_t end = base + size - 1;
754 
755 	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
756 		     &base, &end, (void *)_RET_IP_);
757 
758 	return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
759 }
760 
761 /**
762  * memblock_validate_numa_coverage - check if the amount of memory with
763  * no node ID assigned is less than a threshold
764  * @threshold_bytes: maximal memory size that can have unassigned node
765  * ID (in bytes).
766  *
767  * Buggy firmware may report memory that does not belong to any node.
768  * Check if the amount of such memory is below @threshold_bytes.
769  *
770  * Return: true on success, false on failure.
771  */
772 bool __init_memblock memblock_validate_numa_coverage(unsigned long threshold_bytes)
773 {
774 	unsigned long nr_pages = 0;
775 	unsigned long start_pfn, end_pfn, mem_size_mb;
776 	int nid, i;
777 
778 	/* calculate pages without a valid node ID */
779 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
780 		if (!numa_valid_node(nid))
781 			nr_pages += end_pfn - start_pfn;
782 	}
783 
784 	if ((nr_pages << PAGE_SHIFT) > threshold_bytes) {
785 		mem_size_mb = memblock_phys_mem_size() / SZ_1M;
786 		pr_err("NUMA: no nodes coverage for %luMB of %luMB RAM\n",
787 		       (nr_pages << PAGE_SHIFT) / SZ_1M, mem_size_mb);
788 		return false;
789 	}
790 
791 	return true;
792 }
793 
794 
795 /**
796  * memblock_isolate_range - isolate given range into disjoint memblocks
797  * @type: memblock type to isolate range for
798  * @base: base of range to isolate
799  * @size: size of range to isolate
800  * @start_rgn: out parameter for the start of isolated region
801  * @end_rgn: out parameter for the end of isolated region
802  *
803  * Walk @type and ensure that regions don't cross the boundaries defined by
804  * [@base, @base + @size).  Crossing regions are split at the boundaries,
805  * which may create at most two more regions.  The index of the first
806  * region inside the range is returned in *@start_rgn and the index of the
807  * first region after the range is returned in *@end_rgn.
808  *
809  * Return:
810  * 0 on success, -errno on failure.
811  */
812 static int __init_memblock memblock_isolate_range(struct memblock_type *type,
813 					phys_addr_t base, phys_addr_t size,
814 					int *start_rgn, int *end_rgn)
815 {
816 	phys_addr_t end = base + memblock_cap_size(base, &size);
817 	int idx;
818 	struct memblock_region *rgn;
819 
820 	*start_rgn = *end_rgn = 0;
821 
822 	if (!size)
823 		return 0;
824 
825 	/* we'll create at most two more regions */
826 	while (type->cnt + 2 > type->max)
827 		if (memblock_double_array(type, base, size) < 0)
828 			return -ENOMEM;
829 
830 	for_each_memblock_type(idx, type, rgn) {
831 		phys_addr_t rbase = rgn->base;
832 		phys_addr_t rend = rbase + rgn->size;
833 
834 		if (rbase >= end)
835 			break;
836 		if (rend <= base)
837 			continue;
838 
839 		if (rbase < base) {
840 			/*
841 			 * @rgn intersects from below.  Split and continue
842 			 * to process the next region - the new top half.
843 			 */
844 			rgn->base = base;
845 			rgn->size -= base - rbase;
846 			type->total_size -= base - rbase;
847 			memblock_insert_region(type, idx, rbase, base - rbase,
848 					       memblock_get_region_node(rgn),
849 					       rgn->flags);
850 		} else if (rend > end) {
851 			/*
852 			 * @rgn intersects from above.  Split and redo the
853 			 * current region - the new bottom half.
854 			 */
855 			rgn->base = end;
856 			rgn->size -= end - rbase;
857 			type->total_size -= end - rbase;
858 			memblock_insert_region(type, idx--, rbase, end - rbase,
859 					       memblock_get_region_node(rgn),
860 					       rgn->flags);
861 		} else {
862 			/* @rgn is fully contained, record it */
863 			if (!*end_rgn)
864 				*start_rgn = idx;
865 			*end_rgn = idx + 1;
866 		}
867 	}
868 
869 	return 0;
870 }
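
/*
 * Editor's note: a worked example of the splitting above. Assume a single
 * "memory" region [0, 1G) and an isolation request for [256M, 512M):
 *
 *	memblock_isolate_range(&memblock.memory, SZ_256M, SZ_256M,
 *			       &start_rgn, &end_rgn);
 *
 * afterwards the array holds [0, 256M), [256M, 512M) and [512M, 1G), with
 * *start_rgn == 1 and *end_rgn == 2 selecting only the middle region.
 */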
871 
872 static int __init_memblock memblock_remove_range(struct memblock_type *type,
873 					  phys_addr_t base, phys_addr_t size)
874 {
875 	int start_rgn, end_rgn;
876 	int i, ret;
877 
878 	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
879 	if (ret)
880 		return ret;
881 
882 	for (i = end_rgn - 1; i >= start_rgn; i--)
883 		memblock_remove_region(type, i);
884 	return 0;
885 }
886 
887 int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
888 {
889 	phys_addr_t end = base + size - 1;
890 
891 	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
892 		     &base, &end, (void *)_RET_IP_);
893 
894 	return memblock_remove_range(&memblock.memory, base, size);
895 }
896 
897 unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
898 {
899 	void *pos;
900 	unsigned long pages = 0;
901 
902 	start = (void *)PAGE_ALIGN((unsigned long)start);
903 	end = (void *)((unsigned long)end & PAGE_MASK);
904 	for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
905 		struct page *page = virt_to_page(pos);
906 		void *direct_map_addr;
907 
908 		/*
909 		 * 'direct_map_addr' might be different from 'pos'
910 		 * because some architectures' virt_to_page()
911 		 * work with aliases.  Getting the direct map
912 		 * address ensures that we get a _writeable_
913 		 * alias for the memset().
914 		 */
915 		direct_map_addr = page_address(page);
916 		/*
917 		 * Perform a kasan-unchecked memset() since this memory
918 		 * has not been initialized.
919 		 */
920 		direct_map_addr = kasan_reset_tag(direct_map_addr);
921 		if ((unsigned int)poison <= 0xFF)
922 			memset(direct_map_addr, poison, PAGE_SIZE);
923 
924 		free_reserved_page(page);
925 	}
926 
927 	if (pages && s)
928 		pr_info("Freeing %s memory: %ldK\n", s, K(pages));
929 
930 	return pages;
931 }
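
/*
 * Editor's note: a typical (hedged) use of the helper above is releasing
 * the kernel's init sections once boot is complete, e.g.:
 *
 *	free_reserved_area(&__init_begin, &__init_end,
 *			   POISON_FREE_INITMEM, "unused kernel");
 *
 * which poisons the pages, hands them back to the buddy allocator and
 * logs how much memory was freed.
 */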
932 
933 /**
934  * memblock_free - free boot memory allocation
935  * @ptr: starting address of the boot memory allocation
936  * @size: size of the boot memory block in bytes
937  *
938  * Free a boot memory block previously allocated by the memblock_alloc_xx() API.
939  * The freed memory will not be released to the buddy allocator.
940  */
941 void __init_memblock memblock_free(void *ptr, size_t size)
942 {
943 	if (ptr)
944 		memblock_phys_free(__pa(ptr), size);
945 }
946 
947 /**
948  * memblock_phys_free - free boot memory block
949  * @base: phys starting address of the boot memory block
950  * @size: size of the boot memory block in bytes
951  *
952  * Free a boot memory block previously allocated by the memblock_phys_alloc_xx() API.
953  * The freed memory will not be released to the buddy allocator.
954  */
955 int __init_memblock memblock_phys_free(phys_addr_t base, phys_addr_t size)
956 {
957 	phys_addr_t end = base + size - 1;
958 
959 	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
960 		     &base, &end, (void *)_RET_IP_);
961 
962 	kmemleak_free_part_phys(base, size);
963 	return memblock_remove_range(&memblock.reserved, base, size);
964 }
965 
966 int __init_memblock __memblock_reserve(phys_addr_t base, phys_addr_t size,
967 				       int nid, enum memblock_flags flags)
968 {
969 	phys_addr_t end = base + size - 1;
970 
971 	memblock_dbg("%s: [%pa-%pa] nid=%d flags=%x %pS\n", __func__,
972 		     &base, &end, nid, flags, (void *)_RET_IP_);
973 
974 	return memblock_add_range(&memblock.reserved, base, size, nid, flags);
975 }
976 
977 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
978 int __init_memblock memblock_physmem_add(phys_addr_t base, phys_addr_t size)
979 {
980 	phys_addr_t end = base + size - 1;
981 
982 	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
983 		     &base, &end, (void *)_RET_IP_);
984 
985 	return memblock_add_range(&physmem, base, size, MAX_NUMNODES, 0);
986 }
987 #endif
988 
989 #ifdef CONFIG_MEMBLOCK_KHO_SCRATCH
990 __init void memblock_set_kho_scratch_only(void)
991 {
992 	kho_scratch_only = true;
993 }
994 
995 __init void memblock_clear_kho_scratch_only(void)
996 {
997 	kho_scratch_only = false;
998 }
999 
1000 __init void memmap_init_kho_scratch_pages(void)
1001 {
1002 	phys_addr_t start, end;
1003 	unsigned long pfn;
1004 	int nid;
1005 	u64 i;
1006 
1007 	if (!IS_ENABLED(CONFIG_DEFERRED_STRUCT_PAGE_INIT))
1008 		return;
1009 
1010 	/*
1011 	 * Initialize struct pages for free scratch memory.
1012 	 * The struct pages for reserved scratch memory will be set up in
1013 	 * memmap_init_reserved_pages()
1014 	 */
1015 	__for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
1016 			     MEMBLOCK_KHO_SCRATCH, &start, &end, &nid) {
1017 		for (pfn = PFN_UP(start); pfn < PFN_DOWN(end); pfn++)
1018 			init_deferred_page(pfn, nid);
1019 	}
1020 }
1021 #endif
1022 
1023 /**
1024  * memblock_setclr_flag - set or clear flag for a memory region
1025  * @type: memblock type to set/clear flag for
1026  * @base: base address of the region
1027  * @size: size of the region
1028  * @set: set or clear the flag
1029  * @flag: the flag to update
1030  *
1031  * This function isolates region [@base, @base + @size), and sets/clears flag
1032  *
1033  * Return: 0 on success, -errno on failure.
1034  */
1035 static int __init_memblock memblock_setclr_flag(struct memblock_type *type,
1036 				phys_addr_t base, phys_addr_t size, int set, int flag)
1037 {
1038 	int i, ret, start_rgn, end_rgn;
1039 
1040 	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
1041 	if (ret)
1042 		return ret;
1043 
1044 	for (i = start_rgn; i < end_rgn; i++) {
1045 		struct memblock_region *r = &type->regions[i];
1046 
1047 		if (set)
1048 			r->flags |= flag;
1049 		else
1050 			r->flags &= ~flag;
1051 	}
1052 
1053 	memblock_merge_regions(type, start_rgn, end_rgn);
1054 	return 0;
1055 }
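
/*
 * Editor's note: the wrappers below all funnel through this helper, so
 * marking or clearing an attribute boils down to isolating the range and
 * flipping one flag bit. A sketch with a made-up hotpluggable range:
 *
 *	memblock_mark_hotplug(0x100000000ULL, SZ_1G);
 *	// ... and once the movable_node policy is settled:
 *	memblock_clear_hotplug(0x100000000ULL, SZ_1G);
 */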
1056 
1057 /**
1058  * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
1059  * @base: the base phys addr of the region
1060  * @size: the size of the region
1061  *
1062  * Return: 0 on success, -errno on failure.
1063  */
1064 int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
1065 {
1066 	return memblock_setclr_flag(&memblock.memory, base, size, 1, MEMBLOCK_HOTPLUG);
1067 }
1068 
1069 /**
1070  * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
1071  * @base: the base phys addr of the region
1072  * @size: the size of the region
1073  *
1074  * Return: 0 on success, -errno on failure.
1075  */
1076 int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
1077 {
1078 	return memblock_setclr_flag(&memblock.memory, base, size, 0, MEMBLOCK_HOTPLUG);
1079 }
1080 
1081 /**
1082  * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
1083  * @base: the base phys addr of the region
1084  * @size: the size of the region
1085  *
1086  * Return: 0 on success, -errno on failure.
1087  */
1088 int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
1089 {
1090 	if (!mirrored_kernelcore)
1091 		return 0;
1092 
1093 	system_has_some_mirror = true;
1094 
1095 	return memblock_setclr_flag(&memblock.memory, base, size, 1, MEMBLOCK_MIRROR);
1096 }
1097 
1098 /**
1099  * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
1100  * @base: the base phys addr of the region
1101  * @size: the size of the region
1102  *
1103  * The memory regions marked with %MEMBLOCK_NOMAP will not be added to the
1104  * direct mapping of the physical memory. These regions will still be
1105  * covered by the memory map. The struct page representing NOMAP memory
1106  * frames in the memory map will be PageReserved()
1107  *
1108  * Note: if the memory being marked %MEMBLOCK_NOMAP was allocated from
1109  * memblock, the caller must inform kmemleak to ignore that memory
1110  *
1111  * Return: 0 on success, -errno on failure.
1112  */
1113 int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
1114 {
1115 	return memblock_setclr_flag(&memblock.memory, base, size, 1, MEMBLOCK_NOMAP);
1116 }
1117 
1118 /**
1119  * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
1120  * @base: the base phys addr of the region
1121  * @size: the size of the region
1122  *
1123  * Return: 0 on success, -errno on failure.
1124  */
1125 int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
1126 {
1127 	return memblock_setclr_flag(&memblock.memory, base, size, 0, MEMBLOCK_NOMAP);
1128 }
1129 
1130 /**
1131  * memblock_reserved_mark_noinit - Mark a reserved memory region with flag
1132  * MEMBLOCK_RSRV_NOINIT
1133  *
1134  * @base: the base phys addr of the region
1135  * @size: the size of the region
1136  *
1137  * The struct pages for the reserved regions marked %MEMBLOCK_RSRV_NOINIT will
1138  * not be fully initialized, to allow the caller to optimize its initialization.
1139  *
1140  * When %CONFIG_DEFERRED_STRUCT_PAGE_INIT is enabled, setting this flag
1141  * completely bypasses the initialization of struct pages for such region.
1142  *
1143  * When %CONFIG_DEFERRED_STRUCT_PAGE_INIT is disabled, struct pages in this
1144  * region will be initialized with default values but won't be marked as
1145  * reserved.
1146  *
1147  * Return: 0 on success, -errno on failure.
1148  */
1149 int __init_memblock memblock_reserved_mark_noinit(phys_addr_t base, phys_addr_t size)
1150 {
1151 	return memblock_setclr_flag(&memblock.reserved, base, size, 1,
1152 				    MEMBLOCK_RSRV_NOINIT);
1153 }
1154 
1155 /**
1156  * memblock_mark_kho_scratch - Mark a memory region as MEMBLOCK_KHO_SCRATCH.
1157  * @base: the base phys addr of the region
1158  * @size: the size of the region
1159  *
1160  * Only memory regions marked with %MEMBLOCK_KHO_SCRATCH will be considered
1161  * for allocations during early boot with kexec handover.
1162  *
1163  * Return: 0 on success, -errno on failure.
1164  */
1165 __init int memblock_mark_kho_scratch(phys_addr_t base, phys_addr_t size)
1166 {
1167 	return memblock_setclr_flag(&memblock.memory, base, size, 1,
1168 				    MEMBLOCK_KHO_SCRATCH);
1169 }
1170 
1171 /**
1172  * memblock_clear_kho_scratch - Clear MEMBLOCK_KHO_SCRATCH flag for a
1173  * specified region.
1174  * @base: the base phys addr of the region
1175  * @size: the size of the region
1176  *
1177  * Return: 0 on success, -errno on failure.
1178  */
1179 __init int memblock_clear_kho_scratch(phys_addr_t base, phys_addr_t size)
1180 {
1181 	return memblock_setclr_flag(&memblock.memory, base, size, 0,
1182 				    MEMBLOCK_KHO_SCRATCH);
1183 }
1184 
1185 static bool should_skip_region(struct memblock_type *type,
1186 			       struct memblock_region *m,
1187 			       int nid, int flags)
1188 {
1189 	int m_nid = memblock_get_region_node(m);
1190 
1191 	/* we never skip regions when iterating memblock.reserved or physmem */
1192 	if (type != memblock_memory)
1193 		return false;
1194 
1195 	/* only memory regions are associated with nodes, check it */
1196 	if (numa_valid_node(nid) && nid != m_nid)
1197 		return true;
1198 
1199 	/* skip hotpluggable memory regions if needed */
1200 	if (movable_node_is_enabled() && memblock_is_hotpluggable(m) &&
1201 	    !(flags & MEMBLOCK_HOTPLUG))
1202 		return true;
1203 
1204 	/* if we want mirror memory skip non-mirror memory regions */
1205 	if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
1206 		return true;
1207 
1208 	/* skip nomap memory unless we were asked for it explicitly */
1209 	if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
1210 		return true;
1211 
1212 	/* skip driver-managed memory unless we were asked for it explicitly */
1213 	if (!(flags & MEMBLOCK_DRIVER_MANAGED) && memblock_is_driver_managed(m))
1214 		return true;
1215 
1216 	/*
1217 	 * In early alloc during kexec handover, we can only consider
1218 	 * MEMBLOCK_KHO_SCRATCH regions for the allocations
1219 	 */
1220 	if ((flags & MEMBLOCK_KHO_SCRATCH) && !memblock_is_kho_scratch(m))
1221 		return true;
1222 
1223 	return false;
1224 }
1225 
1226 /**
1227  * __next_mem_range - next function for for_each_free_mem_range() etc.
1228  * @idx: pointer to u64 loop variable
1229  * @nid: node selector, %NUMA_NO_NODE for all nodes
1230  * @flags: pick from blocks based on memory attributes
1231  * @type_a: pointer to memblock_type from where the range is taken
1232  * @type_b: pointer to memblock_type which excludes memory from being taken
1233  * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
1234  * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
1235  * @out_nid: ptr to int for nid of the range, can be %NULL
1236  *
1237  * Find the first area from *@idx which matches @nid, fill the out
1238  * parameters, and update *@idx for the next iteration.  The lower 32bit of
1239  * *@idx contains index into type_a and the upper 32bit indexes the
1240  * areas before each region in type_b.	For example, if type_b regions
1241  * look like the following,
1242  *
1243  *	0:[0-16), 1:[32-48), 2:[128-130)
1244  *
1245  * The upper 32bit indexes the following regions.
1246  *
1247  *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
1248  *
1249  * As both region arrays are sorted, the function advances the two indices
1250  * in lockstep and returns each intersection.
1251  */
1252 void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
1253 		      struct memblock_type *type_a,
1254 		      struct memblock_type *type_b, phys_addr_t *out_start,
1255 		      phys_addr_t *out_end, int *out_nid)
1256 {
1257 	int idx_a = *idx & 0xffffffff;
1258 	int idx_b = *idx >> 32;
1259 
1260 	for (; idx_a < type_a->cnt; idx_a++) {
1261 		struct memblock_region *m = &type_a->regions[idx_a];
1262 
1263 		phys_addr_t m_start = m->base;
1264 		phys_addr_t m_end = m->base + m->size;
1265 		int	    m_nid = memblock_get_region_node(m);
1266 
1267 		if (should_skip_region(type_a, m, nid, flags))
1268 			continue;
1269 
1270 		if (!type_b) {
1271 			if (out_start)
1272 				*out_start = m_start;
1273 			if (out_end)
1274 				*out_end = m_end;
1275 			if (out_nid)
1276 				*out_nid = m_nid;
1277 			idx_a++;
1278 			*idx = (u32)idx_a | (u64)idx_b << 32;
1279 			return;
1280 		}
1281 
1282 		/* scan areas before each reservation */
1283 		for (; idx_b < type_b->cnt + 1; idx_b++) {
1284 			struct memblock_region *r;
1285 			phys_addr_t r_start;
1286 			phys_addr_t r_end;
1287 
1288 			r = &type_b->regions[idx_b];
1289 			r_start = idx_b ? r[-1].base + r[-1].size : 0;
1290 			r_end = idx_b < type_b->cnt ?
1291 				r->base : PHYS_ADDR_MAX;
1292 
1293 			/*
1294 			 * if idx_b advanced past idx_a,
1295 			 * break out to advance idx_a
1296 			 */
1297 			if (r_start >= m_end)
1298 				break;
1299 			/* if the two regions intersect, we're done */
1300 			if (m_start < r_end) {
1301 				if (out_start)
1302 					*out_start =
1303 						max(m_start, r_start);
1304 				if (out_end)
1305 					*out_end = min(m_end, r_end);
1306 				if (out_nid)
1307 					*out_nid = m_nid;
1308 				/*
1309 				 * The region which ends first is
1310 				 * advanced for the next iteration.
1311 				 */
1312 				if (m_end <= r_end)
1313 					idx_a++;
1314 				else
1315 					idx_b++;
1316 				*idx = (u32)idx_a | (u64)idx_b << 32;
1317 				return;
1318 			}
1319 		}
1320 	}
1321 
1322 	/* signal end of iteration */
1323 	*idx = ULLONG_MAX;
1324 }
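
/*
 * Editor's note: callers normally reach this function through the
 * for_each_free_mem_range() family. A minimal sketch that walks every
 * free (memory minus reserved) range on any node:
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		pr_info("free range: [%pa-%pa]\n", &start, &end);
 */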
1325 
1326 /**
1327  * __next_mem_range_rev - generic next function for for_each_*_range_rev()
1328  *
1329  * @idx: pointer to u64 loop variable
1330  * @nid: node selector, %NUMA_NO_NODE for all nodes
1331  * @flags: pick from blocks based on memory attributes
1332  * @type_a: pointer to memblock_type from where the range is taken
1333  * @type_b: pointer to memblock_type which excludes memory from being taken
1334  * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
1335  * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
1336  * @out_nid: ptr to int for nid of the range, can be %NULL
1337  *
1338  * Finds the next range from type_a which is not marked as unsuitable
1339  * in type_b.
1340  *
1341  * Reverse of __next_mem_range().
1342  */
1343 void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
1344 					  enum memblock_flags flags,
1345 					  struct memblock_type *type_a,
1346 					  struct memblock_type *type_b,
1347 					  phys_addr_t *out_start,
1348 					  phys_addr_t *out_end, int *out_nid)
1349 {
1350 	int idx_a = *idx & 0xffffffff;
1351 	int idx_b = *idx >> 32;
1352 
1353 	if (*idx == (u64)ULLONG_MAX) {
1354 		idx_a = type_a->cnt - 1;
1355 		if (type_b != NULL)
1356 			idx_b = type_b->cnt;
1357 		else
1358 			idx_b = 0;
1359 	}
1360 
1361 	for (; idx_a >= 0; idx_a--) {
1362 		struct memblock_region *m = &type_a->regions[idx_a];
1363 
1364 		phys_addr_t m_start = m->base;
1365 		phys_addr_t m_end = m->base + m->size;
1366 		int m_nid = memblock_get_region_node(m);
1367 
1368 		if (should_skip_region(type_a, m, nid, flags))
1369 			continue;
1370 
1371 		if (!type_b) {
1372 			if (out_start)
1373 				*out_start = m_start;
1374 			if (out_end)
1375 				*out_end = m_end;
1376 			if (out_nid)
1377 				*out_nid = m_nid;
1378 			idx_a--;
1379 			*idx = (u32)idx_a | (u64)idx_b << 32;
1380 			return;
1381 		}
1382 
1383 		/* scan areas before each reservation */
1384 		for (; idx_b >= 0; idx_b--) {
1385 			struct memblock_region *r;
1386 			phys_addr_t r_start;
1387 			phys_addr_t r_end;
1388 
1389 			r = &type_b->regions[idx_b];
1390 			r_start = idx_b ? r[-1].base + r[-1].size : 0;
1391 			r_end = idx_b < type_b->cnt ?
1392 				r->base : PHYS_ADDR_MAX;
1393 			/*
1394 			 * if idx_b advanced past idx_a,
1395 			 * break out to advance idx_a
1396 			 */
1397 
1398 			if (r_end <= m_start)
1399 				break;
1400 			/* if the two regions intersect, we're done */
1401 			if (m_end > r_start) {
1402 				if (out_start)
1403 					*out_start = max(m_start, r_start);
1404 				if (out_end)
1405 					*out_end = min(m_end, r_end);
1406 				if (out_nid)
1407 					*out_nid = m_nid;
1408 				if (m_start >= r_start)
1409 					idx_a--;
1410 				else
1411 					idx_b--;
1412 				*idx = (u32)idx_a | (u64)idx_b << 32;
1413 				return;
1414 			}
1415 		}
1416 	}
1417 	/* signal end of iteration */
1418 	*idx = ULLONG_MAX;
1419 }
1420 
1421 /*
1422  * Common iterator interface used to define for_each_mem_pfn_range().
1423  */
1424 void __init_memblock __next_mem_pfn_range(int *idx, int nid,
1425 				unsigned long *out_start_pfn,
1426 				unsigned long *out_end_pfn, int *out_nid)
1427 {
1428 	struct memblock_type *type = &memblock.memory;
1429 	struct memblock_region *r;
1430 	int r_nid;
1431 
1432 	while (++*idx < type->cnt) {
1433 		r = &type->regions[*idx];
1434 		r_nid = memblock_get_region_node(r);
1435 
1436 		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
1437 			continue;
1438 		if (!numa_valid_node(nid) || nid == r_nid)
1439 			break;
1440 	}
1441 	if (*idx >= type->cnt) {
1442 		*idx = -1;
1443 		return;
1444 	}
1445 
1446 	if (out_start_pfn)
1447 		*out_start_pfn = PFN_UP(r->base);
1448 	if (out_end_pfn)
1449 		*out_end_pfn = PFN_DOWN(r->base + r->size);
1450 	if (out_nid)
1451 		*out_nid = r_nid;
1452 }
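
/*
 * Editor's note: this iterator backs for_each_mem_pfn_range(). A sketch
 * that counts the pages registered for node 0:
 *
 *	unsigned long start_pfn, end_pfn, pages = 0;
 *	int i, nid;
 *
 *	for_each_mem_pfn_range(i, 0, &start_pfn, &end_pfn, &nid)
 *		pages += end_pfn - start_pfn;
 */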
1453 
1454 /**
1455  * memblock_set_node - set node ID on memblock regions
1456  * @base: base of area to set node ID for
1457  * @size: size of area to set node ID for
1458  * @type: memblock type to set node ID for
1459  * @nid: node ID to set
1460  *
1461  * Set the nid of memblock @type regions in [@base, @base + @size) to @nid.
1462  * Regions which cross the area boundaries are split as necessary.
1463  *
1464  * Return:
1465  * 0 on success, -errno on failure.
1466  */
1467 int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
1468 				      struct memblock_type *type, int nid)
1469 {
1470 #ifdef CONFIG_NUMA
1471 	int start_rgn, end_rgn;
1472 	int i, ret;
1473 
1474 	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
1475 	if (ret)
1476 		return ret;
1477 
1478 	for (i = start_rgn; i < end_rgn; i++)
1479 		memblock_set_region_node(&type->regions[i], nid);
1480 
1481 	memblock_merge_regions(type, start_rgn, end_rgn);
1482 #endif
1483 	return 0;
1484 }
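
/*
 * Editor's note: as described in the overview, an architecture may add
 * memory first and attach node IDs later. A hedged sketch, where @base,
 * @size and @nid come from the platform's NUMA description:
 *
 *	memblock_add(base, size);
 *	...
 *	memblock_set_node(base, size, &memblock.memory, nid);
 */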
1485 
1486 /**
1487  * memblock_alloc_range_nid - allocate boot memory block
1488  * @size: size of memory block to be allocated in bytes
1489  * @align: alignment of the region and block's size
1490  * @start: the lower bound of the memory region to allocate (phys address)
1491  * @end: the upper bound of the memory region to allocate (phys address)
1492  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1493  * @exact_nid: control the allocation fall back to other nodes
1494  *
1495  * The allocation is performed from memory region limited by
1496  * memblock.current_limit if @end == %MEMBLOCK_ALLOC_ACCESSIBLE.
1497  *
1498  * If the specified node can not hold the requested memory and @exact_nid
1499  * is false, the allocation falls back to any node in the system.
1500  *
1501  * For systems with memory mirroring, the allocation is attempted first
1502  * from the regions with mirroring enabled and then retried from any
1503  * memory region.
1504  *
1505  * In addition, the allocated boot memory block is registered with
1506  * kmemleak_alloc_phys(), so it is never reported as a leak.
1507  *
1508  * Return:
1509  * Physical address of allocated memory block on success, %0 on failure.
1510  */
1511 phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
1512 					phys_addr_t align, phys_addr_t start,
1513 					phys_addr_t end, int nid,
1514 					bool exact_nid)
1515 {
1516 	enum memblock_flags flags = choose_memblock_flags();
1517 	phys_addr_t found;
1518 
1519 	/*
1520 	 * Detect any accidental use of these APIs after slab is ready, as at
1521 	 * this moment memblock may be deinitialized already and its
1522 	 * internal data may be destroyed (after execution of memblock_free_all)
1523 	 */
1524 	if (WARN_ON_ONCE(slab_is_available())) {
1525 		void *vaddr = kzalloc_node(size, GFP_NOWAIT, nid);
1526 
1527 		return vaddr ? virt_to_phys(vaddr) : 0;
1528 	}
1529 
1530 	if (!align) {
1531 		/* Can't use WARNs this early in boot on powerpc */
1532 		dump_stack();
1533 		align = SMP_CACHE_BYTES;
1534 	}
1535 
1536 again:
1537 	found = memblock_find_in_range_node(size, align, start, end, nid,
1538 					    flags);
1539 	if (found && !__memblock_reserve(found, size, nid, MEMBLOCK_RSRV_KERN))
1540 		goto done;
1541 
1542 	if (numa_valid_node(nid) && !exact_nid) {
1543 		found = memblock_find_in_range_node(size, align, start,
1544 						    end, NUMA_NO_NODE,
1545 						    flags);
1546 		if (found && !memblock_reserve_kern(found, size))
1547 			goto done;
1548 	}
1549 
1550 	if (flags & MEMBLOCK_MIRROR) {
1551 		flags &= ~MEMBLOCK_MIRROR;
1552 		pr_warn_ratelimited("Could not allocate %pap bytes of mirrored memory\n",
1553 			&size);
1554 		goto again;
1555 	}
1556 
1557 	return 0;
1558 
1559 done:
1560 	/*
1561 	 * Skip kmemleak for those places like kasan_init() and
1562 	 * early_pgtable_alloc() due to high volume.
1563 	 */
1564 	if (end != MEMBLOCK_ALLOC_NOLEAKTRACE)
1565 		/*
1566 		 * Memblock allocated blocks are never reported as
1567 		 * leaks. This is because many of these blocks are
1568 		 * only referred via the physical address which is
1569 		 * not looked up by kmemleak.
1570 		 */
1571 		kmemleak_alloc_phys(found, size, 0);
1572 
1573 	/*
1574 	 * Some Virtual Machine platforms, such as Intel TDX or AMD SEV-SNP,
1575 	 * require memory to be accepted before it can be used by the
1576 	 * guest.
1577 	 *
1578 	 * Accept the memory of the allocated buffer.
1579 	 */
1580 	accept_memory(found, size);
1581 
1582 	return found;
1583 }
1584 
1585 /**
1586  * memblock_phys_alloc_range - allocate a memory block inside specified range
1587  * @size: size of memory block to be allocated in bytes
1588  * @align: alignment of the region and block's size
1589  * @start: the lower bound of the memory region to allocate (physical address)
1590  * @end: the upper bound of the memory region to allocate (physical address)
1591  *
1592  * Allocate @size bytes in the range between @start and @end.
1593  *
1594  * Return: physical address of the allocated memory block on success,
1595  * %0 on failure.
1596  */
1597 phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
1598 					     phys_addr_t align,
1599 					     phys_addr_t start,
1600 					     phys_addr_t end)
1601 {
1602 	memblock_dbg("%s: %llu bytes align=0x%llx from=%pa max_addr=%pa %pS\n",
1603 		     __func__, (u64)size, (u64)align, &start, &end,
1604 		     (void *)_RET_IP_);
1605 	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
1606 					false);
1607 }
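
/*
 * Editor's note: a hedged example of a bounded physical allocation, e.g.
 * placing a 64K buffer below 4G for a device that cannot address more:
 *
 *	phys_addr_t pa;
 *
 *	pa = memblock_phys_alloc_range(SZ_64K, SZ_64K, 0, SZ_4G);
 *	if (!pa)
 *		pr_warn("no memory below 4G for early DMA buffer\n");
 */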
1608 
1609 /**
1610  * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node
1611  * @size: size of memory block to be allocated in bytes
1612  * @align: alignment of the region and block's size
1613  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1614  *
1615  * Allocates memory block from the specified NUMA node. If the node
1616  * has no available memory, attempts to allocate from any node in the
1617  * system.
1618  *
1619  * Return: physical address of the allocated memory block on success,
1620  * %0 on failure.
1621  */
1622 phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
1623 {
1624 	return memblock_alloc_range_nid(size, align, 0,
1625 					MEMBLOCK_ALLOC_ACCESSIBLE, nid, false);
1626 }
1627 
1628 /**
1629  * memblock_alloc_internal - allocate boot memory block
1630  * @size: size of memory block to be allocated in bytes
1631  * @align: alignment of the region and block's size
1632  * @min_addr: the lower bound of the memory region to allocate (phys address)
1633  * @max_addr: the upper bound of the memory region to allocate (phys address)
1634  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1635  * @exact_nid: control the allocation fall back to other nodes
1636  *
1637  * Allocates memory block using memblock_alloc_range_nid() and
1638  * converts the returned physical address to virtual.
1639  *
1640  * The @min_addr limit is dropped if it cannot be satisfied and the allocation
1641  * will fall back to memory below @min_addr. Other constraints, such
1642  * as node and mirrored memory, will be handled again in
1643  * memblock_alloc_range_nid().
1644  *
1645  * Return:
1646  * Virtual address of allocated memory block on success, NULL on failure.
1647  */
1648 static void * __init memblock_alloc_internal(
1649 				phys_addr_t size, phys_addr_t align,
1650 				phys_addr_t min_addr, phys_addr_t max_addr,
1651 				int nid, bool exact_nid)
1652 {
1653 	phys_addr_t alloc;
1654 
1655 
1656 	if (max_addr > memblock.current_limit)
1657 		max_addr = memblock.current_limit;
1658 
1659 	alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid,
1660 					exact_nid);
1661 
1662 	/* retry allocation without lower limit */
1663 	if (!alloc && min_addr)
1664 		alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid,
1665 						exact_nid);
1666 
1667 	if (!alloc)
1668 		return NULL;
1669 
1670 	return phys_to_virt(alloc);
1671 }
1672 
1673 /**
1674  * memblock_alloc_exact_nid_raw - allocate boot memory block on the exact node
1675  * without zeroing memory
1676  * @size: size of memory block to be allocated in bytes
1677  * @align: alignment of the region and block's size
1678  * @min_addr: the lower bound of the memory region from where the allocation
1679  *	  is preferred (phys address)
1680  * @max_addr: the upper bound of the memory region from where the allocation
1681  *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
1682  *	      allocate only from memory limited by memblock.current_limit value
1683  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1684  *
1685  * Public function, provides additional debug information (including caller
1686  * info), if enabled. Does not zero allocated memory.
1687  *
1688  * Return:
1689  * Virtual address of allocated memory block on success, NULL on failure.
1690  */
1691 void * __init memblock_alloc_exact_nid_raw(
1692 			phys_addr_t size, phys_addr_t align,
1693 			phys_addr_t min_addr, phys_addr_t max_addr,
1694 			int nid)
1695 {
1696 	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
1697 		     __func__, (u64)size, (u64)align, nid, &min_addr,
1698 		     &max_addr, (void *)_RET_IP_);
1699 
1700 	return memblock_alloc_internal(size, align, min_addr, max_addr, nid,
1701 				       true);
1702 }
1703 
1704 /**
1705  * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing
1706  * memory and without panicking
1707  * @size: size of memory block to be allocated in bytes
1708  * @align: alignment of the region and block's size
1709  * @min_addr: the lower bound of the memory region from where the allocation
1710  *	  is preferred (phys address)
1711  * @max_addr: the upper bound of the memory region from where the allocation
1712  *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
1713  *	      allocate only from memory limited by memblock.current_limit value
1714  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1715  *
1716  * Public function, provides additional debug information (including caller
1717  * info), if enabled. Does not zero allocated memory, does not panic if request
1718  * cannot be satisfied.
1719  *
1720  * Return:
1721  * Virtual address of allocated memory block on success, NULL on failure.
1722  */
1723 void * __init memblock_alloc_try_nid_raw(
1724 			phys_addr_t size, phys_addr_t align,
1725 			phys_addr_t min_addr, phys_addr_t max_addr,
1726 			int nid)
1727 {
1728 	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
1729 		     __func__, (u64)size, (u64)align, nid, &min_addr,
1730 		     &max_addr, (void *)_RET_IP_);
1731 
1732 	return memblock_alloc_internal(size, align, min_addr, max_addr, nid,
1733 				       false);
1734 }
1735 
1736 /**
1737  * memblock_alloc_try_nid - allocate boot memory block
1738  * @size: size of memory block to be allocated in bytes
1739  * @align: alignment of the region and block's size
1740  * @min_addr: the lower bound of the memory region from where the allocation
1741  *	  is preferred (phys address)
1742  * @max_addr: the upper bound of the memory region from where the allocation
1743  *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
1744  *	      allocate only from memory limited by memblock.current_limit value
1745  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1746  *
1747  * Public function, provides additional debug information (including caller
1748  * info), if enabled. This function zeroes the allocated memory.
1749  *
1750  * Return:
1751  * Virtual address of allocated memory block on success, NULL on failure.
1752  */
1753 void * __init memblock_alloc_try_nid(
1754 			phys_addr_t size, phys_addr_t align,
1755 			phys_addr_t min_addr, phys_addr_t max_addr,
1756 			int nid)
1757 {
1758 	void *ptr;
1759 
1760 	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
1761 		     __func__, (u64)size, (u64)align, nid, &min_addr,
1762 		     &max_addr, (void *)_RET_IP_);
1763 	ptr = memblock_alloc_internal(size, align,
1764 					   min_addr, max_addr, nid, false);
1765 	if (ptr)
1766 		memset(ptr, 0, size);
1767 
1768 	return ptr;
1769 }
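
/*
 * Illustrative sketch, not invoked anywhere in this file: a node-preferred
 * early allocation with fallback to any node typically looks like the
 * snippet below ("nid" and "table" are hypothetical caller names):
 *
 *	void *table = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE, 0,
 *					     MEMBLOCK_ALLOC_ACCESSIBLE, nid);
 *	if (!table)
 *		return -ENOMEM;
 *
 * The returned memory is already zeroed; memblock_alloc_try_nid_raw() is
 * the variant to use when the caller overwrites the whole buffer anyway.
 */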
1770 
1771 /**
1772  * __memblock_alloc_or_panic - Try to allocate memory and panic on failure
1773  * @size: size of memory block to be allocated in bytes
1774  * @align: alignment of the region and block's size
1775  * @func: caller func name
1776  *
1777  * This function attempts to allocate memory using memblock_alloc,
1778  * and in case of failure, it calls panic with the formatted message.
1779	 * This function should not be used directly; use the memblock_alloc_or_panic macro instead.
1780  */
1781 void *__init __memblock_alloc_or_panic(phys_addr_t size, phys_addr_t align,
1782 				       const char *func)
1783 {
1784 	void *addr = memblock_alloc(size, align);
1785 
1786 	if (unlikely(!addr))
1787 		panic("%s: Failed to allocate %pap bytes\n", func, &size);
1788 	return addr;
1789 }
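
/*
 * Illustrative sketch of the intended call site; the macro supplies __func__
 * automatically and the names below are hypothetical:
 *
 *	struct foo *f = memblock_alloc_or_panic(sizeof(*f), SMP_CACHE_BYTES);
 *
 * Reserve this for allocations the kernel genuinely cannot boot without.
 */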
1790 
1791 /**
1792  * memblock_free_late - free pages directly to buddy allocator
1793	 * @base: phys starting address of the boot memory block
1794  * @size: size of the boot memory block in bytes
1795  *
1796  * This is only useful when the memblock allocator has already been torn
1797  * down, but we are still initializing the system.  Pages are released directly
1798  * to the buddy allocator.
1799  */
1800 void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
1801 {
1802 	phys_addr_t cursor, end;
1803 
1804 	end = base + size - 1;
1805 	memblock_dbg("%s: [%pa-%pa] %pS\n",
1806 		     __func__, &base, &end, (void *)_RET_IP_);
1807 	kmemleak_free_part_phys(base, size);
1808 	cursor = PFN_UP(base);
1809 	end = PFN_DOWN(base + size);
1810 
1811 	for (; cursor < end; cursor++) {
1812 		memblock_free_pages(cursor, 0);
1813 		totalram_pages_inc();
1814 	}
1815 }
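
/*
 * Illustrative sketch, hedged: a typical late free of a region that was
 * reserved during early boot but is no longer needed once its contents have
 * been consumed ("buf" and "buf_size" are hypothetical):
 *
 *	memblock_free_late(__pa(buf), buf_size);
 *
 * Unlike memblock_free()/memblock_phys_free(), this releases the pages
 * straight to the buddy allocator and bumps totalram_pages accordingly.
 */
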
1816 /*
1817  * Remaining API functions
1818  */
1819 
1820 phys_addr_t __init_memblock memblock_phys_mem_size(void)
1821 {
1822 	return memblock.memory.total_size;
1823 }
1824 
1825 phys_addr_t __init_memblock memblock_reserved_size(void)
1826 {
1827 	return memblock.reserved.total_size;
1828 }
1829 
1830 phys_addr_t __init_memblock memblock_reserved_kern_size(phys_addr_t limit, int nid)
1831 {
1832 	struct memblock_region *r;
1833 	phys_addr_t total = 0;
1834 
1835 	for_each_reserved_mem_region(r) {
1836 		phys_addr_t size = r->size;
1837 
1838 		if (r->base > limit)
1839 			break;
1840 
1841 		if (r->base + r->size > limit)
1842 			size = limit - r->base;
1843 
1844 		if (nid == memblock_get_region_node(r) || !numa_valid_node(nid))
1845 			if (r->flags & MEMBLOCK_RSRV_KERN)
1846 				total += size;
1847 	}
1848 
1849 	return total;
1850 }
1851 
1852 /**
1853  * memblock_estimated_nr_free_pages - return estimated number of free pages
1854  * from memblock point of view
1855  *
1856  * During bootup, subsystems might need a rough estimate of the number of free
1857  * pages in the whole system, before precise numbers are available from the
1858  * buddy. Especially with CONFIG_DEFERRED_STRUCT_PAGE_INIT, the numbers
1859  * obtained from the buddy might be very imprecise during bootup.
1860  *
1861  * Return:
1862  * An estimated number of free pages from memblock point of view.
1863  */
1864 unsigned long __init memblock_estimated_nr_free_pages(void)
1865 {
1866 	return PHYS_PFN(memblock_phys_mem_size() -
1867 			memblock_reserved_kern_size(MEMBLOCK_ALLOC_ANYWHERE, NUMA_NO_NODE));
1868 }
1869 
1870 /* lowest address */
1871 phys_addr_t __init_memblock memblock_start_of_DRAM(void)
1872 {
1873 	return memblock.memory.regions[0].base;
1874 }
1875 
1876 phys_addr_t __init_memblock memblock_end_of_DRAM(void)
1877 {
1878 	int idx = memblock.memory.cnt - 1;
1879 
1880 	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
1881 }
1882 
1883 static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
1884 {
1885 	phys_addr_t max_addr = PHYS_ADDR_MAX;
1886 	struct memblock_region *r;
1887 
1888 	/*
1889	 * Translate the memory @limit size into the max address within one of
1890	 * the memory memblock regions. If @limit exceeds the total size of
1891	 * those regions, max_addr keeps its original value PHYS_ADDR_MAX.
1892 	 */
1893 	for_each_mem_region(r) {
1894 		if (limit <= r->size) {
1895 			max_addr = r->base + limit;
1896 			break;
1897 		}
1898 		limit -= r->size;
1899 	}
1900 
1901 	return max_addr;
1902 }
1903 
1904 void __init memblock_enforce_memory_limit(phys_addr_t limit)
1905 {
1906 	phys_addr_t max_addr;
1907 
1908 	if (!limit)
1909 		return;
1910 
1911 	max_addr = __find_max_addr(limit);
1912 
1913 	/* @limit exceeds the total size of the memory, do nothing */
1914 	if (max_addr == PHYS_ADDR_MAX)
1915 		return;
1916 
1917 	/* truncate both memory and reserved regions */
1918 	memblock_remove_range(&memblock.memory, max_addr,
1919 			      PHYS_ADDR_MAX);
1920 	memblock_remove_range(&memblock.reserved, max_addr,
1921 			      PHYS_ADDR_MAX);
1922 }
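
/*
 * Illustrative sketch: architectures typically parse the "mem=" option into
 * a limit and, once all memory has been registered with memblock, apply it
 * here ("memory_limit" is a hypothetical arch variable):
 *
 *	memory_limit = memparse(p, &p) & PAGE_MASK;	(early_param handler)
 *	...
 *	memblock_enforce_memory_limit(memory_limit);	(arch memblock setup)
 */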
1923 
1924 void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
1925 {
1926 	int start_rgn, end_rgn;
1927 	int i, ret;
1928 
1929 	if (!size)
1930 		return;
1931 
1932 	if (!memblock_memory->total_size) {
1933 		pr_warn("%s: No memory registered yet\n", __func__);
1934 		return;
1935 	}
1936 
1937 	ret = memblock_isolate_range(&memblock.memory, base, size,
1938 						&start_rgn, &end_rgn);
1939 	if (ret)
1940 		return;
1941 
1942 	/* remove all the MAP regions */
1943 	for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
1944 		if (!memblock_is_nomap(&memblock.memory.regions[i]))
1945 			memblock_remove_region(&memblock.memory, i);
1946 
1947 	for (i = start_rgn - 1; i >= 0; i--)
1948 		if (!memblock_is_nomap(&memblock.memory.regions[i]))
1949 			memblock_remove_region(&memblock.memory, i);
1950 
1951 	/* truncate the reserved regions */
1952 	memblock_remove_range(&memblock.reserved, 0, base);
1953 	memblock_remove_range(&memblock.reserved,
1954 			base + size, PHYS_ADDR_MAX);
1955 }
1956 
1957 void __init memblock_mem_limit_remove_map(phys_addr_t limit)
1958 {
1959 	phys_addr_t max_addr;
1960 
1961 	if (!limit)
1962 		return;
1963 
1964 	max_addr = __find_max_addr(limit);
1965 
1966 	/* @limit exceeds the total size of the memory, do nothing */
1967 	if (max_addr == PHYS_ADDR_MAX)
1968 		return;
1969 
1970 	memblock_cap_memory_range(0, max_addr);
1971 }
1972 
1973 static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
1974 {
1975 	unsigned int left = 0, right = type->cnt;
1976 
1977 	do {
1978 		unsigned int mid = (right + left) / 2;
1979 
1980 		if (addr < type->regions[mid].base)
1981 			right = mid;
1982 		else if (addr >= (type->regions[mid].base +
1983 				  type->regions[mid].size))
1984 			left = mid + 1;
1985 		else
1986 			return mid;
1987 	} while (left < right);
1988 	return -1;
1989 }
1990 
1991 bool __init_memblock memblock_is_reserved(phys_addr_t addr)
1992 {
1993 	return memblock_search(&memblock.reserved, addr) != -1;
1994 }
1995 
1996 bool __init_memblock memblock_is_memory(phys_addr_t addr)
1997 {
1998 	return memblock_search(&memblock.memory, addr) != -1;
1999 }
2000 
2001 bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
2002 {
2003 	int i = memblock_search(&memblock.memory, addr);
2004 
2005 	if (i == -1)
2006 		return false;
2007 	return !memblock_is_nomap(&memblock.memory.regions[i]);
2008 }
2009 
2010 int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
2011 			 unsigned long *start_pfn, unsigned long *end_pfn)
2012 {
2013 	struct memblock_type *type = &memblock.memory;
2014 	int mid = memblock_search(type, PFN_PHYS(pfn));
2015 
2016 	if (mid == -1)
2017 		return NUMA_NO_NODE;
2018 
2019 	*start_pfn = PFN_DOWN(type->regions[mid].base);
2020 	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);
2021 
2022 	return memblock_get_region_node(&type->regions[mid]);
2023 }
2024 
2025 /**
2026  * memblock_is_region_memory - check if a region is a subset of memory
2027  * @base: base of region to check
2028  * @size: size of region to check
2029  *
2030  * Check if the region [@base, @base + @size) is a subset of a memory block.
2031  *
2032  * Return:
2033	 * true if the region is a subset of memory, false otherwise
2034  */
2035 bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
2036 {
2037 	int idx = memblock_search(&memblock.memory, base);
2038 	phys_addr_t end = base + memblock_cap_size(base, &size);
2039 
2040 	if (idx == -1)
2041 		return false;
2042 	return (memblock.memory.regions[idx].base +
2043 		 memblock.memory.regions[idx].size) >= end;
2044 }
2045 
2046 /**
2047  * memblock_is_region_reserved - check if a region intersects reserved memory
2048  * @base: base of region to check
2049  * @size: size of region to check
2050  *
2051  * Check if the region [@base, @base + @size) intersects a reserved
2052  * memory block.
2053  *
2054  * Return:
2055  * True if they intersect, false if not.
2056  */
2057 bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
2058 {
2059 	return memblock_overlaps_region(&memblock.reserved, base, size);
2060 }
2061 
2062 void __init_memblock memblock_trim_memory(phys_addr_t align)
2063 {
2064 	phys_addr_t start, end, orig_start, orig_end;
2065 	struct memblock_region *r;
2066 
2067 	for_each_mem_region(r) {
2068 		orig_start = r->base;
2069 		orig_end = r->base + r->size;
2070 		start = round_up(orig_start, align);
2071 		end = round_down(orig_end, align);
2072 
2073 		if (start == orig_start && end == orig_end)
2074 			continue;
2075 
2076 		if (start < end) {
2077 			r->base = start;
2078 			r->size = end - start;
2079 		} else {
2080 			memblock_remove_region(&memblock.memory,
2081 					       r - memblock.memory.regions);
2082 			r--;
2083 		}
2084 	}
2085 }
2086 
2087 void __init_memblock memblock_set_current_limit(phys_addr_t limit)
2088 {
2089 	memblock.current_limit = limit;
2090 }
2091 
2092 phys_addr_t __init_memblock memblock_get_current_limit(void)
2093 {
2094 	return memblock.current_limit;
2095 }
2096 
2097 static void __init_memblock memblock_dump(struct memblock_type *type)
2098 {
2099 	phys_addr_t base, end, size;
2100 	enum memblock_flags flags;
2101 	int idx;
2102 	struct memblock_region *rgn;
2103 
2104 	pr_info(" %s.cnt  = 0x%lx\n", type->name, type->cnt);
2105 
2106 	for_each_memblock_type(idx, type, rgn) {
2107 		char nid_buf[32] = "";
2108 
2109 		base = rgn->base;
2110 		size = rgn->size;
2111 		end = base + size - 1;
2112 		flags = rgn->flags;
2113 #ifdef CONFIG_NUMA
2114 		if (numa_valid_node(memblock_get_region_node(rgn)))
2115 			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
2116 				 memblock_get_region_node(rgn));
2117 #endif
2118 		pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n",
2119 			type->name, idx, &base, &end, &size, nid_buf, flags);
2120 	}
2121 }
2122 
2123 static void __init_memblock __memblock_dump_all(void)
2124 {
2125 	pr_info("MEMBLOCK configuration:\n");
2126 	pr_info(" memory size = %pa reserved size = %pa\n",
2127 		&memblock.memory.total_size,
2128 		&memblock.reserved.total_size);
2129 
2130 	memblock_dump(&memblock.memory);
2131 	memblock_dump(&memblock.reserved);
2132 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
2133 	memblock_dump(&physmem);
2134 #endif
2135 }
2136 
2137 void __init_memblock memblock_dump_all(void)
2138 {
2139 	if (memblock_debug)
2140 		__memblock_dump_all();
2141 }
2142 
2143 void __init memblock_allow_resize(void)
2144 {
2145 	memblock_can_resize = 1;
2146 }
2147 
2148 static int __init early_memblock(char *p)
2149 {
2150 	if (p && strstr(p, "debug"))
2151 		memblock_debug = 1;
2152 	return 0;
2153 }
2154 early_param("memblock", early_memblock);
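
/*
 * Example (illustrative): booting with "memblock=debug" on the kernel
 * command line sets memblock_debug, which enables the memblock_dbg() traces
 * used throughout this file and makes memblock_dump_all() print the full
 * region tables.
 */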
2155 
2156 static void __init free_memmap(unsigned long start_pfn, unsigned long end_pfn)
2157 {
2158 	struct page *start_pg, *end_pg;
2159 	phys_addr_t pg, pgend;
2160 
2161 	/*
2162 	 * Convert start_pfn/end_pfn to a struct page pointer.
2163 	 */
2164 	start_pg = pfn_to_page(start_pfn - 1) + 1;
2165 	end_pg = pfn_to_page(end_pfn - 1) + 1;
2166 
2167 	/*
2168 	 * Convert to physical addresses, and round start upwards and end
2169 	 * downwards.
2170 	 */
2171 	pg = PAGE_ALIGN(__pa(start_pg));
2172 	pgend = PAGE_ALIGN_DOWN(__pa(end_pg));
2173 
2174 	/*
2175 	 * If there are free pages between these, free the section of the
2176 	 * memmap array.
2177 	 */
2178 	if (pg < pgend)
2179 		memblock_phys_free(pg, pgend - pg);
2180 }
2181 
2182 /*
2183  * The mem_map array can get very big.  Free the unused area of the memory map.
2184  */
2185 static void __init free_unused_memmap(void)
2186 {
2187 	unsigned long start, end, prev_end = 0;
2188 	int i;
2189 
2190 	if (!IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) ||
2191 	    IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
2192 		return;
2193 
2194 	/*
2195 	 * This relies on each bank being in address order.
2196 	 * The banks are sorted previously in bootmem_init().
2197 	 */
2198 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
2199 #ifdef CONFIG_SPARSEMEM
2200 		/*
2201 		 * Take care not to free memmap entries that don't exist
2202 		 * due to SPARSEMEM sections which aren't present.
2203 		 */
2204 		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
2205 #endif
2206 		/*
2207 		 * Align down here since many operations in VM subsystem
2208 		 * presume that there are no holes in the memory map inside
2209 		 * a pageblock
2210 		 */
2211 		start = pageblock_start_pfn(start);
2212 
2213 		/*
2214 		 * If we had a previous bank, and there is a space
2215 		 * between the current bank and the previous, free it.
2216 		 */
2217 		if (prev_end && prev_end < start)
2218 			free_memmap(prev_end, start);
2219 
2220 		/*
2221 		 * Align up here since many operations in VM subsystem
2222 		 * presume that there are no holes in the memory map inside
2223 		 * a pageblock
2224 		 */
2225 		prev_end = pageblock_align(end);
2226 	}
2227 
2228 #ifdef CONFIG_SPARSEMEM
2229 	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) {
2230 		prev_end = pageblock_align(end);
2231 		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
2232 	}
2233 #endif
2234 }
2235 
2236 static void __init __free_pages_memory(unsigned long start, unsigned long end)
2237 {
2238 	int order;
2239 
2240 	while (start < end) {
2241 		/*
2242 		 * Free the pages in the largest chunks alignment allows.
2243 		 *
2244 		 * __ffs() behaviour is undefined for 0. start == 0 is
2245		 * MAX_PAGE_ORDER-aligned, so set order to MAX_PAGE_ORDER in
2246		 * that case.
2247 		 */
2248 		if (start)
2249 			order = min_t(int, MAX_PAGE_ORDER, __ffs(start));
2250 		else
2251 			order = MAX_PAGE_ORDER;
2252 
2253 		while (start + (1UL << order) > end)
2254 			order--;
2255 
2256 		memblock_free_pages(start, order);
2257 
2258 		start += (1UL << order);
2259 	}
2260 }
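
/*
 * Worked example of the chunking above (illustrative): with start == 0x2050
 * and end == 0x2100, __ffs(0x2050) == 4, so the first chunk is 2^4 pages at
 * 0x2050; the next iteration starts at 0x2060 where __ffs() == 5, and so on,
 * each chunk respecting both the alignment of 'start' and the 'end' bound.
 */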
2261 
2262 static unsigned long __init __free_memory_core(phys_addr_t start,
2263 				 phys_addr_t end)
2264 {
2265 	unsigned long start_pfn = PFN_UP(start);
2266 	unsigned long end_pfn = PFN_DOWN(end);
2267 
2268 	if (!IS_ENABLED(CONFIG_HIGHMEM) && end_pfn > max_low_pfn)
2269 		end_pfn = max_low_pfn;
2270 
2271 	if (start_pfn >= end_pfn)
2272 		return 0;
2273 
2274 	__free_pages_memory(start_pfn, end_pfn);
2275 
2276 	return end_pfn - start_pfn;
2277 }
2278 
2279 /*
2280  * Initialised pages do not have PageReserved set. This function is called
2281  * for each reserved range and marks the pages PageReserved.
2282  * When deferred initialization of struct pages is enabled it also ensures
2283  * that struct pages are properly initialised.
2284  */
2285 static void __init memmap_init_reserved_range(phys_addr_t start,
2286 					      phys_addr_t end, int nid)
2287 {
2288 	unsigned long pfn;
2289 
2290 	for_each_valid_pfn(pfn, PFN_DOWN(start), PFN_UP(end)) {
2291 		struct page *page = pfn_to_page(pfn);
2292 
2293 		init_deferred_page(pfn, nid);
2294 
2295 		/*
2296 		 * no need for atomic set_bit because the struct
2297 		 * page is not visible yet so nobody should
2298 		 * access it yet.
2299 		 */
2300 		__SetPageReserved(page);
2301 	}
2302 }
2303 
2304 static void __init memmap_init_reserved_pages(void)
2305 {
2306 	struct memblock_region *region;
2307 	phys_addr_t start, end;
2308 	int nid;
2309 	unsigned long max_reserved;
2310 
2311 	/*
2312 	 * set nid on all reserved pages and also treat struct
2313 	 * pages for the NOMAP regions as PageReserved
2314 	 */
2315 repeat:
2316 	max_reserved = memblock.reserved.max;
2317 	for_each_mem_region(region) {
2318 		nid = memblock_get_region_node(region);
2319 		start = region->base;
2320 		end = start + region->size;
2321 
2322 		if (memblock_is_nomap(region))
2323 			memmap_init_reserved_range(start, end, nid);
2324 
2325 		memblock_set_node(start, region->size, &memblock.reserved, nid);
2326 	}
2327 	/*
2328	 * If 'max' has changed, memblock.reserved has doubled its array,
2329	 * which may have added a new reserved region before the current
2330	 * 'start'. Repeat the procedure to set its node id.
2331 	 */
2332 	if (max_reserved != memblock.reserved.max)
2333 		goto repeat;
2334 
2335 	/*
2336 	 * initialize struct pages for reserved regions that don't have
2337 	 * the MEMBLOCK_RSRV_NOINIT flag set
2338 	 */
2339 	for_each_reserved_mem_region(region) {
2340 		if (!memblock_is_reserved_noinit(region)) {
2341 			nid = memblock_get_region_node(region);
2342 			start = region->base;
2343 			end = start + region->size;
2344 
2345 			if (!numa_valid_node(nid))
2346 				nid = early_pfn_to_nid(PFN_DOWN(start));
2347 
2348 			memmap_init_reserved_range(start, end, nid);
2349 		}
2350 	}
2351 }
2352 
2353 static unsigned long __init free_low_memory_core_early(void)
2354 {
2355 	unsigned long count = 0;
2356 	phys_addr_t start, end;
2357 	u64 i;
2358 
2359 	memblock_clear_hotplug(0, -1);
2360 
2361 	memmap_init_reserved_pages();
2362 
2363 	/*
2364 	 * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
2365	 * because in some cases, such as when Node 0 has no RAM installed,
2366	 * low RAM will be on Node 1.
2367 	 */
2368 	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
2369 				NULL)
2370 		count += __free_memory_core(start, end);
2371 
2372 	return count;
2373 }
2374 
2375 static int reset_managed_pages_done __initdata;
2376 
2377 static void __init reset_node_managed_pages(pg_data_t *pgdat)
2378 {
2379 	struct zone *z;
2380 
2381 	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
2382 		atomic_long_set(&z->managed_pages, 0);
2383 }
2384 
2385 void __init reset_all_zones_managed_pages(void)
2386 {
2387 	struct pglist_data *pgdat;
2388 
2389 	if (reset_managed_pages_done)
2390 		return;
2391 
2392 	for_each_online_pgdat(pgdat)
2393 		reset_node_managed_pages(pgdat);
2394 
2395 	reset_managed_pages_done = 1;
2396 }
2397 
2398 /**
2399  * memblock_free_all - release free pages to the buddy allocator
2400  */
2401 void __init memblock_free_all(void)
2402 {
2403 	unsigned long pages;
2404 
2405 	free_unused_memmap();
2406 	reset_all_zones_managed_pages();
2407 
2408 	memblock_clear_kho_scratch_only();
2409 	pages = free_low_memory_core_early();
2410 	totalram_pages_add(pages);
2411 }
2412 
2413 /* Keep a table to reserve named memory */
2414 #define RESERVE_MEM_MAX_ENTRIES		8
2415 #define RESERVE_MEM_NAME_SIZE		16
2416 struct reserve_mem_table {
2417 	char			name[RESERVE_MEM_NAME_SIZE];
2418 	phys_addr_t		start;
2419 	phys_addr_t		size;
2420 };
2421 static struct reserve_mem_table reserved_mem_table[RESERVE_MEM_MAX_ENTRIES];
2422 static int reserved_mem_count;
2423 static DEFINE_MUTEX(reserve_mem_lock);
2424 
2425 /* Add wildcard region with a lookup name */
2426 static void __init reserved_mem_add(phys_addr_t start, phys_addr_t size,
2427 				   const char *name)
2428 {
2429 	struct reserve_mem_table *map;
2430 
2431 	map = &reserved_mem_table[reserved_mem_count++];
2432 	map->start = start;
2433 	map->size = size;
2434 	strscpy(map->name, name);
2435 }
2436 
2437 static struct reserve_mem_table *reserve_mem_find_by_name_nolock(const char *name)
2438 {
2439 	struct reserve_mem_table *map;
2440 	int i;
2441 
2442 	for (i = 0; i < reserved_mem_count; i++) {
2443 		map = &reserved_mem_table[i];
2444 		if (!map->size)
2445 			continue;
2446 		if (strcmp(name, map->name) == 0)
2447 			return map;
2448 	}
2449 	return NULL;
2450 }
2451 
2452 /**
2453  * reserve_mem_find_by_name - Find reserved memory region with a given name
2454  * @name: The name that is attached to a reserved memory region
2455  * @start: If found, holds the start address
2456	 * @size: If found, holds the size of the region.
2457  *
2458  * @start and @size are only updated if @name is found.
2459  *
2460  * Returns: 1 if found or 0 if not found.
2461  */
2462 int reserve_mem_find_by_name(const char *name, phys_addr_t *start, phys_addr_t *size)
2463 {
2464 	struct reserve_mem_table *map;
2465 
2466 	guard(mutex)(&reserve_mem_lock);
2467 	map = reserve_mem_find_by_name_nolock(name);
2468 	if (!map)
2469 		return 0;
2470 
2471 	*start = map->start;
2472 	*size = map->size;
2473 	return 1;
2474 }
2475 EXPORT_SYMBOL_GPL(reserve_mem_find_by_name);
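
/*
 * Illustrative sketch of a consumer; the name "oops_buf" is hypothetical and
 * must match a reserve_mem= boot parameter:
 *
 *	phys_addr_t start, size;
 *
 *	if (reserve_mem_find_by_name("oops_buf", &start, &size))
 *		buf = memremap(start, size, MEMREMAP_WB);
 *
 * When KHO is enabled, the code further down revives such reservations
 * across kexec, so their contents can survive a handover.
 */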
2476 
2477 /**
2478  * reserve_mem_release_by_name - Release reserved memory region with a given name
2479  * @name: The name that is attached to a reserved memory region
2480  *
2481	 * Forcibly release the pages in the reserved memory region so that the memory
2482	 * can be used as free memory. After release, the reserved region size becomes 0.
2483  *
2484  * Returns: 1 if released or 0 if not found.
2485  */
2486 int reserve_mem_release_by_name(const char *name)
2487 {
2488 	char buf[RESERVE_MEM_NAME_SIZE + 12];
2489 	struct reserve_mem_table *map;
2490 	void *start, *end;
2491 
2492 	guard(mutex)(&reserve_mem_lock);
2493 	map = reserve_mem_find_by_name_nolock(name);
2494 	if (!map)
2495 		return 0;
2496 
2497 	start = phys_to_virt(map->start);
2498 	end = start + map->size;
2499 	snprintf(buf, sizeof(buf), "reserve_mem:%s", name);
2500 	free_reserved_area(start, end, 0, buf);
2501 	map->size = 0;
2502 
2503 	return 1;
2504 }
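
/*
 * Illustrative sketch ("oops_buf" is hypothetical): a consumer that no
 * longer needs its named reservation can hand the pages back wholesale:
 *
 *	if (!reserve_mem_release_by_name("oops_buf"))
 *		pr_warn("no reserved region named oops_buf\n");
 *
 * Afterwards the region's size is recorded as 0, so later lookups fail.
 */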
2505 
2506 #ifdef CONFIG_KEXEC_HANDOVER
2507 
2508 static int __init reserved_mem_preserve(void)
2509 {
2510 	unsigned int nr_preserved = 0;
2511 	int err;
2512 
2513 	for (unsigned int i = 0; i < reserved_mem_count; i++, nr_preserved++) {
2514 		struct reserve_mem_table *map = &reserved_mem_table[i];
2515 		struct page *page = phys_to_page(map->start);
2516 		unsigned int nr_pages = map->size >> PAGE_SHIFT;
2517 
2518 		err = kho_preserve_pages(page, nr_pages);
2519 		if (err)
2520 			goto err_unpreserve;
2521 	}
2522 
2523 	return 0;
2524 
2525 err_unpreserve:
2526 	for (unsigned int i = 0; i < nr_preserved; i++) {
2527 		struct reserve_mem_table *map = &reserved_mem_table[i];
2528 		struct page *page = phys_to_page(map->start);
2529 		unsigned int nr_pages = map->size >> PAGE_SHIFT;
2530 
2531 		kho_unpreserve_pages(page, nr_pages);
2532 	}
2533 
2534 	return err;
2535 }
2536 
2537 static int __init prepare_kho_fdt(void)
2538 {
2539 	struct page *fdt_page;
2540 	void *fdt;
2541 	int err;
2542 
2543 	fdt_page = alloc_page(GFP_KERNEL);
2544 	if (!fdt_page) {
2545 		err = -ENOMEM;
2546 		goto err_report;
2547 	}
2548 
2549 	fdt = page_to_virt(fdt_page);
2550 	err = kho_preserve_pages(fdt_page, 1);
2551 	if (err)
2552 		goto err_free_fdt;
2553 
2554 	err |= fdt_create(fdt, PAGE_SIZE);
2555 	err |= fdt_finish_reservemap(fdt);
2556 	err |= fdt_begin_node(fdt, "");
2557 	err |= fdt_property_string(fdt, "compatible", MEMBLOCK_KHO_NODE_COMPATIBLE);
2558 
2559 	for (unsigned int i = 0; !err && i < reserved_mem_count; i++) {
2560 		struct reserve_mem_table *map = &reserved_mem_table[i];
2561 
2562 		err |= fdt_begin_node(fdt, map->name);
2563 		err |= fdt_property_string(fdt, "compatible", RESERVE_MEM_KHO_NODE_COMPATIBLE);
2564 		err |= fdt_property(fdt, "start", &map->start, sizeof(map->start));
2565 		err |= fdt_property(fdt, "size", &map->size, sizeof(map->size));
2566 		err |= fdt_end_node(fdt);
2567 	}
2568 	err |= fdt_end_node(fdt);
2569 	err |= fdt_finish(fdt);
2570 
2571 	if (err)
2572 		goto err_unpreserve_fdt;
2573 
2574 	err = kho_add_subtree(MEMBLOCK_KHO_FDT, fdt);
2575 	if (err)
2576 		goto err_unpreserve_fdt;
2577 
2578 	err = reserved_mem_preserve();
2579 	if (err)
2580 		goto err_remove_subtree;
2581 
2582 	return 0;
2583 
2584 err_remove_subtree:
2585 	kho_remove_subtree(fdt);
2586 err_unpreserve_fdt:
2587 	kho_unpreserve_pages(fdt_page, 1);
2588 err_free_fdt:
2589 	put_page(fdt_page);
2590 err_report:
2591 	pr_err("failed to prepare memblock FDT for KHO: %d\n", err);
2592 
2593 	return err;
2594 }
2595 
2596 static int __init reserve_mem_init(void)
2597 {
2598	if (!kho_is_enabled() || !reserved_mem_count)
2599		return 0;
2600
2601	return prepare_kho_fdt();
2607 }
2608 late_initcall(reserve_mem_init);
2609 
2610 static void *__init reserve_mem_kho_retrieve_fdt(void)
2611 {
2612 	phys_addr_t fdt_phys;
2613 	static void *fdt;
2614 	int err;
2615 
2616 	if (fdt)
2617 		return fdt;
2618 
2619 	err = kho_retrieve_subtree(MEMBLOCK_KHO_FDT, &fdt_phys);
2620 	if (err) {
2621 		if (err != -ENOENT)
2622 			pr_warn("failed to retrieve FDT '%s' from KHO: %d\n",
2623 				MEMBLOCK_KHO_FDT, err);
2624 		return NULL;
2625 	}
2626 
2627 	fdt = phys_to_virt(fdt_phys);
2628 
2629 	err = fdt_node_check_compatible(fdt, 0, MEMBLOCK_KHO_NODE_COMPATIBLE);
2630 	if (err) {
2631 		pr_warn("FDT '%s' is incompatible with '%s': %d\n",
2632 			MEMBLOCK_KHO_FDT, MEMBLOCK_KHO_NODE_COMPATIBLE, err);
2633 		fdt = NULL;
2634 	}
2635 
2636 	return fdt;
2637 }
2638 
2639 static bool __init reserve_mem_kho_revive(const char *name, phys_addr_t size,
2640 					  phys_addr_t align)
2641 {
2642 	int err, len_start, len_size, offset;
2643 	const phys_addr_t *p_start, *p_size;
2644 	const void *fdt;
2645 
2646 	fdt = reserve_mem_kho_retrieve_fdt();
2647 	if (!fdt)
2648 		return false;
2649 
2650 	offset = fdt_subnode_offset(fdt, 0, name);
2651 	if (offset < 0) {
2652 		pr_warn("FDT '%s' has no child '%s': %d\n",
2653 			MEMBLOCK_KHO_FDT, name, offset);
2654 		return false;
2655 	}
2656 	err = fdt_node_check_compatible(fdt, offset, RESERVE_MEM_KHO_NODE_COMPATIBLE);
2657 	if (err) {
2658 		pr_warn("Node '%s' is incompatible with '%s': %d\n",
2659 			name, RESERVE_MEM_KHO_NODE_COMPATIBLE, err);
2660 		return false;
2661 	}
2662 
2663 	p_start = fdt_getprop(fdt, offset, "start", &len_start);
2664 	p_size = fdt_getprop(fdt, offset, "size", &len_size);
2665 	if (!p_start || len_start != sizeof(*p_start) || !p_size ||
2666 	    len_size != sizeof(*p_size)) {
2667 		return false;
2668 	}
2669 
2670 	if (*p_start & (align - 1)) {
2671 		pr_warn("KHO reserve-mem '%s' has wrong alignment (0x%lx, 0x%lx)\n",
2672 			name, (long)align, (long)*p_start);
2673 		return false;
2674 	}
2675 
2676 	if (*p_size != size) {
2677 		pr_warn("KHO reserve-mem '%s' has wrong size (0x%lx != 0x%lx)\n",
2678 			name, (long)*p_size, (long)size);
2679 		return false;
2680 	}
2681 
2682 	reserved_mem_add(*p_start, size, name);
2683 	pr_info("Revived memory reservation '%s' from KHO\n", name);
2684 
2685 	return true;
2686 }
2687 #else
2688 static bool __init reserve_mem_kho_revive(const char *name, phys_addr_t size,
2689 					  phys_addr_t align)
2690 {
2691 	return false;
2692 }
2693 #endif /* CONFIG_KEXEC_HANDOVER */
2694 
2695 /*
2696  * Parse reserve_mem=nn:align:name
2697  */
2698 static int __init reserve_mem(char *p)
2699 {
2700 	phys_addr_t start, size, align, tmp;
2701 	char *name;
2702 	char *oldp;
2703 	int len;
2704 
2705 	if (!p)
2706 		goto err_param;
2707 
2708 	/* Check if there's room for more reserved memory */
2709 	if (reserved_mem_count >= RESERVE_MEM_MAX_ENTRIES) {
2710 		pr_err("reserve_mem: no more room for reserved memory\n");
2711 		return -EBUSY;
2712 	}
2713 
2714 	oldp = p;
2715 	size = memparse(p, &p);
2716 	if (!size || p == oldp)
2717 		goto err_param;
2718 
2719 	if (*p != ':')
2720 		goto err_param;
2721 
2722 	align = memparse(p+1, &p);
2723 	if (*p != ':')
2724 		goto err_param;
2725 
2726 	/*
2727	 * memblock_phys_alloc() doesn't like an align of zero,
2728	 * but it is OK for this command to specify one.
2729 	 */
2730 	if (align < SMP_CACHE_BYTES)
2731 		align = SMP_CACHE_BYTES;
2732 
2733 	name = p + 1;
2734 	len = strlen(name);
2735 
2736 	/* name needs to have length but not too big */
2737 	if (!len || len >= RESERVE_MEM_NAME_SIZE)
2738 		goto err_param;
2739 
2740 	/* Make sure that name has text */
2741 	for (p = name; *p; p++) {
2742 		if (!isspace(*p))
2743 			break;
2744 	}
2745 	if (!*p)
2746 		goto err_param;
2747 
2748 	/* Make sure the name is not already used */
2749 	if (reserve_mem_find_by_name(name, &start, &tmp)) {
2750 		pr_err("reserve_mem: name \"%s\" was already used\n", name);
2751 		return -EBUSY;
2752 	}
2753 
2754 	/* Pick previous allocations up from KHO if available */
2755 	if (reserve_mem_kho_revive(name, size, align))
2756 		return 1;
2757 
2758 	/* TODO: Allocation must be outside of scratch region */
2759 	start = memblock_phys_alloc(size, align);
2760 	if (!start) {
2761 		pr_err("reserve_mem: memblock allocation failed\n");
2762 		return -ENOMEM;
2763 	}
2764 
2765 	reserved_mem_add(start, size, name);
2766 
2767 	return 1;
2768 err_param:
2769 	pr_err("reserve_mem: empty or malformed parameter\n");
2770 	return -EINVAL;
2771 }
2772 __setup("reserve_mem=", reserve_mem);
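
/*
 * Example command line (illustrative): "reserve_mem=2M:4096:oops_buf"
 * reserves 2 MiB aligned to 4096 bytes and makes the region discoverable
 * via reserve_mem_find_by_name("oops_buf", ...). The name must be unique
 * and shorter than RESERVE_MEM_NAME_SIZE characters.
 */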
2773 
2774 #ifdef CONFIG_DEBUG_FS
2775 #ifdef CONFIG_ARCH_KEEP_MEMBLOCK
2776 static const char * const flagname[] = {
2777 	[ilog2(MEMBLOCK_HOTPLUG)] = "HOTPLUG",
2778 	[ilog2(MEMBLOCK_MIRROR)] = "MIRROR",
2779 	[ilog2(MEMBLOCK_NOMAP)] = "NOMAP",
2780 	[ilog2(MEMBLOCK_DRIVER_MANAGED)] = "DRV_MNG",
2781 	[ilog2(MEMBLOCK_RSRV_NOINIT)] = "RSV_NIT",
2782 	[ilog2(MEMBLOCK_RSRV_KERN)] = "RSV_KERN",
2783 	[ilog2(MEMBLOCK_KHO_SCRATCH)] = "KHO_SCRATCH",
2784 };
2785 
2786 static int memblock_debug_show(struct seq_file *m, void *private)
2787 {
2788 	struct memblock_type *type = m->private;
2789 	struct memblock_region *reg;
2790 	int i, j, nid;
2791 	unsigned int count = ARRAY_SIZE(flagname);
2792 	phys_addr_t end;
2793 
2794 	for (i = 0; i < type->cnt; i++) {
2795 		reg = &type->regions[i];
2796 		end = reg->base + reg->size - 1;
2797 		nid = memblock_get_region_node(reg);
2798 
2799 		seq_printf(m, "%4d: ", i);
2800 		seq_printf(m, "%pa..%pa ", &reg->base, &end);
2801 		if (numa_valid_node(nid))
2802 			seq_printf(m, "%4d ", nid);
2803 		else
2804 			seq_printf(m, "%4c ", 'x');
2805 		if (reg->flags) {
2806 			for (j = 0; j < count; j++) {
2807 				if (reg->flags & (1U << j)) {
2808 					seq_printf(m, "%s\n", flagname[j]);
2809 					break;
2810 				}
2811 			}
2812 			if (j == count)
2813 				seq_printf(m, "%s\n", "UNKNOWN");
2814 		} else {
2815 			seq_printf(m, "%s\n", "NONE");
2816 		}
2817 	}
2818 	return 0;
2819 }
2820 DEFINE_SHOW_ATTRIBUTE(memblock_debug);
2821 
2822 static inline void memblock_debugfs_expose_arrays(struct dentry *root)
2823 {
2824 	debugfs_create_file("memory", 0444, root,
2825 			    &memblock.memory, &memblock_debug_fops);
2826 	debugfs_create_file("reserved", 0444, root,
2827 			    &memblock.reserved, &memblock_debug_fops);
2828 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
2829 	debugfs_create_file("physmem", 0444, root, &physmem,
2830 			    &memblock_debug_fops);
2831 #endif
2832 }
2833 
2834 #else
2835 
2836 static inline void memblock_debugfs_expose_arrays(struct dentry *root) { }
2837 
2838 #endif /* CONFIG_ARCH_KEEP_MEMBLOCK */
2839 
2840 static int memblock_reserve_mem_show(struct seq_file *m, void *private)
2841 {
2842 	struct reserve_mem_table *map;
2843 	char txtsz[16];
2844 
2845 	guard(mutex)(&reserve_mem_lock);
2846 	for (int i = 0; i < reserved_mem_count; i++) {
2847 		map = &reserved_mem_table[i];
2848 		if (!map->size)
2849 			continue;
2850 
2851 		memset(txtsz, 0, sizeof(txtsz));
2852 		string_get_size(map->size, 1, STRING_UNITS_2, txtsz, sizeof(txtsz));
2853 		seq_printf(m, "%s\t\t(%s)\n", map->name, txtsz);
2854 	}
2855 
2856 	return 0;
2857 }
2858 DEFINE_SHOW_ATTRIBUTE(memblock_reserve_mem);
2859 
2860 static int __init memblock_init_debugfs(void)
2861 {
2862 	struct dentry *root;
2863 
2864 	if (!IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !reserved_mem_count)
2865 		return 0;
2866 
2867 	root = debugfs_create_dir("memblock", NULL);
2868 
2869 	if (reserved_mem_count)
2870 		debugfs_create_file("reserve_mem_param", 0444, root, NULL,
2871 				    &memblock_reserve_mem_fops);
2872 
2873 	memblock_debugfs_expose_arrays(root);
2874 	return 0;
2875 }
2876 __initcall(memblock_init_debugfs);
2877 
2878 #endif /* CONFIG_DEBUG_FS */
2879