xref: /linux/mm/memblock.c (revision 60e13231561b3a4c5269bfa1ef6c0569ad6f28ec)
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

struct memblock memblock __initdata_memblock;

int memblock_debug __initdata_memblock;
int memblock_can_resize __initdata_memblock;
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;

/* inline so we don't get a warning when pr_debug is compiled out */
static inline const char *memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}

/*
 * Address comparison utilities
 */

static phys_addr_t __init_memblock memblock_align_down(phys_addr_t addr, phys_addr_t size)
{
	return addr & ~(size - 1);
}

static phys_addr_t __init_memblock memblock_align_up(phys_addr_t addr, phys_addr_t size)
{
	return (addr + (size - 1)) & ~(size - 1);
}
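
/*
 * Illustrative note (added commentary, not in the original source): both
 * helpers assume "size" is a power of two, e.g.
 * memblock_align_down(0x12345, 0x1000) yields 0x12000 and
 * memblock_align_up(0x12345, 0x1000) yields 0x13000. The bit masking
 * does not work for non-power-of-two alignments.
 */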

static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}
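
/*
 * Illustrative note (added commentary): regions behave as half-open
 * intervals [base, base + size), so (0x1000, 0x1000) and (0x1800, 0x1000)
 * overlap, while (0x1000, 0x1000) and (0x2000, 0x1000) are merely
 * adjacent and report no overlap.
 */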

long __init_memblock memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ? i : -1;
}

/*
 * Find, allocate, deallocate or reserve unreserved regions. All allocations
 * are top-down.
 */

static phys_addr_t __init_memblock memblock_find_region(phys_addr_t start, phys_addr_t end,
					  phys_addr_t size, phys_addr_t align)
{
	phys_addr_t base, res_base;
	long j;

	/* In case a huge size is requested */
	if (end < size)
		return MEMBLOCK_ERROR;

	base = memblock_align_down((end - size), align);

	/* Prevent allocations returning 0 as it's also used to
	 * indicate an allocation failure
	 */
	if (start == 0)
		start = PAGE_SIZE;

	while (start <= base) {
		j = memblock_overlaps_region(&memblock.reserved, base, size);
		if (j < 0)
			return base;
		res_base = memblock.reserved.regions[j].base;
		if (res_base < size)
			break;
		base = memblock_align_down(res_base - size, align);
	}

	return MEMBLOCK_ERROR;
}
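
/*
 * Sketch of the scan above (added commentary): starting from the aligned
 * top of [start, end), each collision with a reserved region moves the
 * candidate window just below that region's base (realigned) and
 * retries, walking downward until a free window fits or the candidate
 * drops below 'start'.
 */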

static phys_addr_t __init_memblock memblock_find_base(phys_addr_t size,
			phys_addr_t align, phys_addr_t start, phys_addr_t end)
{
	long i;

	BUG_ON(0 == size);

	/* Pump up the end address: "accessible" means up to the current limit */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* We do a top-down search; this tends to limit memory
	 * fragmentation by keeping early boot allocs near the
	 * top of memory
	 */
	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
		phys_addr_t memblockbase = memblock.memory.regions[i].base;
		phys_addr_t memblocksize = memblock.memory.regions[i].size;
		phys_addr_t bottom, top, found;

		if (memblocksize < size)
			continue;
		if ((memblockbase + memblocksize) <= start)
			break;
		bottom = max(memblockbase, start);
		top = min(memblockbase + memblocksize, end);
		if (bottom >= top)
			continue;
		found = memblock_find_region(bottom, top, size, align);
		if (found != MEMBLOCK_ERROR)
			return found;
	}
	return MEMBLOCK_ERROR;
}

/*
 * Find a free area with specified alignment in a specific range.
 */
u64 __init_memblock memblock_find_in_range(u64 start, u64 end, u64 size, u64 align)
{
	return memblock_find_base(size, align, start, end);
}

/*
 * Free memblock.reserved.regions
 */
int __init_memblock memblock_free_reserved_regions(void)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	return memblock_free(__pa(memblock.reserved.regions),
		 sizeof(struct memblock_region) * memblock.reserved.max);
}

/*
 * Reserve memblock.reserved.regions
 */
int __init_memblock memblock_reserve_reserved_regions(void)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	return memblock_reserve(__pa(memblock.reserved.regions),
		 sizeof(struct memblock_region) * memblock.reserved.max);
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	unsigned long i;

	for (i = r; i < type->cnt - 1; i++) {
		type->regions[i].base = type->regions[i + 1].base;
		type->regions[i].size = type->regions[i + 1].size;
	}
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
	}
}

/* Defined below but needed now */
static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size);

static int __init_memblock memblock_double_array(struct memblock_type *type)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab is available and we use it,
	 * or we use MEMBLOCK for allocations. That means that this is
	 * unsafe to use when bootmem is currently active (unless bootmem
	 * itself is implemented on top of MEMBLOCK, which isn't the case yet).
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab is
	 * active for memory hotplug operations
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array == NULL ? MEMBLOCK_ERROR : __pa(new_array);
	} else
		addr = memblock_find_base(new_size, sizeof(phys_addr_t), 0, MEMBLOCK_ALLOC_ACCESSIBLE);
	if (addr == MEMBLOCK_ERROR) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries!\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}
	new_array = __va(addr);

	memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]\n",
		 memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1);

	/* Found space, we now need to move the array over before
	 * we add the reserved region since it may be our reserved
	 * array itself that is full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* If we use SLAB, that's it, we are done */
	if (use_slab)
		return 0;

	/* Add the new reserved region now. Should not fail! */
	BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size));

	/* If the array wasn't our static init one, then free it. We only do
	 * that before SLAB is available as later on, we don't know whether
	 * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
	 * anyway.
	 */
	if (old_array != memblock_memory_init_regions &&
	    old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_size);

	return 0;
}

extern int __init_memblock __weak memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
					  phys_addr_t addr2, phys_addr_t size2)
{
	return 1;
}

static long __init_memblock memblock_add_region(struct memblock_type *type,
						phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size;
	int i, slot = -1;

	/* First try and coalesce this MEMBLOCK with others */
	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rend = rgn->base + rgn->size;

		/* Exit if there are no possible hits */
		if (rgn->base > end || rgn->size == 0)
			break;

		/* Check if we are fully enclosed within an existing
		 * block
		 */
		if (rgn->base <= base && rend >= end)
			return 0;

		/* Check if we overlap or are adjacent with the bottom
		 * of a block.
		 */
		if (base < rgn->base && end >= rgn->base) {
			/* If we can't coalesce, create a new block */
			if (!memblock_memory_can_coalesce(base, size,
							  rgn->base,
							  rgn->size)) {
				/* Overlap & can't coalesce are mutually
				 * exclusive; if you do that, be prepared
				 * for trouble
				 */
				WARN_ON(end != rgn->base);
				goto new_block;
			}
			/* We extend the bottom of the block down to our
			 * base
			 */
			rgn->base = base;
			rgn->size = rend - base;

			/* Return if we have nothing else to allocate
			 * (fully coalesced)
			 */
			if (rend >= end)
				return 0;

			/* We continue processing from the end of the
			 * coalesced block.
			 */
			base = rend;
			size = end - base;
		}

		/* Now check if we overlap or are adjacent with the
		 * top of a block
		 */
		if (base <= rend && end >= rend) {
			/* If we can't coalesce, create a new block */
			if (!memblock_memory_can_coalesce(rgn->base,
							  rgn->size,
							  base, size)) {
				/* Overlap & can't coalesce are mutually
				 * exclusive; if you do that, be prepared
				 * for trouble
				 */
				WARN_ON(rend != base);
				goto new_block;
			}
			/* We adjust our base down to enclose the
			 * original block and destroy it. It will be
			 * part of our new allocation. Since we've
			 * freed an entry, we know we won't fail
			 * to allocate one later, so we won't risk
			 * losing the original block allocation.
			 */
			size += (base - rgn->base);
			base = rgn->base;
			memblock_remove_region(type, i--);
		}
	}

	/* If the array is empty, special case, replace the fake
	 * filler region and return
	 */
	if ((type->cnt == 1) && (type->regions[0].size == 0)) {
		type->regions[0].base = base;
		type->regions[0].size = size;
		return 0;
	}

 new_block:
	/* If we are out of space, we fail. It's too late to resize the array
	 * but then this shouldn't have happened in the first place.
	 */
	if (WARN_ON(type->cnt >= type->max))
		return -1;

	/* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
	for (i = type->cnt - 1; i >= 0; i--) {
		if (base < type->regions[i].base) {
			type->regions[i+1].base = type->regions[i].base;
			type->regions[i+1].size = type->regions[i].size;
		} else {
			type->regions[i+1].base = base;
			type->regions[i+1].size = size;
			slot = i + 1;
			break;
		}
	}
	if (base < type->regions[0].base) {
		type->regions[0].base = base;
		type->regions[0].size = size;
		slot = 0;
	}
	type->cnt++;

	/* Is the array full? Try to resize it. If that fails, we undo
	 * our allocation and return an error
	 */
	if (type->cnt == type->max && memblock_double_array(type)) {
		BUG_ON(slot < 0);
		memblock_remove_region(type, slot);
		return -1;
	}

	return 0;
}
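
/*
 * Worked example (added commentary, addresses are hypothetical): with an
 * existing region [0x2000, 0x3000), adding [0x1000, 0x2800) extends the
 * region's bottom down to 0x1000; then adding [0x2800, 0x4000) hits the
 * "top of a block" case, absorbing the region and re-adding it as
 * [0x1000, 0x4000). Only a block that touches no neighbour (or that the
 * __weak coalesce hook rejects) takes the sorted-insert path.
 */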

long __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(&memblock.memory, base, size);
}

static long __init_memblock __memblock_remove(struct memblock_type *type,
					      phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size;
	int i;

	/* Walk through the array for collisions */
	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rend = rgn->base + rgn->size;

		/* Nothing more to do, exit */
		if (rgn->base > end || rgn->size == 0)
			break;

		/* If we fully enclose the block, drop it */
		if (base <= rgn->base && end >= rend) {
			memblock_remove_region(type, i--);
			continue;
		}

		/* If we are fully enclosed within a block
		 * then we need to split it and we are done
		 */
		if (base > rgn->base && end < rend) {
			rgn->size = base - rgn->base;
			if (!memblock_add_region(type, end, rend - end))
				return 0;
			/* Failure to split is bad; we at least
			 * restore the block before erroring
			 */
			rgn->size = rend - rgn->base;
			WARN_ON(1);
			return -1;
		}

		/* Check if we need to trim the bottom of a block */
		if (rgn->base < end && rend > end) {
			rgn->size -= end - rgn->base;
			rgn->base = end;
			break;
		}

		/* And check if we need to trim the top of a block */
		if (base < rend)
			rgn->size -= rend - base;
	}
	return 0;
}
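
/*
 * Illustrative note (added commentary): removing [0x2000, 0x3000) from a
 * region [0x1000, 0x4000) takes the split path above, shrinking the
 * region to [0x1000, 0x2000) and re-adding [0x3000, 0x4000) for the
 * remainder.
 */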

long __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.memory, base, size);
}

long __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.reserved, base, size);
}

long __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *_rgn = &memblock.reserved;

	BUG_ON(0 == size);

	return memblock_add_region(_rgn, base, size);
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t found;

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = memblock_align_up(size, align);

	found = memblock_find_base(size, align, 0, max_addr);
	if (found != MEMBLOCK_ERROR &&
	    !memblock_add_region(&memblock.reserved, found, size))
		return found;

	return 0;
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
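
/*
 * Typical boot-time usage (a sketch based on the API in this file;
 * "membase", "memsize" and the allocation size are hypothetical):
 *
 *	memblock_init();
 *	memblock_add(membase, memsize);		// e.g. from the firmware memory map
 *	memblock_analyze();
 *	paddr = memblock_alloc(1024 * 1024, PAGE_SIZE);
 *
 * memblock_alloc() panics on failure (via memblock_alloc_base());
 * callers that want to handle failure use __memblock_alloc_base(),
 * which returns 0 instead.
 */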
/*
 * Additional node-local allocators. Search for node memory is bottom up
 * and walks memblock regions within that node bottom-up as well, but allocation
 * within a memblock region is top-down. XXX I plan to fix that at some stage
 *
 * WARNING: Only available after early_node_map[] has been populated,
 * on some architectures, that is after all the calls to add_active_range()
 * have been done to populate it.
 */

phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid)
{
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
	/*
	 * This code originates from sparc which really wants us to walk by
	 * addresses and return the nid. This is not very convenient for
	 * early_pfn_map[] users as the map isn't sorted yet, and it really
	 * wants to be walked by nid.
	 *
	 * For now, I implement the inefficient method below which walks the early
	 * map multiple times. Eventually we may want to use an ARCH config option
	 * to implement a completely different method for both cases.
	 */
	unsigned long start_pfn, end_pfn;
	int i;

	for (i = 0; i < MAX_NUMNODES; i++) {
		get_pfn_range_for_nid(i, &start_pfn, &end_pfn);
		if (start < PFN_PHYS(start_pfn) || start >= PFN_PHYS(end_pfn))
			continue;
		*nid = i;
		return min(end, PFN_PHYS(end_pfn));
	}
#endif
	*nid = 0;

	return end;
}

static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp,
					       phys_addr_t size,
					       phys_addr_t align, int nid)
{
	phys_addr_t start, end;

	start = mp->base;
	end = start + mp->size;

	start = memblock_align_up(start, align);
	while (start < end) {
		phys_addr_t this_end;
		int this_nid;

		this_end = memblock_nid_range(start, end, &this_nid);
		if (this_nid == nid) {
			phys_addr_t ret = memblock_find_region(start, this_end, size, align);
			if (ret != MEMBLOCK_ERROR &&
			    !memblock_add_region(&memblock.reserved, ret, size))
				return ret;
		}
		start = this_end;
	}

	return MEMBLOCK_ERROR;
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	struct memblock_type *mem = &memblock.memory;
	int i;

	BUG_ON(0 == size);

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = memblock_align_up(size, align);

	/* We do a bottom-up search for a region with the right
	 * nid since that's easier considering how memblock_nid_range()
	 * works
	 */
	for (i = 0; i < mem->cnt; i++) {
		phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i],
					       size, align, nid);
		if (ret != MEMBLOCK_ERROR)
			return ret;
	}

	return 0;
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
}

/*
 * Remaining API functions
 */

/* You must call memblock_analyze() before this. */
phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory_size;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

/* You must call memblock_analyze() after this. */
void __init memblock_enforce_memory_limit(phys_addr_t memory_limit)
{
	unsigned long i;
	phys_addr_t limit;
	struct memblock_region *p;

	if (!memory_limit)
		return;

	/* Truncate the memblock regions to satisfy the memory limit. */
	limit = memory_limit;
	for (i = 0; i < memblock.memory.cnt; i++) {
		if (limit > memblock.memory.regions[i].size) {
			limit -= memblock.memory.regions[i].size;
			continue;
		}

		memblock.memory.regions[i].size = limit;
		memblock.memory.cnt = i + 1;
		break;
	}

	memory_limit = memblock_end_of_DRAM();

	/* And truncate any reserves above the limit also. */
	for (i = 0; i < memblock.reserved.cnt; i++) {
		p = &memblock.reserved.regions[i];

		if (p->base > memory_limit)
			p->size = 0;
		else if ((p->base + p->size) > memory_limit)
			p->size = memory_limit - p->base;

		if (p->size == 0) {
			memblock_remove_region(&memblock.reserved, i);
			i--;
		}
	}
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}
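
/*
 * Note (added commentary): the binary search above relies on the region
 * array being sorted by base and non-overlapping, which
 * memblock_add_region() maintains. E.g. with regions [0x0, 0x1000) and
 * [0x2000, 0x3000), a lookup of 0x2800 returns index 1, while 0x1800
 * falls between the regions and returns -1.
 */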

int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= (base + size);
}

int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

static void __init_memblock memblock_dump(struct memblock_type *region, char *name)
{
	unsigned long long base, size;
	int i;

	pr_info(" %s.cnt  = 0x%lx\n", name, region->cnt);

	for (i = 0; i < region->cnt; i++) {
		base = region->regions[i].base;
		size = region->regions[i].size;

		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes\n",
		    name, i, base, base + size - 1, size);
	}
}

void __init_memblock memblock_dump_all(void)
{
	if (!memblock_debug)
		return;

	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}

void __init memblock_analyze(void)
{
	int i;

	/* Check marker in the unused last array entry */
	WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= MEMBLOCK_INACTIVE);
	WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= MEMBLOCK_INACTIVE);

	memblock.memory_size = 0;

	for (i = 0; i < memblock.memory.cnt; i++)
		memblock.memory_size += memblock.memory.regions[i].size;

	/* We allow resizing from here on */
	memblock_can_resize = 1;
}

void __init memblock_init(void)
{
	static int init_done __initdata = 0;

	if (init_done)
		return;
	init_done = 1;

	/* Hookup the initial arrays */
	memblock.memory.regions	= memblock_memory_init_regions;
	memblock.memory.max		= INIT_MEMBLOCK_REGIONS;
	memblock.reserved.regions	= memblock_reserved_init_regions;
	memblock.reserved.max	= INIT_MEMBLOCK_REGIONS;

	/* Write a marker in the unused last array entry */
	memblock.memory.regions[INIT_MEMBLOCK_REGIONS].base = MEMBLOCK_INACTIVE;
	memblock.reserved.regions[INIT_MEMBLOCK_REGIONS].base = MEMBLOCK_INACTIVE;

	/* Create a dummy zero size MEMBLOCK which will get coalesced away later.
	 * This simplifies the memblock_add_region() code.
	 */
	memblock.memory.regions[0].base = 0;
	memblock.memory.regions[0].size = 0;
	memblock.memory.cnt = 1;

	/* Ditto. */
	memblock.reserved.regions[0].base = 0;
	memblock.reserved.regions[0].size = 0;
	memblock.reserved.cnt = 1;

	memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);
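
/*
 * Note (added commentary): booting with "memblock=debug" on the kernel
 * command line sets memblock_debug above, which enables
 * memblock_dump_all() output and the memblock_dbg() messages.
 */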

#if defined(CONFIG_DEBUG_FS) && !defined(ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		seq_printf(m, "%4d: ", i);
		if (sizeof(phys_addr_t) == 4)
			seq_printf(m, "0x%08lx..0x%08lx\n",
				   (unsigned long)reg->base,
				   (unsigned long)(reg->base + reg->size - 1));
		else
			seq_printf(m, "0x%016llx..0x%016llx\n",
				   (unsigned long long)reg->base,
				   (unsigned long long)(reg->base + reg->size - 1));
	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);
	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);

	return 0;
}
__initcall(memblock_init_debugfs);
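
/*
 * Note (added commentary): with debugfs mounted at the conventional
 * /sys/kernel/debug, the files created above appear as
 * /sys/kernel/debug/memblock/memory and
 * /sys/kernel/debug/memblock/reserved, one "index: start..end" line per
 * region as formatted by memblock_debug_show().
 */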

#endif /* CONFIG_DEBUG_FS && !ARCH_DISCARD_MEMBLOCK */