/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H

/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <asm/dma.h>

extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;

/*
 * highest page
 */
extern unsigned long max_pfn;
/*
 * highest possible page
 */
extern unsigned long long max_possible_pfn;

/**
 * enum memblock_flags - definition of memory region attributes
 * @MEMBLOCK_NONE: no special request
 * @MEMBLOCK_HOTPLUG: memory region indicated in the firmware-provided memory
 * map during early boot as hot(un)pluggable system RAM (e.g., memory range
 * that might get hotunplugged later). With "movable_node" set on the kernel
 * commandline, try keeping this memory region hotunpluggable. Does not apply
 * to memblocks added ("hotplugged") after early boot.
 * @MEMBLOCK_MIRROR: mirrored region
 * @MEMBLOCK_NOMAP: don't add to kernel direct mapping and treat as
 * reserved in the memory map; refer to memblock_mark_nomap() description
 * for further details
 * @MEMBLOCK_DRIVER_MANAGED: memory region that is always detected and added
 * via a driver, and never indicated in the firmware-provided memory map as
 * system RAM. This corresponds to IORESOURCE_SYSRAM_DRIVER_MANAGED in the
 * kernel resource tree.
 * @MEMBLOCK_RSRV_NOINIT: reserved memory region for which struct pages are not
 * fully initialized. Users of this flag are responsible for properly
 * initializing the struct pages of this region.
 * @MEMBLOCK_RSRV_KERN: memory region that is reserved for kernel use,
 * either explicitly with memblock_reserve_kern() or via memblock
 * allocation APIs. All memblock allocations set this flag.
 * @MEMBLOCK_KHO_SCRATCH: memory region that kexec can pass to the next
 * kernel in handover mode. During early boot, we do not know about all
 * memory reservations yet, so we get scratch memory from the previous
 * kernel that we know is good to use. It is the only memory that
 * allocations may happen from in this phase.
 */
enum memblock_flags {
	MEMBLOCK_NONE		= 0x0,	/* No special request */
	MEMBLOCK_HOTPLUG	= 0x1,	/* hotpluggable region */
	MEMBLOCK_MIRROR		= 0x2,	/* mirrored region */
	MEMBLOCK_NOMAP		= 0x4,	/* don't add to kernel direct mapping */
	MEMBLOCK_DRIVER_MANAGED = 0x8,	/* always detected via a driver */
	MEMBLOCK_RSRV_NOINIT	= 0x10,	/* don't initialize struct pages */
	MEMBLOCK_RSRV_KERN	= 0x20,	/* memory reserved for kernel use */
	MEMBLOCK_KHO_SCRATCH	= 0x40,	/* scratch memory for kexec handover */
};

/**
 * struct memblock_region - represents a memory region
 * @base: base address of the region
 * @size: size of the region
 * @flags: memory region attributes
 * @nid: NUMA node id
 */
struct memblock_region {
	phys_addr_t base;
	phys_addr_t size;
	enum memblock_flags flags;
#ifdef CONFIG_NUMA
	int nid;
#endif
};

/**
 * struct memblock_type - collection of memory regions of certain type
 * @cnt: number of regions
 * @max: size of the allocated array
 * @total_size: size of all regions
 * @regions: array of regions
 * @name: the memory type symbolic name
 */
struct memblock_type {
	unsigned long cnt;
	unsigned long max;
	phys_addr_t total_size;
	struct memblock_region *regions;
	char *name;
};

/**
 * struct memblock - memblock allocator metadata
 * @bottom_up: is bottom up direction?
 * @current_limit: physical address of the current allocation limit
 * @memory: usable memory regions
 * @reserved: reserved memory regions
 */
struct memblock {
	bool bottom_up;  /* is bottom up direction? */
	phys_addr_t current_limit;
	struct memblock_type memory;
	struct memblock_type reserved;
};

extern struct memblock memblock;

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
static inline void memblock_discard(void) {}
#endif

void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid,
		      enum memblock_flags flags);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_phys_free(phys_addr_t base, phys_addr_t size);
int __memblock_reserve(phys_addr_t base, phys_addr_t size, int nid,
		       enum memblock_flags flags);

static __always_inline int memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	return __memblock_reserve(base, size, NUMA_NO_NODE, 0);
}

static __always_inline int memblock_reserve_kern(phys_addr_t base, phys_addr_t size)
{
	return __memblock_reserve(base, size, NUMA_NO_NODE, MEMBLOCK_RSRV_KERN);
}
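
/*
 * Usage sketch (hypothetical caller code, shown for illustration only):
 * early arch setup code typically registers firmware-reported RAM with
 * memblock_add() and then reserves ranges that must never reach the page
 * allocator. The addresses and sizes below are made up.
 *
 *	memblock_add(0x80000000, 0x40000000);		// RAM from firmware
 *	memblock_reserve(0x80200000, 0x02000000);	// e.g. kernel image
 *	memblock_reserve_kern(0x84000000, 0x00400000);	// kernel-internal data
 */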

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
#endif
void memblock_trim_memory(phys_addr_t align);
unsigned long memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				     phys_addr_t base2, phys_addr_t size2);
bool memblock_overlaps_region(struct memblock_type *type,
			      phys_addr_t base, phys_addr_t size);
bool memblock_validate_numa_coverage(unsigned long threshold_bytes);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
int memblock_reserved_mark_noinit(phys_addr_t base, phys_addr_t size);
int memblock_mark_kho_scratch(phys_addr_t base, phys_addr_t size);
int memblock_clear_kho_scratch(phys_addr_t base, phys_addr_t size);

void memblock_free(void *ptr, size_t size);
void reset_all_zones_managed_pages(void);

/* Low level functions */
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
			  struct memblock_type *type_a,
			  struct memblock_type *type_b, phys_addr_t *out_start,
			  phys_addr_t *out_end, int *out_nid);

void memblock_free_late(phys_addr_t base, phys_addr_t size);

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
					phys_addr_t *out_start,
					phys_addr_t *out_end)
{
	extern struct memblock_type physmem;

	__next_mem_range(idx, NUMA_NO_NODE, MEMBLOCK_NONE, &physmem, type,
			 out_start, out_end, NULL);
}

/**
 * for_each_physmem_range - iterate through physmem areas not included in type.
 * @i: u64 used as loop variable
 * @type: ptr to memblock_type which excludes from the iteration, can be %NULL
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */
#define for_each_physmem_range(i, type, p_start, p_end)			\
	for (i = 0, __next_physmem_range(&i, type, p_start, p_end);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_physmem_range(&i, type, p_start, p_end))
#endif /* CONFIG_HAVE_MEMBLOCK_PHYS_MAP */

/**
 * __for_each_mem_range - iterate through memblock areas from type_a and not
 * included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define __for_each_mem_range(i, type_a, type_b, nid, flags,		\
			   p_start, p_end, p_nid)			\
	for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,	\
				     p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range(&i, nid, flags, type_a, type_b,		\
			      p_start, p_end, p_nid))

/**
 * __for_each_mem_range_rev - reverse iterate through memblock areas from
 * type_a and not included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define __for_each_mem_range_rev(i, type_a, type_b, nid, flags,		\
				 p_start, p_end, p_nid)			\
	for (i = (u64)ULLONG_MAX,					\
		     __next_mem_range_rev(&i, nid, flags, type_a, type_b, \
					  p_start, p_end, p_nid);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				  p_start, p_end, p_nid))

/**
 * for_each_mem_range - iterate through memory areas.
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */
#define for_each_mem_range(i, p_start, p_end) \
	__for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,	\
			     MEMBLOCK_HOTPLUG | MEMBLOCK_DRIVER_MANAGED, \
			     p_start, p_end, NULL)
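
/*
 * Usage sketch (hypothetical caller code, shown for illustration only):
 * walking every usable memory range, e.g. to print the early memory map.
 * The local variables are made up.
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_mem_range(i, &start, &end)
 *		pr_info("memory range: [%pa-%pa]\n", &start, &end);
 */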

/**
 * for_each_mem_range_rev - reverse iterate through memory areas.
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */
#define for_each_mem_range_rev(i, p_start, p_end)			\
	__for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, \
				 MEMBLOCK_HOTPLUG | MEMBLOCK_DRIVER_MANAGED,\
				 p_start, p_end, NULL)

/**
 * for_each_reserved_mem_range - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */
#define for_each_reserved_mem_range(i, p_start, p_end)			\
	__for_each_mem_range(i, &memblock.reserved, NULL, NUMA_NO_NODE,	\
			     MEMBLOCK_NONE, p_start, p_end, NULL)

static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool memblock_is_mirror(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_MIRROR;
}

static inline bool memblock_is_nomap(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_NOMAP;
}

static inline bool memblock_is_reserved_noinit(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_RSRV_NOINIT;
}

static inline bool memblock_is_driver_managed(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_DRIVER_MANAGED;
}

static inline bool memblock_is_kho_scratch(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_KHO_SCRATCH;
}

int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
			    unsigned long  *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
			  unsigned long *out_end_pfn, int *out_nid);

/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
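
/*
 * Usage sketch (hypothetical caller code, shown for illustration only):
 * walking early memory as pfn ranges together with their node ids, e.g.
 * when sizing per-node structures. The locals are made up.
 *
 *	unsigned long start_pfn, end_pfn;
 *	int i, nid;
 *
 *	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 *		pr_info("node %d: pfn range %lx-%lx\n", nid, start_pfn, end_pfn);
 */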

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
				  unsigned long *out_spfn,
				  unsigned long *out_epfn);

/**
 * for_each_free_mem_pfn_range_in_zone_from - iterate through zone specific
 * free memblock areas from a given point
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone, continuing from current position. Available as soon as memblock is
 * initialized.
 */
#define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \
	for (; i != U64_MAX;					  \
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock.  Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)	\
	__for_each_mem_range(i, &memblock.memory, &memblock.reserved,	\
			     nid, flags, p_start, p_end, p_nid)
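
/*
 * Usage sketch (hypothetical caller code, shown for illustration only):
 * summing up how much memory is still free (memory && !reserved), across
 * all nodes and without flag-based filtering. The locals are made up.
 *
 *	phys_addr_t start, end, free = 0;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, NULL)
 *		free += end - start;
 */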

/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order.  Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,	\
					p_nid)				\
	__for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
				 nid, flags, p_start, p_end, p_nid)

int memblock_set_node(phys_addr_t base, phys_addr_t size,
		      struct memblock_type *type, int nid);

#ifdef CONFIG_NUMA
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
	r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return 0;
}
#endif /* CONFIG_NUMA */

/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0
/*
 *  MEMBLOCK_ALLOC_NOLEAKTRACE avoids kmemleak tracing. It implies
 *  MEMBLOCK_ALLOC_ACCESSIBLE
 */
#define MEMBLOCK_ALLOC_NOLEAKTRACE	1

/* We are using top down, so it is safe to use 0 here */
#define MEMBLOCK_LOW_LIMIT 0

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
#endif

phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align,
				      phys_addr_t start, phys_addr_t end);
phys_addr_t memblock_alloc_range_nid(phys_addr_t size,
				      phys_addr_t align, phys_addr_t start,
				      phys_addr_t end, int nid, bool exact_nid);
phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

static __always_inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
						       phys_addr_t align)
{
	return memblock_phys_alloc_range(size, align, 0,
					 MEMBLOCK_ALLOC_ACCESSIBLE);
}

void *memblock_alloc_exact_nid_raw(phys_addr_t size, phys_addr_t align,
				 phys_addr_t min_addr, phys_addr_t max_addr,
				 int nid);
void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
				 phys_addr_t min_addr, phys_addr_t max_addr,
				 int nid);
void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
			     phys_addr_t min_addr, phys_addr_t max_addr,
			     int nid);

static __always_inline void *memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}
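
/*
 * Usage sketch (hypothetical caller code, shown for illustration only):
 * a typical early-boot allocation of a zeroed buffer with no node
 * preference, plus the "or_panic" variant (declared below) for allocations
 * that boot cannot survive without. "table" and the sizes are made up.
 *
 *	void *table;
 *
 *	table = memblock_alloc(PAGE_SIZE, SMP_CACHE_BYTES);
 *	if (!table)
 *		return -ENOMEM;
 *
 *	table = memblock_alloc_or_panic(PAGE_SIZE, SMP_CACHE_BYTES);
 */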

void *__memblock_alloc_or_panic(phys_addr_t size, phys_addr_t align,
				const char *func);

#define memblock_alloc_or_panic(size, align)    \
	 __memblock_alloc_or_panic(size, align, __func__)

static inline void *memblock_alloc_raw(phys_addr_t size,
					       phys_addr_t align)
{
	return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
					  MEMBLOCK_ALLOC_ACCESSIBLE,
					  NUMA_NO_NODE);
}

static __always_inline void *memblock_alloc_from(phys_addr_t size,
						phys_addr_t align,
						phys_addr_t min_addr)
{
	return memblock_alloc_try_nid(size, align, min_addr,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}

static inline void *memblock_alloc_low(phys_addr_t size,
					       phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
}

static inline void *memblock_alloc_node(phys_addr_t size,
						phys_addr_t align, int nid)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline __init_memblock void memblock_set_bottom_up(bool enable)
{
	memblock.bottom_up = enable;
}

/*
 * Check if the allocation direction is bottom-up. If true, memblock
 * allocates memory in the bottom-up direction.
 */
static inline __init_memblock bool memblock_bottom_up(void)
{
	return memblock.bottom_up;
}

phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_reserved_kern_size(phys_addr_t limit, int nid);
unsigned long memblock_estimated_nr_free_pages(void);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
bool memblock_is_map_memory(phys_addr_t addr);
bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

void memblock_dump_all(void);

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                         limiting allocations to what is currently
 *                         accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);


phys_addr_t memblock_get_current_limit(void);

/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the memory region
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - get the end pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the memory region
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base + reg->size);
}

/**
 * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the reserved region
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the reserved region
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base + reg->size);
}

/**
 * for_each_mem_region - iterate over memory regions
 * @region: loop variable
 */
#define for_each_mem_region(region)					\
	for (region = memblock.memory.regions;				\
	     region < (memblock.memory.regions + memblock.memory.cnt);	\
	     region++)
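
/*
 * Usage sketch (hypothetical caller code, shown for illustration only):
 * iterating the raw region array and converting each region to a pfn range
 * and node id with the accessors defined above.
 *
 *	struct memblock_region *r;
 *
 *	for_each_mem_region(r)
 *		pr_info("region: pfns %lx-%lx, node %d\n",
 *			memblock_region_memory_base_pfn(r),
 *			memblock_region_memory_end_pfn(r),
 *			memblock_get_region_node(r));
 */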

/**
 * for_each_reserved_mem_region - iterate over reserved memory regions
 * @region: loop variable
 */
#define for_each_reserved_mem_region(region)				\
	for (region = memblock.reserved.regions;			\
	     region < (memblock.reserved.regions + memblock.reserved.cnt); \
	     region++)

extern void *alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long low_limit,
				     unsigned long high_limit);

#define HASH_EARLY	0x00000001	/* Allocating during early boot? */
#define HASH_ZERO	0x00000002	/* Zero allocated hash table */

/* Only NUMA needs hash distribution. 64bit NUMA architectures have
 * sufficient vmalloc space.
 */
#ifdef CONFIG_NUMA
#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
extern int hashdist;		/* Distribute hashes across NUMA nodes? */
#else
#define hashdist (0)
#endif

#ifdef CONFIG_MEMTEST
void early_memtest(phys_addr_t start, phys_addr_t end);
void memtest_report_meminfo(struct seq_file *m);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end) { }
static inline void memtest_report_meminfo(struct seq_file *m) { }
#endif

#ifdef CONFIG_MEMBLOCK_KHO_SCRATCH
void memblock_set_kho_scratch_only(void);
void memblock_clear_kho_scratch_only(void);
void memmap_init_kho_scratch_pages(void);
#else
static inline void memblock_set_kho_scratch_only(void) { }
static inline void memblock_clear_kho_scratch_only(void) { }
static inline void memmap_init_kho_scratch_pages(void) {}
#endif

#endif /* _LINUX_MEMBLOCK_H */