// SPDX-License-Identifier: GPL-2.0-or-later
#include "alloc_exact_nid_api.h"
#include "alloc_nid_api.h"

#define FUNC_NAME			"memblock_alloc_exact_nid_raw"

/*
 * Fraction of MEM_SIZE assigned to each node, in basis points
 * (one basis point is one hundredth of 1%, i.e. 1/10000).
 */
static const unsigned int node_fractions[] = {
	2500, /* 1/4  */
	 625, /* 1/16 */
	1250, /* 1/8  */
	1250, /* 1/8  */
	 625, /* 1/16 */
	 625, /* 1/16 */
	2500, /* 1/4  */
	 625, /* 1/16 */
};
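
/*
 * Worked example (values assumed for illustration only; the real
 * MEM_SIZE and the per-node carve-up live in the shared test headers,
 * see setup_numa_memblock()): if MEM_SIZE were SZ_16K, node 0 would
 * span 2500 * SZ_16K / 10000 = SZ_4K and node 1 would span
 * 625 * SZ_16K / 10000 = SZ_1K.
 */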

/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * has enough memory to allocate a region of the requested size.
 * Expect to allocate an aligned region at the end of the requested node.
 */
static int alloc_exact_nid_top_down_numa_simple_check(void)
{
	int nid_req = 3;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	void *allocated_ptr = NULL;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	ASSERT_LE(SZ_4, req_node->size);
	size = req_node->size / SZ_4;
	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM();

	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
						     min_addr, max_addr,
						     nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	ASSERT_MEM_NE(allocated_ptr, 0, size);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
	ASSERT_LE(req_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
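
/*
 * Note: region_end() comes from the shared test headers; as used here
 * it is assumed to return rgn->base + rgn->size, i.e. the first address
 * past the region, so "region_end(req_node) - size" is the base of a
 * region that ends exactly at the node's upper boundary.
 */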

/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * is partially reserved but has enough memory for the allocated region:
 *
 *  |           +---------------------------------------+          |
 *  |           |               requested               |          |
 *  +-----------+---------------------------------------+----------+
 *
 *  |           +------------------+              +-----+          |
 *  |           |     reserved     |              | new |          |
 *  +-----------+------------------+--------------+-----+----------+
 *
 * Expect to allocate an aligned region at the end of the requested node. The
 * region count and total size get updated.
 */
static int alloc_exact_nid_top_down_numa_part_reserved_check(void)
{
	int nid_req = 4;
	struct memblock_region *new_rgn = &memblock.reserved.regions[1];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	void *allocated_ptr = NULL;
	struct region r1;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	ASSERT_LE(SZ_8, req_node->size);
	r1.base = req_node->base;
	r1.size = req_node->size / SZ_2;
	size = r1.size / SZ_4;
	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM();

	memblock_reserve(r1.base, r1.size);
	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
						     min_addr, max_addr,
						     nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	ASSERT_MEM_NE(allocated_ptr, 0, size);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
	ASSERT_LE(req_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, size + r1.size);

	test_pass_pop();

	return 0;
}
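
/*
 * Top-down, the new region is placed at the end of the node, leaving a
 * gap between it and the reserved first half (r1), so the two regions
 * do not merge: the reserved count becomes 2 and only the total size
 * grows, as the assertions above verify.
 */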

/*
 * A test that tries to allocate a memory region that spans over the min_addr
 * and max_addr range and overlaps with two different nodes, where the first
 * node is the requested node:
 *
 *                                min_addr
 *                                |           max_addr
 *                                |           |
 *                                v           v
 *  |           +-----------------------+-----------+              |
 *  |           |       requested       |   node3   |              |
 *  +-----------+-----------------------+-----------+--------------+
 *                                +           +
 *  |                       +-----------+                          |
 *  |                       |    rgn    |                          |
 *  +-----------------------+-----------+--------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region that ends at
 * the end of the requested node.
 */
static int alloc_exact_nid_top_down_numa_split_range_low_check(void)
{
	int nid_req = 2;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_512;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t req_node_end;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	req_node_end = region_end(req_node);
	min_addr = req_node_end - SZ_256;
	max_addr = min_addr + size;

	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
						     min_addr, max_addr,
						     nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	ASSERT_MEM_NE(allocated_ptr, 0, size);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, req_node_end - size);
	ASSERT_LE(req_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
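
/*
 * The "drop the lower limit" behaviour exercised here and in the tests
 * below is a property of the allocator, summarized for context (not
 * part of the test): when nothing fits within [min_addr, max_addr),
 * memblock_alloc_internal() retries the allocation with the lower
 * limit reset to zero while keeping the upper limit.
 */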

/*
 * A test that tries to allocate a memory region that spans over the min_addr
 * and max_addr range and overlaps with two different nodes, where the requested
 * node ends before min_addr:
 *
 *                                         min_addr
 *                                         |         max_addr
 *                                         |         |
 *                                         v         v
 *  |    +---------------+        +-------------+---------+          |
 *  |    |   requested   |        |    node1    |  node2  |          |
 *  +----+---------------+--------+-------------+---------+----------+
 *                                         +         +
 *  |          +---------+                                           |
 *  |          |   rgn   |                                           |
 *  +----------+---------+-------------------------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region that ends at
 * the end of the requested node.
 */
static int alloc_exact_nid_top_down_numa_no_overlap_split_check(void)
{
	int nid_req = 2;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *node2 = &memblock.memory.regions[6];
	void *allocated_ptr = NULL;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	size = SZ_512;
	min_addr = node2->base - SZ_256;
	max_addr = min_addr + size;

	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
						     min_addr, max_addr,
						     nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	ASSERT_MEM_NE(allocated_ptr, 0, size);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
	ASSERT_LE(req_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
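
/*
 * Naming note: the "node2" box in the diagram above is
 * memblock.memory.regions[6] (the node2 variable in the code); the
 * requested node, regions[2], lies entirely below min_addr.
 */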

/*
 * A test that tries to allocate memory within the min_addr and max_addr range
 * when the requested node and the range do not overlap, and the requested node
 * ends before min_addr. The range overlaps with multiple nodes along node
 * boundaries:
 *
 *                          min_addr
 *                          |                                 max_addr
 *                          |                                 |
 *                          v                                 v
 *  |-----------+           +----------+----...----+----------+      |
 *  | requested |           | min node |    ...    | max node |      |
 *  +-----------+-----------+----------+----...----+----------+------+
 *                          +                                 +
 *  |     +-----+                                                    |
 *  |     | rgn |                                                    |
 *  +-----+-----+----------------------------------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region that ends at
 * the end of the requested node.
 */
static int alloc_exact_nid_top_down_numa_no_overlap_low_check(void)
{
	int nid_req = 0;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *min_node = &memblock.memory.regions[2];
	struct memblock_region *max_node = &memblock.memory.regions[5];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_64;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	min_addr = min_node->base;
	max_addr = region_end(max_node);

	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
						     min_addr, max_addr,
						     nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	ASSERT_MEM_NE(allocated_ptr, 0, size);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, region_end(req_node) - size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * has enough memory to allocate a region of the requested size.
 * Expect to allocate an aligned region at the beginning of the requested node.
 */
static int alloc_exact_nid_bottom_up_numa_simple_check(void)
{
	int nid_req = 3;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	void *allocated_ptr = NULL;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	ASSERT_LE(SZ_4, req_node->size);
	size = req_node->size / SZ_4;
	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM();

	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
						     min_addr, max_addr,
						     nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	ASSERT_MEM_NE(allocated_ptr, 0, size);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, req_node->base);
	ASSERT_LE(region_end(new_rgn), region_end(req_node));

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * is partially reserved but has enough memory for the allocated region:
 *
 *  |           +---------------------------------------+         |
 *  |           |               requested               |         |
 *  +-----------+---------------------------------------+---------+
 *
 *  |           +------------------+-----+                        |
 *  |           |     reserved     | new |                        |
 *  +-----------+------------------+-----+------------------------+
 *
 * Expect to allocate an aligned region in the requested node that merges with
 * the existing reserved region. The total size gets updated.
 */
static int alloc_exact_nid_bottom_up_numa_part_reserved_check(void)
{
	int nid_req = 4;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	void *allocated_ptr = NULL;
	struct region r1;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t total_size;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	ASSERT_LE(SZ_8, req_node->size);
	r1.base = req_node->base;
	r1.size = req_node->size / SZ_2;
	size = r1.size / SZ_4;
	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM();
	total_size = size + r1.size;

	memblock_reserve(r1.base, r1.size);
	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
						     min_addr, max_addr,
						     nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	ASSERT_MEM_NE(allocated_ptr, 0, size);

	ASSERT_EQ(new_rgn->size, total_size);
	ASSERT_EQ(new_rgn->base, req_node->base);
	ASSERT_LE(region_end(new_rgn), region_end(req_node));

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}
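
/*
 * Bottom-up, the new region is placed directly after r1, so the two are
 * contiguous and memblock merges them: the reserved count stays at 1
 * and regions[0] covers r1.size + size bytes starting at the node base,
 * which is what the assertions above verify.
 */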

/*
 * A test that tries to allocate a memory region that spans over the min_addr
 * and max_addr range and overlaps with two different nodes, where the first
 * node is the requested node:
 *
 *                                min_addr
 *                                |           max_addr
 *                                |           |
 *                                v           v
 *  |           +-----------------------+-----------+              |
 *  |           |       requested       |   node3   |              |
 *  +-----------+-----------------------+-----------+--------------+
 *                                +           +
 *  |           +-----------+                                      |
 *  |           |    rgn    |                                      |
 *  +-----------+-----------+--------------------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region at the beginning
 * of the requested node.
 */
static int alloc_exact_nid_bottom_up_numa_split_range_low_check(void)
{
	int nid_req = 2;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_512;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t req_node_end;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	req_node_end = region_end(req_node);
	min_addr = req_node_end - SZ_256;
	max_addr = min_addr + size;

	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
						     min_addr, max_addr,
						     nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	ASSERT_MEM_NE(allocated_ptr, 0, size);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, req_node->base);
	ASSERT_LE(region_end(new_rgn), req_node_end);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region that spans over the min_addr
 * and max_addr range and overlaps with two different nodes, where the requested
 * node ends before min_addr:
 *
 *                                         min_addr
 *                                         |         max_addr
 *                                         |         |
 *                                         v         v
 *  |    +---------------+        +-------------+---------+         |
 *  |    |   requested   |        |    node1    |  node2  |         |
 *  +----+---------------+--------+-------------+---------+---------+
 *                                         +         +
 *  |    +---------+                                                |
 *  |    |   rgn   |                                                |
 *  +----+---------+------------------------------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region that starts at
 * the beginning of the requested node.
 */
static int alloc_exact_nid_bottom_up_numa_no_overlap_split_check(void)
{
	int nid_req = 2;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *node2 = &memblock.memory.regions[6];
	void *allocated_ptr = NULL;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	size = SZ_512;
	min_addr = node2->base - SZ_256;
	max_addr = min_addr + size;

	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
						     min_addr, max_addr,
						     nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	ASSERT_MEM_NE(allocated_ptr, 0, size);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, req_node->base);
	ASSERT_LE(region_end(new_rgn), region_end(req_node));

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory within the min_addr and max_addr range
 * when the requested node and the range do not overlap, and the requested node
 * ends before min_addr. The range overlaps with multiple nodes along node
 * boundaries:
 *
 *                          min_addr
 *                          |                                 max_addr
 *                          |                                 |
 *                          v                                 v
 *  |-----------+           +----------+----...----+----------+      |
 *  | requested |           | min node |    ...    | max node |      |
 *  +-----------+-----------+----------+----...----+----------+------+
 *                          +                                 +
 *  |-----+                                                          |
 *  | rgn |                                                          |
 *  +-----+----------------------------------------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region that starts at
 * the beginning of the requested node.
 */
static int alloc_exact_nid_bottom_up_numa_no_overlap_low_check(void)
{
	int nid_req = 0;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *min_node = &memblock.memory.regions[2];
	struct memblock_region *max_node = &memblock.memory.regions[5];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_64;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	min_addr = min_node->base;
	max_addr = region_end(max_node);

	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
						     min_addr, max_addr,
						     nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	ASSERT_MEM_NE(allocated_ptr, 0, size);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, req_node->base);
	ASSERT_LE(region_end(new_rgn), region_end(req_node));

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/* Test case wrappers for NUMA tests: each runs in both allocation modes */
static int alloc_exact_nid_numa_simple_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_exact_nid_top_down_numa_simple_check();
	memblock_set_bottom_up(true);
	alloc_exact_nid_bottom_up_numa_simple_check();

	return 0;
}

static int alloc_exact_nid_numa_part_reserved_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_exact_nid_top_down_numa_part_reserved_check();
	memblock_set_bottom_up(true);
	alloc_exact_nid_bottom_up_numa_part_reserved_check();

	return 0;
}

static int alloc_exact_nid_numa_split_range_low_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_exact_nid_top_down_numa_split_range_low_check();
	memblock_set_bottom_up(true);
	alloc_exact_nid_bottom_up_numa_split_range_low_check();

	return 0;
}

static int alloc_exact_nid_numa_no_overlap_split_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_exact_nid_top_down_numa_no_overlap_split_check();
	memblock_set_bottom_up(true);
	alloc_exact_nid_bottom_up_numa_no_overlap_split_check();

	return 0;
}

static int alloc_exact_nid_numa_no_overlap_low_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_exact_nid_top_down_numa_no_overlap_low_check();
	memblock_set_bottom_up(true);
	alloc_exact_nid_bottom_up_numa_no_overlap_low_check();

	return 0;
}

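/*
 * Entry points. memblock_alloc_exact_nid_checks() is invoked by the
 * suite's test runner; __memblock_alloc_exact_nid_numa_checks() is
 * assumed to be reached through a CONFIG_NUMA-guarded wrapper in
 * alloc_exact_nid_api.h, so the NUMA tests only run on NUMA builds.
 */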
618 
619 int __memblock_alloc_exact_nid_numa_checks(void)
620 {
621 	test_print("Running %s NUMA tests...\n", FUNC_NAME);
622 
623 	alloc_exact_nid_numa_simple_check();
624 	alloc_exact_nid_numa_part_reserved_check();
625 	alloc_exact_nid_numa_split_range_low_check();
626 	alloc_exact_nid_numa_no_overlap_split_check();
627 	alloc_exact_nid_numa_no_overlap_low_check();
628 
629 	return 0;
630 }
631 
632 int memblock_alloc_exact_nid_checks(void)
633 {
634 	prefix_reset();
635 	prefix_push(FUNC_NAME);
636 
637 	reset_memblock_attributes();
638 	dummy_physical_memory_init();
639 
640 	memblock_alloc_exact_nid_range_checks();
641 	memblock_alloc_exact_nid_numa_checks();
642 
643 	dummy_physical_memory_cleanup();
644 
645 	prefix_pop();
646 
647 	return 0;
648 }
649