xref: /linux/tools/testing/memblock/tests/basic_api.c (revision 3d3165193776ddacf59f101f0fa05cfab9f1a9ba)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 #include "basic_api.h"
3 #include <string.h>
4 #include <linux/memblock.h>
5 
6 #define EXPECTED_MEMBLOCK_REGIONS			128
7 #define FUNC_ADD					"memblock_add"
8 #define FUNC_RESERVE					"memblock_reserve"
9 #define FUNC_REMOVE					"memblock_remove"
10 #define FUNC_FREE					"memblock_free"
11 #define FUNC_TRIM					"memblock_trim_memory"
12 
13 static int memblock_initialization_check(void)
14 {
15 	PREFIX_PUSH();
16 
17 	ASSERT_NE(memblock.memory.regions, NULL);
18 	ASSERT_EQ(memblock.memory.cnt, 0);
19 	ASSERT_EQ(memblock.memory.max, EXPECTED_MEMBLOCK_REGIONS);
20 	ASSERT_EQ(strcmp(memblock.memory.name, "memory"), 0);
21 
22 	ASSERT_NE(memblock.reserved.regions, NULL);
23 	ASSERT_EQ(memblock.reserved.cnt, 0);
24 	ASSERT_EQ(memblock.memory.max, EXPECTED_MEMBLOCK_REGIONS);
25 	ASSERT_EQ(strcmp(memblock.reserved.name, "reserved"), 0);
26 
27 	ASSERT_EQ(memblock.bottom_up, false);
28 	ASSERT_EQ(memblock.current_limit, MEMBLOCK_ALLOC_ANYWHERE);
29 
30 	test_pass_pop();
31 
32 	return 0;
33 }
34 
35 /*
36  * A simple test that adds a memory block of a specified base address
37  * and size to the collection of available memory regions (memblock.memory).
38  * Expect to create a new entry. The region counter and total memory get
39  * updated.
40  */
41 static int memblock_add_simple_check(void)
42 {
43 	struct memblock_region *rgn;
44 
45 	rgn = &memblock.memory.regions[0];
46 
47 	struct region r = {
48 		.base = SZ_1G,
49 		.size = SZ_4M
50 	};
51 
52 	PREFIX_PUSH();
53 
54 	reset_memblock_regions();
55 	memblock_add(r.base, r.size);
56 
57 	ASSERT_EQ(rgn->base, r.base);
58 	ASSERT_EQ(rgn->size, r.size);
59 
60 	ASSERT_EQ(memblock.memory.cnt, 1);
61 	ASSERT_EQ(memblock.memory.total_size, r.size);
62 
63 	test_pass_pop();
64 
65 	return 0;
66 }
67 
68 /*
69  * A simple test that adds a memory block of a specified base address, size,
70  * NUMA node and memory flags to the collection of available memory regions.
71  * Expect to create a new entry. The region counter and total memory get
72  * updated.
73  */
74 static int memblock_add_node_simple_check(void)
75 {
76 	struct memblock_region *rgn;
77 
78 	rgn = &memblock.memory.regions[0];
79 
80 	struct region r = {
81 		.base = SZ_1M,
82 		.size = SZ_16M
83 	};
84 
85 	PREFIX_PUSH();
86 
87 	reset_memblock_regions();
88 	memblock_add_node(r.base, r.size, 1, MEMBLOCK_HOTPLUG);
89 
90 	ASSERT_EQ(rgn->base, r.base);
91 	ASSERT_EQ(rgn->size, r.size);
92 #ifdef CONFIG_NUMA
93 	ASSERT_EQ(rgn->nid, 1);
94 #endif
95 	ASSERT_EQ(rgn->flags, MEMBLOCK_HOTPLUG);
96 
97 	ASSERT_EQ(memblock.memory.cnt, 1);
98 	ASSERT_EQ(memblock.memory.total_size, r.size);
99 
100 	test_pass_pop();
101 
102 	return 0;
103 }
104 
105 /*
106  * A test that tries to add two memory blocks that don't overlap with one
107  * another:
108  *
109  *  |        +--------+        +--------+  |
110  *  |        |   r1   |        |   r2   |  |
111  *  +--------+--------+--------+--------+--+
112  *
113  * Expect to add two correctly initialized entries to the collection of
114  * available memory regions (memblock.memory). The total size and
115  * region counter fields get updated.
116  */
117 static int memblock_add_disjoint_check(void)
118 {
119 	struct memblock_region *rgn1, *rgn2;
120 
121 	rgn1 = &memblock.memory.regions[0];
122 	rgn2 = &memblock.memory.regions[1];
123 
124 	struct region r1 = {
125 		.base = SZ_1G,
126 		.size = SZ_8K
127 	};
128 	struct region r2 = {
129 		.base = SZ_1G + SZ_16K,
130 		.size = SZ_8K
131 	};
132 
133 	PREFIX_PUSH();
134 
135 	reset_memblock_regions();
136 	memblock_add(r1.base, r1.size);
137 	memblock_add(r2.base, r2.size);
138 
139 	ASSERT_EQ(rgn1->base, r1.base);
140 	ASSERT_EQ(rgn1->size, r1.size);
141 
142 	ASSERT_EQ(rgn2->base, r2.base);
143 	ASSERT_EQ(rgn2->size, r2.size);
144 
145 	ASSERT_EQ(memblock.memory.cnt, 2);
146 	ASSERT_EQ(memblock.memory.total_size, r1.size + r2.size);
147 
148 	test_pass_pop();
149 
150 	return 0;
151 }
152 
153 /*
154  * A test that tries to add two memory blocks r1 and r2, where r2 overlaps
155  * with the beginning of r1 (that is r1.base < r2.base + r2.size):
156  *
157  *  |    +----+----+------------+          |
158  *  |    |    |r2  |   r1       |          |
159  *  +----+----+----+------------+----------+
160  *       ^    ^
161  *       |    |
162  *       |    r1.base
163  *       |
164  *       r2.base
165  *
166  * Expect to merge the two entries into one region that starts at r2.base
167  * and has size of two regions minus their intersection. The total size of
168  * the available memory is updated, and the region counter stays the same.
169  */
170 static int memblock_add_overlap_top_check(void)
171 {
172 	struct memblock_region *rgn;
173 	phys_addr_t total_size;
174 
175 	rgn = &memblock.memory.regions[0];
176 
177 	struct region r1 = {
178 		.base = SZ_512M,
179 		.size = SZ_1G
180 	};
181 	struct region r2 = {
182 		.base = SZ_256M,
183 		.size = SZ_512M
184 	};
185 
186 	PREFIX_PUSH();
187 
188 	total_size = (r1.base - r2.base) + r1.size;
189 
190 	reset_memblock_regions();
191 	memblock_add(r1.base, r1.size);
192 	memblock_add(r2.base, r2.size);
193 
194 	ASSERT_EQ(rgn->base, r2.base);
195 	ASSERT_EQ(rgn->size, total_size);
196 
197 	ASSERT_EQ(memblock.memory.cnt, 1);
198 	ASSERT_EQ(memblock.memory.total_size, total_size);
199 
200 	test_pass_pop();
201 
202 	return 0;
203 }
204 
205 /*
206  * A test that tries to add two memory blocks r1 and r2, where r2 overlaps
207  * with the end of r1 (that is r2.base < r1.base + r1.size):
208  *
209  *  |  +--+------+----------+              |
210  *  |  |  | r1   | r2       |              |
211  *  +--+--+------+----------+--------------+
212  *     ^  ^
213  *     |  |
214  *     |  r2.base
215  *     |
216  *     r1.base
217  *
218  * Expect to merge the two entries into one region that starts at r1.base
219  * and has size of two regions minus their intersection. The total size of
220  * the available memory is updated, and the region counter stays the same.
221  */
222 static int memblock_add_overlap_bottom_check(void)
223 {
224 	struct memblock_region *rgn;
225 	phys_addr_t total_size;
226 
227 	rgn = &memblock.memory.regions[0];
228 
229 	struct region r1 = {
230 		.base = SZ_128M,
231 		.size = SZ_512M
232 	};
233 	struct region r2 = {
234 		.base = SZ_256M,
235 		.size = SZ_1G
236 	};
237 
238 	PREFIX_PUSH();
239 
240 	total_size = (r2.base - r1.base) + r2.size;
241 
242 	reset_memblock_regions();
243 	memblock_add(r1.base, r1.size);
244 	memblock_add(r2.base, r2.size);
245 
246 	ASSERT_EQ(rgn->base, r1.base);
247 	ASSERT_EQ(rgn->size, total_size);
248 
249 	ASSERT_EQ(memblock.memory.cnt, 1);
250 	ASSERT_EQ(memblock.memory.total_size, total_size);
251 
252 	test_pass_pop();
253 
254 	return 0;
255 }
256 
257 /*
258  * A test that tries to add two memory blocks r1 and r2, where r2 is
259  * within the range of r1 (that is r1.base < r2.base &&
260  * r2.base + r2.size < r1.base + r1.size):
261  *
262  *  |   +-------+--+-----------------------+
263  *  |   |       |r2|      r1               |
264  *  +---+-------+--+-----------------------+
265  *      ^
266  *      |
267  *      r1.base
268  *
269  * Expect to merge two entries into one region that stays the same.
270  * The counter and total size of available memory are not updated.
271  */
272 static int memblock_add_within_check(void)
273 {
274 	struct memblock_region *rgn;
275 
276 	rgn = &memblock.memory.regions[0];
277 
278 	struct region r1 = {
279 		.base = SZ_8M,
280 		.size = SZ_32M
281 	};
282 	struct region r2 = {
283 		.base = SZ_16M,
284 		.size = SZ_1M
285 	};
286 
287 	PREFIX_PUSH();
288 
289 	reset_memblock_regions();
290 	memblock_add(r1.base, r1.size);
291 	memblock_add(r2.base, r2.size);
292 
293 	ASSERT_EQ(rgn->base, r1.base);
294 	ASSERT_EQ(rgn->size, r1.size);
295 
296 	ASSERT_EQ(memblock.memory.cnt, 1);
297 	ASSERT_EQ(memblock.memory.total_size, r1.size);
298 
299 	test_pass_pop();
300 
301 	return 0;
302 }
303 
304 /*
305  * A simple test that tries to add the same memory block twice. Expect
306  * the counter and total size of available memory to not be updated.
307  */
308 static int memblock_add_twice_check(void)
309 {
310 	struct region r = {
311 		.base = SZ_16K,
312 		.size = SZ_2M
313 	};
314 
315 	PREFIX_PUSH();
316 
317 	reset_memblock_regions();
318 
319 	memblock_add(r.base, r.size);
320 	memblock_add(r.base, r.size);
321 
322 	ASSERT_EQ(memblock.memory.cnt, 1);
323 	ASSERT_EQ(memblock.memory.total_size, r.size);
324 
325 	test_pass_pop();
326 
327 	return 0;
328 }
329 
330 /*
331  * A test that tries to add two memory blocks that don't overlap with one
332  * another and then add a third memory block in the space between the first two:
333  *
334  *  |        +--------+--------+--------+  |
335  *  |        |   r1   |   r3   |   r2   |  |
336  *  +--------+--------+--------+--------+--+
337  *
338  * Expect to merge the three entries into one region that starts at r1.base
339  * and has size of r1.size + r2.size + r3.size. The region counter and total
340  * size of the available memory are updated.
341  */
342 static int memblock_add_between_check(void)
343 {
344 	struct memblock_region *rgn;
345 	phys_addr_t total_size;
346 
347 	rgn = &memblock.memory.regions[0];
348 
349 	struct region r1 = {
350 		.base = SZ_1G,
351 		.size = SZ_8K
352 	};
353 	struct region r2 = {
354 		.base = SZ_1G + SZ_16K,
355 		.size = SZ_8K
356 	};
357 	struct region r3 = {
358 		.base = SZ_1G + SZ_8K,
359 		.size = SZ_8K
360 	};
361 
362 	PREFIX_PUSH();
363 
364 	total_size = r1.size + r2.size + r3.size;
365 
366 	reset_memblock_regions();
367 	memblock_add(r1.base, r1.size);
368 	memblock_add(r2.base, r2.size);
369 	memblock_add(r3.base, r3.size);
370 
371 	ASSERT_EQ(rgn->base, r1.base);
372 	ASSERT_EQ(rgn->size, total_size);
373 
374 	ASSERT_EQ(memblock.memory.cnt, 1);
375 	ASSERT_EQ(memblock.memory.total_size, total_size);
376 
377 	test_pass_pop();
378 
379 	return 0;
380 }
381 
382 /*
383  * A simple test that tries to add a memory block r when r extends past
384  * PHYS_ADDR_MAX:
385  *
386  *                               +--------+
387  *                               |    r   |
388  *                               +--------+
389  *  |                            +----+
390  *  |                            | rgn|
391  *  +----------------------------+----+
392  *
393  * Expect to add a memory block of size PHYS_ADDR_MAX - r.base. Expect the
394  * total size of available memory and the counter to be updated.
395  */
396 static int memblock_add_near_max_check(void)
397 {
398 	struct memblock_region *rgn;
399 	phys_addr_t total_size;
400 
401 	rgn = &memblock.memory.regions[0];
402 
403 	struct region r = {
404 		.base = PHYS_ADDR_MAX - SZ_1M,
405 		.size = SZ_2M
406 	};
407 
408 	PREFIX_PUSH();
409 
410 	total_size = PHYS_ADDR_MAX - r.base;
411 
412 	reset_memblock_regions();
413 	memblock_add(r.base, r.size);
414 
415 	ASSERT_EQ(rgn->base, r.base);
416 	ASSERT_EQ(rgn->size, total_size);
417 
418 	ASSERT_EQ(memblock.memory.cnt, 1);
419 	ASSERT_EQ(memblock.memory.total_size, total_size);
420 
421 	test_pass_pop();
422 
423 	return 0;
424 }
425 
/*
 * A test that tries to add a 129th memory block, one more than the
 * initial capacity of memblock.memory.regions. Expect this to trigger
 * memblock_double_array(), which doubles memblock.memory.max and moves
 * memory.regions into a newly chosen memory range.
 */
static int memblock_add_many_check(void)
{
	int i;
	void *orig_region;
	struct region r = {
		.base = SZ_16K,
		.size = SZ_16K,
	};
	phys_addr_t new_memory_regions_size;
	phys_addr_t base, size = SZ_64;
	phys_addr_t gap_size = SZ_64;

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_allow_resize();

	dummy_physical_memory_init();
	/*
	 * dummy_physical_memory_init() allocated enough backing memory;
	 * split it into small blocks. First carve out a block large enough
	 * to hold the doubled region array, so that memblock_double_array()
	 * will choose it as the new memory.regions storage.
	 */
	base = PAGE_ALIGN(dummy_physical_memory_base());
	new_memory_regions_size = PAGE_ALIGN(INIT_MEMBLOCK_REGIONS * 2 *
					     sizeof(struct memblock_region));
	memblock_add(base, new_memory_regions_size);

	/* This is the base of the first small memory block. */
	base += new_memory_regions_size + gap_size;

	orig_region = memblock.memory.regions;

	for (i = 0; i < INIT_MEMBLOCK_REGIONS; i++) {
		/*
		 * Add small blocks until the region array is full. A gap is
		 * kept between neighbours so they are never merged.
		 */
		memblock_add(base, size);
		base += size + gap_size;

		ASSERT_EQ(memblock.memory.cnt, i + 2);
		ASSERT_EQ(memblock.memory.total_size, new_memory_regions_size +
						      (i + 1) * size);
	}

	/*
	 * memblock_double_array() should have succeeded by this point;
	 * check that it updated memory.max.
	 */
	ASSERT_EQ(memblock.memory.max, INIT_MEMBLOCK_REGIONS * 2);

	/* memblock_double_array() reserves the memory it now occupies. Check it. */
	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, new_memory_regions_size);

	/*
	 * Verify that memblock_add() still behaves normally after the
	 * array has been doubled.
	 */
	memblock_add(r.base, r.size);
	ASSERT_EQ(memblock.memory.regions[0].base, r.base);
	ASSERT_EQ(memblock.memory.regions[0].size, r.size);

	ASSERT_EQ(memblock.memory.cnt, INIT_MEMBLOCK_REGIONS + 2);
	ASSERT_EQ(memblock.memory.total_size, INIT_MEMBLOCK_REGIONS * size +
					      new_memory_regions_size +
					      r.size);
	ASSERT_EQ(memblock.memory.max, INIT_MEMBLOCK_REGIONS * 2);

	dummy_physical_memory_cleanup();

	/*
	 * The current memory.regions lives inside the range allocated by
	 * dummy_physical_memory_init(), which was just freed and must not
	 * be used any more. Restore the original region array so that later
	 * tests are unaffected by the doubling.
	 */
	memblock.memory.regions = orig_region;
	memblock.memory.cnt = INIT_MEMBLOCK_REGIONS;

	test_pass_pop();

	return 0;
}
517 
518 static int memblock_add_checks(void)
519 {
520 	prefix_reset();
521 	prefix_push(FUNC_ADD);
522 	test_print("Running %s tests...\n", FUNC_ADD);
523 
524 	memblock_add_simple_check();
525 	memblock_add_node_simple_check();
526 	memblock_add_disjoint_check();
527 	memblock_add_overlap_top_check();
528 	memblock_add_overlap_bottom_check();
529 	memblock_add_within_check();
530 	memblock_add_twice_check();
531 	memblock_add_between_check();
532 	memblock_add_near_max_check();
533 	memblock_add_many_check();
534 
535 	prefix_pop();
536 
537 	return 0;
538 }
539 
540 /*
541  * A simple test that marks a memory block of a specified base address
542  * and size as reserved and to the collection of reserved memory regions
543  * (memblock.reserved). Expect to create a new entry. The region counter
544  * and total memory size are updated.
545  */
546 static int memblock_reserve_simple_check(void)
547 {
548 	struct memblock_region *rgn;
549 
550 	rgn =  &memblock.reserved.regions[0];
551 
552 	struct region r = {
553 		.base = SZ_2G,
554 		.size = SZ_128M
555 	};
556 
557 	PREFIX_PUSH();
558 
559 	reset_memblock_regions();
560 	memblock_reserve(r.base, r.size);
561 
562 	ASSERT_EQ(rgn->base, r.base);
563 	ASSERT_EQ(rgn->size, r.size);
564 
565 	test_pass_pop();
566 
567 	return 0;
568 }
569 
570 /*
571  * A test that tries to mark two memory blocks that don't overlap as reserved:
572  *
573  *  |        +--+      +----------------+  |
574  *  |        |r1|      |       r2       |  |
575  *  +--------+--+------+----------------+--+
576  *
577  * Expect to add two entries to the collection of reserved memory regions
578  * (memblock.reserved). The total size and region counter for
579  * memblock.reserved are updated.
580  */
581 static int memblock_reserve_disjoint_check(void)
582 {
583 	struct memblock_region *rgn1, *rgn2;
584 
585 	rgn1 = &memblock.reserved.regions[0];
586 	rgn2 = &memblock.reserved.regions[1];
587 
588 	struct region r1 = {
589 		.base = SZ_256M,
590 		.size = SZ_16M
591 	};
592 	struct region r2 = {
593 		.base = SZ_512M,
594 		.size = SZ_512M
595 	};
596 
597 	PREFIX_PUSH();
598 
599 	reset_memblock_regions();
600 	memblock_reserve(r1.base, r1.size);
601 	memblock_reserve(r2.base, r2.size);
602 
603 	ASSERT_EQ(rgn1->base, r1.base);
604 	ASSERT_EQ(rgn1->size, r1.size);
605 
606 	ASSERT_EQ(rgn2->base, r2.base);
607 	ASSERT_EQ(rgn2->size, r2.size);
608 
609 	ASSERT_EQ(memblock.reserved.cnt, 2);
610 	ASSERT_EQ(memblock.reserved.total_size, r1.size + r2.size);
611 
612 	test_pass_pop();
613 
614 	return 0;
615 }
616 
617 /*
618  * A test that tries to mark two memory blocks r1 and r2 as reserved,
619  * where r2 overlaps with the beginning of r1 (that is
620  * r1.base < r2.base + r2.size):
621  *
622  *  |  +--------------+--+--------------+  |
623  *  |  |       r2     |  |     r1       |  |
624  *  +--+--------------+--+--------------+--+
625  *     ^              ^
626  *     |              |
627  *     |              r1.base
628  *     |
629  *     r2.base
630  *
631  * Expect to merge two entries into one region that starts at r2.base and
632  * has size of two regions minus their intersection. The total size of the
633  * reserved memory is updated, and the region counter is not updated.
634  */
635 static int memblock_reserve_overlap_top_check(void)
636 {
637 	struct memblock_region *rgn;
638 	phys_addr_t total_size;
639 
640 	rgn = &memblock.reserved.regions[0];
641 
642 	struct region r1 = {
643 		.base = SZ_1G,
644 		.size = SZ_1G
645 	};
646 	struct region r2 = {
647 		.base = SZ_128M,
648 		.size = SZ_1G
649 	};
650 
651 	PREFIX_PUSH();
652 
653 	total_size = (r1.base - r2.base) + r1.size;
654 
655 	reset_memblock_regions();
656 	memblock_reserve(r1.base, r1.size);
657 	memblock_reserve(r2.base, r2.size);
658 
659 	ASSERT_EQ(rgn->base, r2.base);
660 	ASSERT_EQ(rgn->size, total_size);
661 
662 	ASSERT_EQ(memblock.reserved.cnt, 1);
663 	ASSERT_EQ(memblock.reserved.total_size, total_size);
664 
665 	test_pass_pop();
666 
667 	return 0;
668 }
669 
670 /*
671  * A test that tries to mark two memory blocks r1 and r2 as reserved,
672  * where r2 overlaps with the end of r1 (that is
673  * r2.base < r1.base + r1.size):
674  *
675  *  |  +--------------+--+--------------+  |
676  *  |  |       r1     |  |     r2       |  |
677  *  +--+--------------+--+--------------+--+
678  *     ^              ^
679  *     |              |
680  *     |              r2.base
681  *     |
682  *     r1.base
683  *
684  * Expect to merge two entries into one region that starts at r1.base and
685  * has size of two regions minus their intersection. The total size of the
686  * reserved memory is updated, and the region counter is not updated.
687  */
688 static int memblock_reserve_overlap_bottom_check(void)
689 {
690 	struct memblock_region *rgn;
691 	phys_addr_t total_size;
692 
693 	rgn = &memblock.reserved.regions[0];
694 
695 	struct region r1 = {
696 		.base = SZ_2K,
697 		.size = SZ_128K
698 	};
699 	struct region r2 = {
700 		.base = SZ_128K,
701 		.size = SZ_128K
702 	};
703 
704 	PREFIX_PUSH();
705 
706 	total_size = (r2.base - r1.base) + r2.size;
707 
708 	reset_memblock_regions();
709 	memblock_reserve(r1.base, r1.size);
710 	memblock_reserve(r2.base, r2.size);
711 
712 	ASSERT_EQ(rgn->base, r1.base);
713 	ASSERT_EQ(rgn->size, total_size);
714 
715 	ASSERT_EQ(memblock.reserved.cnt, 1);
716 	ASSERT_EQ(memblock.reserved.total_size, total_size);
717 
718 	test_pass_pop();
719 
720 	return 0;
721 }
722 
723 /*
724  * A test that tries to mark two memory blocks r1 and r2 as reserved,
725  * where r2 is within the range of r1 (that is
726  * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)):
727  *
728  *  | +-----+--+---------------------------|
729  *  | |     |r2|          r1               |
730  *  +-+-----+--+---------------------------+
731  *    ^     ^
732  *    |     |
733  *    |     r2.base
734  *    |
735  *    r1.base
736  *
737  * Expect to merge two entries into one region that stays the same. The
738  * counter and total size of available memory are not updated.
739  */
740 static int memblock_reserve_within_check(void)
741 {
742 	struct memblock_region *rgn;
743 
744 	rgn = &memblock.reserved.regions[0];
745 
746 	struct region r1 = {
747 		.base = SZ_1M,
748 		.size = SZ_8M
749 	};
750 	struct region r2 = {
751 		.base = SZ_2M,
752 		.size = SZ_64K
753 	};
754 
755 	PREFIX_PUSH();
756 
757 	reset_memblock_regions();
758 	memblock_reserve(r1.base, r1.size);
759 	memblock_reserve(r2.base, r2.size);
760 
761 	ASSERT_EQ(rgn->base, r1.base);
762 	ASSERT_EQ(rgn->size, r1.size);
763 
764 	ASSERT_EQ(memblock.reserved.cnt, 1);
765 	ASSERT_EQ(memblock.reserved.total_size, r1.size);
766 
767 	test_pass_pop();
768 
769 	return 0;
770 }
771 
772 /*
773  * A simple test that tries to reserve the same memory block twice.
774  * Expect the region counter and total size of reserved memory to not
775  * be updated.
776  */
777 static int memblock_reserve_twice_check(void)
778 {
779 	struct region r = {
780 		.base = SZ_16K,
781 		.size = SZ_2M
782 	};
783 
784 	PREFIX_PUSH();
785 
786 	reset_memblock_regions();
787 
788 	memblock_reserve(r.base, r.size);
789 	memblock_reserve(r.base, r.size);
790 
791 	ASSERT_EQ(memblock.reserved.cnt, 1);
792 	ASSERT_EQ(memblock.reserved.total_size, r.size);
793 
794 	test_pass_pop();
795 
796 	return 0;
797 }
798 
799 /*
800  * A test that tries to mark two memory blocks that don't overlap as reserved
801  * and then reserve a third memory block in the space between the first two:
802  *
803  *  |        +--------+--------+--------+  |
804  *  |        |   r1   |   r3   |   r2   |  |
805  *  +--------+--------+--------+--------+--+
806  *
807  * Expect to merge the three entries into one reserved region that starts at
808  * r1.base and has size of r1.size + r2.size + r3.size. The region counter and
809  * total for memblock.reserved are updated.
810  */
811 static int memblock_reserve_between_check(void)
812 {
813 	struct memblock_region *rgn;
814 	phys_addr_t total_size;
815 
816 	rgn = &memblock.reserved.regions[0];
817 
818 	struct region r1 = {
819 		.base = SZ_1G,
820 		.size = SZ_8K
821 	};
822 	struct region r2 = {
823 		.base = SZ_1G + SZ_16K,
824 		.size = SZ_8K
825 	};
826 	struct region r3 = {
827 		.base = SZ_1G + SZ_8K,
828 		.size = SZ_8K
829 	};
830 
831 	PREFIX_PUSH();
832 
833 	total_size = r1.size + r2.size + r3.size;
834 
835 	reset_memblock_regions();
836 	memblock_reserve(r1.base, r1.size);
837 	memblock_reserve(r2.base, r2.size);
838 	memblock_reserve(r3.base, r3.size);
839 
840 	ASSERT_EQ(rgn->base, r1.base);
841 	ASSERT_EQ(rgn->size, total_size);
842 
843 	ASSERT_EQ(memblock.reserved.cnt, 1);
844 	ASSERT_EQ(memblock.reserved.total_size, total_size);
845 
846 	test_pass_pop();
847 
848 	return 0;
849 }
850 
851 /*
852  * A simple test that tries to reserve a memory block r when r extends past
853  * PHYS_ADDR_MAX:
854  *
855  *                               +--------+
856  *                               |    r   |
857  *                               +--------+
858  *  |                            +----+
859  *  |                            | rgn|
860  *  +----------------------------+----+
861  *
862  * Expect to reserve a memory block of size PHYS_ADDR_MAX - r.base. Expect the
863  * total size of reserved memory and the counter to be updated.
864  */
865 static int memblock_reserve_near_max_check(void)
866 {
867 	struct memblock_region *rgn;
868 	phys_addr_t total_size;
869 
870 	rgn = &memblock.reserved.regions[0];
871 
872 	struct region r = {
873 		.base = PHYS_ADDR_MAX - SZ_1M,
874 		.size = SZ_2M
875 	};
876 
877 	PREFIX_PUSH();
878 
879 	total_size = PHYS_ADDR_MAX - r.base;
880 
881 	reset_memblock_regions();
882 	memblock_reserve(r.base, r.size);
883 
884 	ASSERT_EQ(rgn->base, r.base);
885 	ASSERT_EQ(rgn->size, total_size);
886 
887 	ASSERT_EQ(memblock.reserved.cnt, 1);
888 	ASSERT_EQ(memblock.reserved.total_size, total_size);
889 
890 	test_pass_pop();
891 
892 	return 0;
893 }
894 
/*
 * A test that tries to reserve a 129th memory block, one more than the
 * initial capacity of memblock.reserved.regions. Expect this to trigger
 * memblock_double_array(), which doubles memblock.reserved.max and moves
 * reserved.regions into a newly chosen memory range.
 */
static int memblock_reserve_many_check(void)
{
	int i;
	void *orig_region;
	struct region r = {
		.base = SZ_16K,
		.size = SZ_16K,
	};
	phys_addr_t memory_base = SZ_128K;
	phys_addr_t new_reserved_regions_size;

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_allow_resize();

	/* Add a valid memory region for double_array() to allocate from. */
	dummy_physical_memory_init();
	memblock_add(dummy_physical_memory_base(), MEM_SIZE);

	for (i = 0; i < INIT_MEMBLOCK_REGIONS; i++) {
		/* Reserve fake memory regions until the array is full. */
		memblock_reserve(memory_base, MEM_SIZE);

		ASSERT_EQ(memblock.reserved.cnt, i + 1);
		ASSERT_EQ(memblock.reserved.total_size, (i + 1) * MEM_SIZE);

		/* Keep a gap so neighbouring regions are not merged. */
		memory_base += MEM_SIZE * 2;
	}

	orig_region = memblock.reserved.regions;

	/* Reserving the 129th region forces the array to double. */
	memblock_reserve(memory_base, MEM_SIZE);

	/*
	 * This is the size of the memory range occupied by the doubled
	 * reserved.regions array; it is itself reserved because it is in
	 * use. The size feeds into the expected total_size below.
	 */
	new_reserved_regions_size = PAGE_ALIGN((INIT_MEMBLOCK_REGIONS * 2) *
					sizeof(struct memblock_region));
	/*
	 * double_array() finds a free memory range for the new
	 * reserved.regions and reserves the range it occupies, so one extra
	 * region (of new_reserved_regions_size bytes) appears in
	 * memblock.reserved.
	 */
	ASSERT_EQ(memblock.reserved.cnt, INIT_MEMBLOCK_REGIONS + 2);
	ASSERT_EQ(memblock.reserved.total_size, (INIT_MEMBLOCK_REGIONS + 1) * MEM_SIZE +
						new_reserved_regions_size);
	ASSERT_EQ(memblock.reserved.max, INIT_MEMBLOCK_REGIONS * 2);

	/*
	 * Verify that memblock_reserve() still behaves normally after the
	 * array has been doubled.
	 */
	memblock_reserve(r.base, r.size);
	ASSERT_EQ(memblock.reserved.regions[0].base, r.base);
	ASSERT_EQ(memblock.reserved.regions[0].size, r.size);

	ASSERT_EQ(memblock.reserved.cnt, INIT_MEMBLOCK_REGIONS + 3);
	ASSERT_EQ(memblock.reserved.total_size, (INIT_MEMBLOCK_REGIONS + 1) * MEM_SIZE +
						new_reserved_regions_size +
						r.size);
	ASSERT_EQ(memblock.reserved.max, INIT_MEMBLOCK_REGIONS * 2);

	dummy_physical_memory_cleanup();

	/*
	 * The current reserved.regions lives inside the range allocated by
	 * dummy_physical_memory_init(), which was just freed and must not
	 * be used any more. Restore the original region array so that later
	 * tests are unaffected by the doubling.
	 */
	memblock.reserved.regions = orig_region;
	memblock.reserved.cnt = INIT_MEMBLOCK_RESERVED_REGIONS;

	test_pass_pop();

	return 0;
}
984 
985 
/*
 * A test that tries to reserve a 129th memory block with the free slot at
 * every possible position. Each iteration expects memblock_double_array()
 * to double memblock.reserved.max and move reserved.regions into a newly
 * chosen memory range.
 *
 *  0               1               2                 128
 *  +-------+       +-------+       +-------+         +-------+
 *  |  32K  |       |  32K  |       |  32K  |   ...   |  32K  |
 *  +-------+-------+-------+-------+-------+         +-------+
 *          |<-32K->|       |<-32K->|
 *
 */
/* Keep a gap between regions so they are not merged. */
#define MEMORY_BASE(idx) (SZ_128K + (MEM_SIZE * 2) * (idx))
static int memblock_reserve_all_locations_check(void)
{
	int i, skip;
	void *orig_region;
	struct region r = {
		.base = SZ_16K,
		.size = SZ_16K,
	};
	phys_addr_t new_reserved_regions_size;

	PREFIX_PUSH();

	/* Try reserving the 129th memory block at every possible position. */
	for (skip = 0; skip < INIT_MEMBLOCK_REGIONS + 1; skip++) {
		reset_memblock_regions();
		memblock_allow_resize();

		/* Add a valid memory region for double_array() to allocate from. */
		dummy_physical_memory_init();
		memblock_add(dummy_physical_memory_base(), MEM_SIZE);

		for (i = 0; i < INIT_MEMBLOCK_REGIONS + 1; i++) {
			if (i == skip)
				continue;

			/* Reserve fake memory regions until the array is full. */
			memblock_reserve(MEMORY_BASE(i), MEM_SIZE);

			if (i < skip) {
				ASSERT_EQ(memblock.reserved.cnt, i + 1);
				ASSERT_EQ(memblock.reserved.total_size, (i + 1) * MEM_SIZE);
			} else {
				/* Slot `skip` was skipped, so the count lags by one. */
				ASSERT_EQ(memblock.reserved.cnt, i);
				ASSERT_EQ(memblock.reserved.total_size, i * MEM_SIZE);
			}
		}

		orig_region = memblock.reserved.regions;

		/* Filling the skipped slot is the 129th region; the array doubles. */
		memblock_reserve(MEMORY_BASE(skip), MEM_SIZE);

		/*
		 * This is the size of the memory range occupied by the
		 * doubled reserved.regions array; it is itself reserved
		 * because it is in use. The size feeds into the expected
		 * total_size below.
		 */
		new_reserved_regions_size = PAGE_ALIGN((INIT_MEMBLOCK_REGIONS * 2) *
						sizeof(struct memblock_region));
		/*
		 * double_array() finds a free memory range for the new
		 * reserved.regions and reserves the range it occupies, so
		 * one extra region (of new_reserved_regions_size bytes)
		 * appears in memblock.reserved.
		 */
		ASSERT_EQ(memblock.reserved.cnt, INIT_MEMBLOCK_REGIONS + 2);
		ASSERT_EQ(memblock.reserved.total_size, (INIT_MEMBLOCK_REGIONS + 1) * MEM_SIZE +
							new_reserved_regions_size);
		ASSERT_EQ(memblock.reserved.max, INIT_MEMBLOCK_REGIONS * 2);

		/*
		 * Verify that memblock_reserve() still behaves normally
		 * after the array has been doubled.
		 */
		memblock_reserve(r.base, r.size);
		ASSERT_EQ(memblock.reserved.regions[0].base, r.base);
		ASSERT_EQ(memblock.reserved.regions[0].size, r.size);

		ASSERT_EQ(memblock.reserved.cnt, INIT_MEMBLOCK_REGIONS + 3);
		ASSERT_EQ(memblock.reserved.total_size, (INIT_MEMBLOCK_REGIONS + 1) * MEM_SIZE +
							new_reserved_regions_size +
							r.size);
		ASSERT_EQ(memblock.reserved.max, INIT_MEMBLOCK_REGIONS * 2);

		dummy_physical_memory_cleanup();

		/*
		 * The current reserved.regions lives inside the range
		 * allocated by dummy_physical_memory_init(), which was just
		 * freed and must not be used any more. Restore the original
		 * region array so that later iterations and tests are
		 * unaffected by the doubling.
		 */
		memblock.reserved.regions = orig_region;
		memblock.reserved.cnt = INIT_MEMBLOCK_RESERVED_REGIONS;
	}

	test_pass_pop();

	return 0;
}
1090 
1091 static int memblock_reserve_checks(void)
1092 {
1093 	prefix_reset();
1094 	prefix_push(FUNC_RESERVE);
1095 	test_print("Running %s tests...\n", FUNC_RESERVE);
1096 
1097 	memblock_reserve_simple_check();
1098 	memblock_reserve_disjoint_check();
1099 	memblock_reserve_overlap_top_check();
1100 	memblock_reserve_overlap_bottom_check();
1101 	memblock_reserve_within_check();
1102 	memblock_reserve_twice_check();
1103 	memblock_reserve_between_check();
1104 	memblock_reserve_near_max_check();
1105 	memblock_reserve_many_check();
1106 	memblock_reserve_all_locations_check();
1107 
1108 	prefix_pop();
1109 
1110 	return 0;
1111 }
1112 
1113 /*
1114  * A simple test that tries to remove a region r1 from the array of
1115  * available memory regions. By "removing" a region we mean overwriting it
1116  * with the next region r2 in memblock.memory:
1117  *
1118  *  |  ......          +----------------+  |
1119  *  |  : r1 :          |       r2       |  |
1120  *  +--+----+----------+----------------+--+
1121  *                     ^
1122  *                     |
1123  *                     rgn.base
1124  *
1125  * Expect to add two memory blocks r1 and r2 and then remove r1 so that
1126  * r2 is the first available region. The region counter and total size
1127  * are updated.
1128  */
1129 static int memblock_remove_simple_check(void)
1130 {
1131 	struct memblock_region *rgn;
1132 
1133 	rgn = &memblock.memory.regions[0];
1134 
1135 	struct region r1 = {
1136 		.base = SZ_2K,
1137 		.size = SZ_4K
1138 	};
1139 	struct region r2 = {
1140 		.base = SZ_128K,
1141 		.size = SZ_4M
1142 	};
1143 
1144 	PREFIX_PUSH();
1145 
1146 	reset_memblock_regions();
1147 	memblock_add(r1.base, r1.size);
1148 	memblock_add(r2.base, r2.size);
1149 	memblock_remove(r1.base, r1.size);
1150 
1151 	ASSERT_EQ(rgn->base, r2.base);
1152 	ASSERT_EQ(rgn->size, r2.size);
1153 
1154 	ASSERT_EQ(memblock.memory.cnt, 1);
1155 	ASSERT_EQ(memblock.memory.total_size, r2.size);
1156 
1157 	test_pass_pop();
1158 
1159 	return 0;
1160 }
1161 
1162 /*
1163  * A test that tries to remove a region r2 that was not registered as
1164  * available memory (i.e. has no corresponding entry in memblock.memory):
1165  *
1166  *                     +----------------+
1167  *                     |       r2       |
1168  *                     +----------------+
1169  *  |  +----+                              |
1170  *  |  | r1 |                              |
1171  *  +--+----+------------------------------+
1172  *     ^
1173  *     |
1174  *     rgn.base
1175  *
1176  * Expect the array, regions counter and total size to not be modified.
1177  */
1178 static int memblock_remove_absent_check(void)
1179 {
1180 	struct memblock_region *rgn;
1181 
1182 	rgn = &memblock.memory.regions[0];
1183 
1184 	struct region r1 = {
1185 		.base = SZ_512K,
1186 		.size = SZ_4M
1187 	};
1188 	struct region r2 = {
1189 		.base = SZ_64M,
1190 		.size = SZ_1G
1191 	};
1192 
1193 	PREFIX_PUSH();
1194 
1195 	reset_memblock_regions();
1196 	memblock_add(r1.base, r1.size);
1197 	memblock_remove(r2.base, r2.size);
1198 
1199 	ASSERT_EQ(rgn->base, r1.base);
1200 	ASSERT_EQ(rgn->size, r1.size);
1201 
1202 	ASSERT_EQ(memblock.memory.cnt, 1);
1203 	ASSERT_EQ(memblock.memory.total_size, r1.size);
1204 
1205 	test_pass_pop();
1206 
1207 	return 0;
1208 }
1209 
1210 /*
1211  * A test that tries to remove a region r2 that overlaps with the
1212  * beginning of the already existing entry r1
1213  * (that is r1.base < r2.base + r2.size):
1214  *
1215  *           +-----------------+
1216  *           |       r2        |
1217  *           +-----------------+
1218  *  |                 .........+--------+  |
1219  *  |                 :     r1 |  rgn   |  |
1220  *  +-----------------+--------+--------+--+
1221  *                    ^        ^
1222  *                    |        |
1223  *                    |        rgn.base
1224  *                    r1.base
1225  *
1226  * Expect that only the intersection of both regions is removed from the
1227  * available memory pool. The regions counter and total size are updated.
1228  */
1229 static int memblock_remove_overlap_top_check(void)
1230 {
1231 	struct memblock_region *rgn;
1232 	phys_addr_t r1_end, r2_end, total_size;
1233 
1234 	rgn = &memblock.memory.regions[0];
1235 
1236 	struct region r1 = {
1237 		.base = SZ_32M,
1238 		.size = SZ_32M
1239 	};
1240 	struct region r2 = {
1241 		.base = SZ_16M,
1242 		.size = SZ_32M
1243 	};
1244 
1245 	PREFIX_PUSH();
1246 
1247 	r1_end = r1.base + r1.size;
1248 	r2_end = r2.base + r2.size;
1249 	total_size = r1_end - r2_end;
1250 
1251 	reset_memblock_regions();
1252 	memblock_add(r1.base, r1.size);
1253 	memblock_remove(r2.base, r2.size);
1254 
1255 	ASSERT_EQ(rgn->base, r1.base + r2.base);
1256 	ASSERT_EQ(rgn->size, total_size);
1257 
1258 	ASSERT_EQ(memblock.memory.cnt, 1);
1259 	ASSERT_EQ(memblock.memory.total_size, total_size);
1260 
1261 	test_pass_pop();
1262 
1263 	return 0;
1264 }
1265 
1266 /*
1267  * A test that tries to remove a region r2 that overlaps with the end of
1268  * the already existing region r1 (that is r2.base < r1.base + r1.size):
1269  *
1270  *        +--------------------------------+
1271  *        |               r2               |
1272  *        +--------------------------------+
1273  *  | +---+.....                           |
1274  *  | |rgn| r1 :                           |
1275  *  +-+---+----+---------------------------+
1276  *    ^
1277  *    |
1278  *    r1.base
1279  *
1280  * Expect that only the intersection of both regions is removed from the
1281  * available memory pool. The regions counter and total size are updated.
1282  */
1283 static int memblock_remove_overlap_bottom_check(void)
1284 {
1285 	struct memblock_region *rgn;
1286 	phys_addr_t total_size;
1287 
1288 	rgn = &memblock.memory.regions[0];
1289 
1290 	struct region r1 = {
1291 		.base = SZ_2M,
1292 		.size = SZ_64M
1293 	};
1294 	struct region r2 = {
1295 		.base = SZ_32M,
1296 		.size = SZ_256M
1297 	};
1298 
1299 	PREFIX_PUSH();
1300 
1301 	total_size = r2.base - r1.base;
1302 
1303 	reset_memblock_regions();
1304 	memblock_add(r1.base, r1.size);
1305 	memblock_remove(r2.base, r2.size);
1306 
1307 	ASSERT_EQ(rgn->base, r1.base);
1308 	ASSERT_EQ(rgn->size, total_size);
1309 
1310 	ASSERT_EQ(memblock.memory.cnt, 1);
1311 	ASSERT_EQ(memblock.memory.total_size, total_size);
1312 
1313 	test_pass_pop();
1314 
1315 	return 0;
1316 }
1317 
1318 /*
1319  * A test that tries to remove a region r2 that is within the range of
1320  * the already existing entry r1 (that is
1321  * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)):
1322  *
1323  *                  +----+
1324  *                  | r2 |
1325  *                  +----+
1326  *  | +-------------+....+---------------+ |
1327  *  | |     rgn1    | r1 |     rgn2      | |
1328  *  +-+-------------+----+---------------+-+
1329  *    ^
1330  *    |
1331  *    r1.base
1332  *
1333  * Expect that the region is split into two - one that ends at r2.base and
1334  * another that starts at r2.base + r2.size, with appropriate sizes. The
1335  * region counter and total size are updated.
1336  */
1337 static int memblock_remove_within_check(void)
1338 {
1339 	struct memblock_region *rgn1, *rgn2;
1340 	phys_addr_t r1_size, r2_size, total_size;
1341 
1342 	rgn1 = &memblock.memory.regions[0];
1343 	rgn2 = &memblock.memory.regions[1];
1344 
1345 	struct region r1 = {
1346 		.base = SZ_1M,
1347 		.size = SZ_32M
1348 	};
1349 	struct region r2 = {
1350 		.base = SZ_16M,
1351 		.size = SZ_1M
1352 	};
1353 
1354 	PREFIX_PUSH();
1355 
1356 	r1_size = r2.base - r1.base;
1357 	r2_size = (r1.base + r1.size) - (r2.base + r2.size);
1358 	total_size = r1_size + r2_size;
1359 
1360 	reset_memblock_regions();
1361 	memblock_add(r1.base, r1.size);
1362 	memblock_remove(r2.base, r2.size);
1363 
1364 	ASSERT_EQ(rgn1->base, r1.base);
1365 	ASSERT_EQ(rgn1->size, r1_size);
1366 
1367 	ASSERT_EQ(rgn2->base, r2.base + r2.size);
1368 	ASSERT_EQ(rgn2->size, r2_size);
1369 
1370 	ASSERT_EQ(memblock.memory.cnt, 2);
1371 	ASSERT_EQ(memblock.memory.total_size, total_size);
1372 
1373 	test_pass_pop();
1374 
1375 	return 0;
1376 }
1377 
1378 /*
1379  * A simple test that tries to remove a region r1 from the array of
1380  * available memory regions when r1 is the only available region.
1381  * Expect to add a memory block r1 and then remove r1 so that a dummy
1382  * region is added. The region counter stays the same, and the total size
1383  * is updated.
1384  */
1385 static int memblock_remove_only_region_check(void)
1386 {
1387 	struct memblock_region *rgn;
1388 
1389 	rgn = &memblock.memory.regions[0];
1390 
1391 	struct region r1 = {
1392 		.base = SZ_2K,
1393 		.size = SZ_4K
1394 	};
1395 
1396 	PREFIX_PUSH();
1397 
1398 	reset_memblock_regions();
1399 	memblock_add(r1.base, r1.size);
1400 	memblock_remove(r1.base, r1.size);
1401 
1402 	ASSERT_EQ(rgn->base, 0);
1403 	ASSERT_EQ(rgn->size, 0);
1404 
1405 	ASSERT_EQ(memblock.memory.cnt, 0);
1406 	ASSERT_EQ(memblock.memory.total_size, 0);
1407 
1408 	test_pass_pop();
1409 
1410 	return 0;
1411 }
1412 
1413 /*
 * A simple test that tries to remove a region r2 from the array of available
1415  * memory regions when r2 extends past PHYS_ADDR_MAX:
1416  *
1417  *                               +--------+
1418  *                               |   r2   |
1419  *                               +--------+
1420  *  |                        +---+....+
1421  *  |                        |rgn|    |
1422  *  +------------------------+---+----+
1423  *
1424  * Expect that only the portion between PHYS_ADDR_MAX and r2.base is removed.
1425  * Expect the total size of available memory to be updated and the counter to
1426  * not be updated.
1427  */
1428 static int memblock_remove_near_max_check(void)
1429 {
1430 	struct memblock_region *rgn;
1431 	phys_addr_t total_size;
1432 
1433 	rgn = &memblock.memory.regions[0];
1434 
1435 	struct region r1 = {
1436 		.base = PHYS_ADDR_MAX - SZ_2M,
1437 		.size = SZ_2M
1438 	};
1439 
1440 	struct region r2 = {
1441 		.base = PHYS_ADDR_MAX - SZ_1M,
1442 		.size = SZ_2M
1443 	};
1444 
1445 	PREFIX_PUSH();
1446 
1447 	total_size = r1.size - (PHYS_ADDR_MAX - r2.base);
1448 
1449 	reset_memblock_regions();
1450 	memblock_add(r1.base, r1.size);
1451 	memblock_remove(r2.base, r2.size);
1452 
1453 	ASSERT_EQ(rgn->base, r1.base);
1454 	ASSERT_EQ(rgn->size, total_size);
1455 
1456 	ASSERT_EQ(memblock.memory.cnt, 1);
1457 	ASSERT_EQ(memblock.memory.total_size, total_size);
1458 
1459 	test_pass_pop();
1460 
1461 	return 0;
1462 }
1463 
1464 /*
1465  * A test that tries to remove a region r3 that overlaps with two existing
1466  * regions r1 and r2:
1467  *
1468  *            +----------------+
1469  *            |       r3       |
1470  *            +----------------+
1471  *  |    +----+.....   ........+--------+
1472  *  |    |    |r1  :   :       |r2      |     |
1473  *  +----+----+----+---+-------+--------+-----+
1474  *
1475  * Expect that only the intersections of r1 with r3 and r2 with r3 are removed
1476  * from the available memory pool. Expect the total size of available memory to
1477  * be updated and the counter to not be updated.
1478  */
1479 static int memblock_remove_overlap_two_check(void)
1480 {
1481 	struct memblock_region *rgn1, *rgn2;
1482 	phys_addr_t new_r1_size, new_r2_size, r2_end, r3_end, total_size;
1483 
1484 	rgn1 = &memblock.memory.regions[0];
1485 	rgn2 = &memblock.memory.regions[1];
1486 
1487 	struct region r1 = {
1488 		.base = SZ_16M,
1489 		.size = SZ_32M
1490 	};
1491 	struct region r2 = {
1492 		.base = SZ_64M,
1493 		.size = SZ_64M
1494 	};
1495 	struct region r3 = {
1496 		.base = SZ_32M,
1497 		.size = SZ_64M
1498 	};
1499 
1500 	PREFIX_PUSH();
1501 
1502 	r2_end = r2.base + r2.size;
1503 	r3_end = r3.base + r3.size;
1504 	new_r1_size = r3.base - r1.base;
1505 	new_r2_size = r2_end - r3_end;
1506 	total_size = new_r1_size + new_r2_size;
1507 
1508 	reset_memblock_regions();
1509 	memblock_add(r1.base, r1.size);
1510 	memblock_add(r2.base, r2.size);
1511 	memblock_remove(r3.base, r3.size);
1512 
1513 	ASSERT_EQ(rgn1->base, r1.base);
1514 	ASSERT_EQ(rgn1->size, new_r1_size);
1515 
1516 	ASSERT_EQ(rgn2->base, r3_end);
1517 	ASSERT_EQ(rgn2->size, new_r2_size);
1518 
1519 	ASSERT_EQ(memblock.memory.cnt, 2);
1520 	ASSERT_EQ(memblock.memory.total_size, total_size);
1521 
1522 	test_pass_pop();
1523 
1524 	return 0;
1525 }
1526 
1527 static int memblock_remove_checks(void)
1528 {
1529 	prefix_reset();
1530 	prefix_push(FUNC_REMOVE);
1531 	test_print("Running %s tests...\n", FUNC_REMOVE);
1532 
1533 	memblock_remove_simple_check();
1534 	memblock_remove_absent_check();
1535 	memblock_remove_overlap_top_check();
1536 	memblock_remove_overlap_bottom_check();
1537 	memblock_remove_within_check();
1538 	memblock_remove_only_region_check();
1539 	memblock_remove_near_max_check();
1540 	memblock_remove_overlap_two_check();
1541 
1542 	prefix_pop();
1543 
1544 	return 0;
1545 }
1546 
1547 /*
1548  * A simple test that tries to free a memory block r1 that was marked
1549  * earlier as reserved. By "freeing" a region we mean overwriting it with
1550  * the next entry r2 in memblock.reserved:
1551  *
1552  *  |              ......           +----+ |
1553  *  |              : r1 :           | r2 | |
1554  *  +--------------+----+-----------+----+-+
1555  *                                  ^
1556  *                                  |
1557  *                                  rgn.base
1558  *
1559  * Expect to reserve two memory regions and then erase r1 region with the
1560  * value of r2. The region counter and total size are updated.
1561  */
1562 static int memblock_free_simple_check(void)
1563 {
1564 	struct memblock_region *rgn;
1565 
1566 	rgn = &memblock.reserved.regions[0];
1567 
1568 	struct region r1 = {
1569 		.base = SZ_4M,
1570 		.size = SZ_1M
1571 	};
1572 	struct region r2 = {
1573 		.base = SZ_8M,
1574 		.size = SZ_1M
1575 	};
1576 
1577 	PREFIX_PUSH();
1578 
1579 	reset_memblock_regions();
1580 	memblock_reserve(r1.base, r1.size);
1581 	memblock_reserve(r2.base, r2.size);
1582 	memblock_free((void *)r1.base, r1.size);
1583 
1584 	ASSERT_EQ(rgn->base, r2.base);
1585 	ASSERT_EQ(rgn->size, r2.size);
1586 
1587 	ASSERT_EQ(memblock.reserved.cnt, 1);
1588 	ASSERT_EQ(memblock.reserved.total_size, r2.size);
1589 
1590 	test_pass_pop();
1591 
1592 	return 0;
1593 }
1594 
1595 /*
1596  * A test that tries to free a region r2 that was not marked as reserved
1597  * (i.e. has no corresponding entry in memblock.reserved):
1598  *
1599  *                     +----------------+
1600  *                     |       r2       |
1601  *                     +----------------+
1602  *  |  +----+                              |
1603  *  |  | r1 |                              |
1604  *  +--+----+------------------------------+
1605  *     ^
1606  *     |
1607  *     rgn.base
1608  *
1609  * The array, regions counter and total size are not modified.
1610  */
1611 static int memblock_free_absent_check(void)
1612 {
1613 	struct memblock_region *rgn;
1614 
1615 	rgn = &memblock.reserved.regions[0];
1616 
1617 	struct region r1 = {
1618 		.base = SZ_2M,
1619 		.size = SZ_8K
1620 	};
1621 	struct region r2 = {
1622 		.base = SZ_16M,
1623 		.size = SZ_128M
1624 	};
1625 
1626 	PREFIX_PUSH();
1627 
1628 	reset_memblock_regions();
1629 	memblock_reserve(r1.base, r1.size);
1630 	memblock_free((void *)r2.base, r2.size);
1631 
1632 	ASSERT_EQ(rgn->base, r1.base);
1633 	ASSERT_EQ(rgn->size, r1.size);
1634 
1635 	ASSERT_EQ(memblock.reserved.cnt, 1);
1636 	ASSERT_EQ(memblock.reserved.total_size, r1.size);
1637 
1638 	test_pass_pop();
1639 
1640 	return 0;
1641 }
1642 
1643 /*
1644  * A test that tries to free a region r2 that overlaps with the beginning
1645  * of the already existing entry r1 (that is r1.base < r2.base + r2.size):
1646  *
1647  *     +----+
1648  *     | r2 |
1649  *     +----+
1650  *  |    ...+--------------+               |
1651  *  |    :  |    r1        |               |
1652  *  +----+--+--------------+---------------+
1653  *       ^  ^
1654  *       |  |
1655  *       |  rgn.base
1656  *       |
1657  *       r1.base
1658  *
1659  * Expect that only the intersection of both regions is freed. The
1660  * regions counter and total size are updated.
1661  */
1662 static int memblock_free_overlap_top_check(void)
1663 {
1664 	struct memblock_region *rgn;
1665 	phys_addr_t total_size;
1666 
1667 	rgn = &memblock.reserved.regions[0];
1668 
1669 	struct region r1 = {
1670 		.base = SZ_8M,
1671 		.size = SZ_32M
1672 	};
1673 	struct region r2 = {
1674 		.base = SZ_1M,
1675 		.size = SZ_8M
1676 	};
1677 
1678 	PREFIX_PUSH();
1679 
1680 	total_size = (r1.size + r1.base) - (r2.base + r2.size);
1681 
1682 	reset_memblock_regions();
1683 	memblock_reserve(r1.base, r1.size);
1684 	memblock_free((void *)r2.base, r2.size);
1685 
1686 	ASSERT_EQ(rgn->base, r2.base + r2.size);
1687 	ASSERT_EQ(rgn->size, total_size);
1688 
1689 	ASSERT_EQ(memblock.reserved.cnt, 1);
1690 	ASSERT_EQ(memblock.reserved.total_size, total_size);
1691 
1692 	test_pass_pop();
1693 
1694 	return 0;
1695 }
1696 
1697 /*
1698  * A test that tries to free a region r2 that overlaps with the end of
1699  * the already existing entry r1 (that is r2.base < r1.base + r1.size):
1700  *
1701  *                   +----------------+
1702  *                   |       r2       |
1703  *                   +----------------+
1704  *  |    +-----------+.....                |
1705  *  |    |       r1  |    :                |
1706  *  +----+-----------+----+----------------+
1707  *
1708  * Expect that only the intersection of both regions is freed. The
1709  * regions counter and total size are updated.
1710  */
1711 static int memblock_free_overlap_bottom_check(void)
1712 {
1713 	struct memblock_region *rgn;
1714 	phys_addr_t total_size;
1715 
1716 	rgn = &memblock.reserved.regions[0];
1717 
1718 	struct region r1 = {
1719 		.base = SZ_8M,
1720 		.size = SZ_32M
1721 	};
1722 	struct region r2 = {
1723 		.base = SZ_32M,
1724 		.size = SZ_32M
1725 	};
1726 
1727 	PREFIX_PUSH();
1728 
1729 	total_size = r2.base - r1.base;
1730 
1731 	reset_memblock_regions();
1732 	memblock_reserve(r1.base, r1.size);
1733 	memblock_free((void *)r2.base, r2.size);
1734 
1735 	ASSERT_EQ(rgn->base, r1.base);
1736 	ASSERT_EQ(rgn->size, total_size);
1737 
1738 	ASSERT_EQ(memblock.reserved.cnt, 1);
1739 	ASSERT_EQ(memblock.reserved.total_size, total_size);
1740 
1741 	test_pass_pop();
1742 
1743 	return 0;
1744 }
1745 
1746 /*
1747  * A test that tries to free a region r2 that is within the range of the
1748  * already existing entry r1 (that is
1749  * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)):
1750  *
1751  *                    +----+
1752  *                    | r2 |
1753  *                    +----+
1754  *  |    +------------+....+---------------+
1755  *  |    |    rgn1    | r1 |     rgn2      |
1756  *  +----+------------+----+---------------+
1757  *       ^
1758  *       |
1759  *       r1.base
1760  *
1761  * Expect that the region is split into two - one that ends at r2.base and
1762  * another that starts at r2.base + r2.size, with appropriate sizes. The
1763  * region counter and total size fields are updated.
1764  */
1765 static int memblock_free_within_check(void)
1766 {
1767 	struct memblock_region *rgn1, *rgn2;
1768 	phys_addr_t r1_size, r2_size, total_size;
1769 
1770 	rgn1 = &memblock.reserved.regions[0];
1771 	rgn2 = &memblock.reserved.regions[1];
1772 
1773 	struct region r1 = {
1774 		.base = SZ_1M,
1775 		.size = SZ_8M
1776 	};
1777 	struct region r2 = {
1778 		.base = SZ_4M,
1779 		.size = SZ_1M
1780 	};
1781 
1782 	PREFIX_PUSH();
1783 
1784 	r1_size = r2.base - r1.base;
1785 	r2_size = (r1.base + r1.size) - (r2.base + r2.size);
1786 	total_size = r1_size + r2_size;
1787 
1788 	reset_memblock_regions();
1789 	memblock_reserve(r1.base, r1.size);
1790 	memblock_free((void *)r2.base, r2.size);
1791 
1792 	ASSERT_EQ(rgn1->base, r1.base);
1793 	ASSERT_EQ(rgn1->size, r1_size);
1794 
1795 	ASSERT_EQ(rgn2->base, r2.base + r2.size);
1796 	ASSERT_EQ(rgn2->size, r2_size);
1797 
1798 	ASSERT_EQ(memblock.reserved.cnt, 2);
1799 	ASSERT_EQ(memblock.reserved.total_size, total_size);
1800 
1801 	test_pass_pop();
1802 
1803 	return 0;
1804 }
1805 
1806 /*
1807  * A simple test that tries to free a memory block r1 that was marked
1808  * earlier as reserved when r1 is the only available region.
1809  * Expect to reserve a memory block r1 and then free r1 so that r1 is
1810  * overwritten with a dummy region. The region counter stays the same,
1811  * and the total size is updated.
1812  */
1813 static int memblock_free_only_region_check(void)
1814 {
1815 	struct memblock_region *rgn;
1816 
1817 	rgn = &memblock.reserved.regions[0];
1818 
1819 	struct region r1 = {
1820 		.base = SZ_2K,
1821 		.size = SZ_4K
1822 	};
1823 
1824 	PREFIX_PUSH();
1825 
1826 	reset_memblock_regions();
1827 	memblock_reserve(r1.base, r1.size);
1828 	memblock_free((void *)r1.base, r1.size);
1829 
1830 	ASSERT_EQ(rgn->base, 0);
1831 	ASSERT_EQ(rgn->size, 0);
1832 
1833 	ASSERT_EQ(memblock.reserved.cnt, 0);
1834 	ASSERT_EQ(memblock.reserved.total_size, 0);
1835 
1836 	test_pass_pop();
1837 
1838 	return 0;
1839 }
1840 
1841 /*
 * A simple test that tries to free a region r2 when r2 extends past PHYS_ADDR_MAX:
1843  *
1844  *                               +--------+
1845  *                               |   r2   |
1846  *                               +--------+
1847  *  |                        +---+....+
1848  *  |                        |rgn|    |
1849  *  +------------------------+---+----+
1850  *
1851  * Expect that only the portion between PHYS_ADDR_MAX and r2.base is freed.
1852  * Expect the total size of reserved memory to be updated and the counter to
1853  * not be updated.
1854  */
1855 static int memblock_free_near_max_check(void)
1856 {
1857 	struct memblock_region *rgn;
1858 	phys_addr_t total_size;
1859 
1860 	rgn = &memblock.reserved.regions[0];
1861 
1862 	struct region r1 = {
1863 		.base = PHYS_ADDR_MAX - SZ_2M,
1864 		.size = SZ_2M
1865 	};
1866 
1867 	struct region r2 = {
1868 		.base = PHYS_ADDR_MAX - SZ_1M,
1869 		.size = SZ_2M
1870 	};
1871 
1872 	PREFIX_PUSH();
1873 
1874 	total_size = r1.size - (PHYS_ADDR_MAX - r2.base);
1875 
1876 	reset_memblock_regions();
1877 	memblock_reserve(r1.base, r1.size);
1878 	memblock_free((void *)r2.base, r2.size);
1879 
1880 	ASSERT_EQ(rgn->base, r1.base);
1881 	ASSERT_EQ(rgn->size, total_size);
1882 
1883 	ASSERT_EQ(memblock.reserved.cnt, 1);
1884 	ASSERT_EQ(memblock.reserved.total_size, total_size);
1885 
1886 	test_pass_pop();
1887 
1888 	return 0;
1889 }
1890 
1891 /*
1892  * A test that tries to free a reserved region r3 that overlaps with two
1893  * existing reserved regions r1 and r2:
1894  *
1895  *            +----------------+
1896  *            |       r3       |
1897  *            +----------------+
1898  *  |    +----+.....   ........+--------+
1899  *  |    |    |r1  :   :       |r2      |     |
1900  *  +----+----+----+---+-------+--------+-----+
1901  *
1902  * Expect that only the intersections of r1 with r3 and r2 with r3 are freed
1903  * from the collection of reserved memory. Expect the total size of reserved
1904  * memory to be updated and the counter to not be updated.
1905  */
1906 static int memblock_free_overlap_two_check(void)
1907 {
1908 	struct memblock_region *rgn1, *rgn2;
1909 	phys_addr_t new_r1_size, new_r2_size, r2_end, r3_end, total_size;
1910 
1911 	rgn1 = &memblock.reserved.regions[0];
1912 	rgn2 = &memblock.reserved.regions[1];
1913 
1914 	struct region r1 = {
1915 		.base = SZ_16M,
1916 		.size = SZ_32M
1917 	};
1918 	struct region r2 = {
1919 		.base = SZ_64M,
1920 		.size = SZ_64M
1921 	};
1922 	struct region r3 = {
1923 		.base = SZ_32M,
1924 		.size = SZ_64M
1925 	};
1926 
1927 	PREFIX_PUSH();
1928 
1929 	r2_end = r2.base + r2.size;
1930 	r3_end = r3.base + r3.size;
1931 	new_r1_size = r3.base - r1.base;
1932 	new_r2_size = r2_end - r3_end;
1933 	total_size = new_r1_size + new_r2_size;
1934 
1935 	reset_memblock_regions();
1936 	memblock_reserve(r1.base, r1.size);
1937 	memblock_reserve(r2.base, r2.size);
1938 	memblock_free((void *)r3.base, r3.size);
1939 
1940 	ASSERT_EQ(rgn1->base, r1.base);
1941 	ASSERT_EQ(rgn1->size, new_r1_size);
1942 
1943 	ASSERT_EQ(rgn2->base, r3_end);
1944 	ASSERT_EQ(rgn2->size, new_r2_size);
1945 
1946 	ASSERT_EQ(memblock.reserved.cnt, 2);
1947 	ASSERT_EQ(memblock.reserved.total_size, total_size);
1948 
1949 	test_pass_pop();
1950 
1951 	return 0;
1952 }
1953 
1954 static int memblock_free_checks(void)
1955 {
1956 	prefix_reset();
1957 	prefix_push(FUNC_FREE);
1958 	test_print("Running %s tests...\n", FUNC_FREE);
1959 
1960 	memblock_free_simple_check();
1961 	memblock_free_absent_check();
1962 	memblock_free_overlap_top_check();
1963 	memblock_free_overlap_bottom_check();
1964 	memblock_free_within_check();
1965 	memblock_free_only_region_check();
1966 	memblock_free_near_max_check();
1967 	memblock_free_overlap_two_check();
1968 
1969 	prefix_pop();
1970 
1971 	return 0;
1972 }
1973 
1974 static int memblock_set_bottom_up_check(void)
1975 {
1976 	prefix_push("memblock_set_bottom_up");
1977 
1978 	memblock_set_bottom_up(false);
1979 	ASSERT_EQ(memblock.bottom_up, false);
1980 	memblock_set_bottom_up(true);
1981 	ASSERT_EQ(memblock.bottom_up, true);
1982 
1983 	reset_memblock_attributes();
1984 	test_pass_pop();
1985 
1986 	return 0;
1987 }
1988 
1989 static int memblock_bottom_up_check(void)
1990 {
1991 	prefix_push("memblock_bottom_up");
1992 
1993 	memblock_set_bottom_up(false);
1994 	ASSERT_EQ(memblock_bottom_up(), memblock.bottom_up);
1995 	ASSERT_EQ(memblock_bottom_up(), false);
1996 	memblock_set_bottom_up(true);
1997 	ASSERT_EQ(memblock_bottom_up(), memblock.bottom_up);
1998 	ASSERT_EQ(memblock_bottom_up(), true);
1999 
2000 	reset_memblock_attributes();
2001 	test_pass_pop();
2002 
2003 	return 0;
2004 }
2005 
2006 static int memblock_bottom_up_checks(void)
2007 {
2008 	test_print("Running memblock_*bottom_up tests...\n");
2009 
2010 	prefix_reset();
2011 	memblock_set_bottom_up_check();
2012 	prefix_reset();
2013 	memblock_bottom_up_check();
2014 
2015 	return 0;
2016 }
2017 
2018 /*
2019  * A test that tries to trim memory when both ends of the memory region are
2020  * aligned. Expect that the memory will not be trimmed. Expect the counter to
2021  * not be updated.
2022  */
2023 static int memblock_trim_memory_aligned_check(void)
2024 {
2025 	struct memblock_region *rgn;
2026 	const phys_addr_t alignment = SMP_CACHE_BYTES;
2027 
2028 	rgn = &memblock.memory.regions[0];
2029 
2030 	struct region r = {
2031 		.base = alignment,
2032 		.size = alignment * 4
2033 	};
2034 
2035 	PREFIX_PUSH();
2036 
2037 	reset_memblock_regions();
2038 	memblock_add(r.base, r.size);
2039 	memblock_trim_memory(alignment);
2040 
2041 	ASSERT_EQ(rgn->base, r.base);
2042 	ASSERT_EQ(rgn->size, r.size);
2043 
2044 	ASSERT_EQ(memblock.memory.cnt, 1);
2045 
2046 	test_pass_pop();
2047 
2048 	return 0;
2049 }
2050 
2051 /*
2052  * A test that tries to trim memory when there are two available regions, r1 and
2053  * r2. Region r1 is aligned on both ends and region r2 is unaligned on one end
2054  * and smaller than the alignment:
2055  *
2056  *                                     alignment
2057  *                                     |--------|
2058  * |        +-----------------+        +------+   |
2059  * |        |        r1       |        |  r2  |   |
2060  * +--------+-----------------+--------+------+---+
2061  *          ^        ^        ^        ^      ^
2062  *          |________|________|________|      |
2063  *                            |               Unaligned address
2064  *                Aligned addresses
2065  *
2066  * Expect that r1 will not be trimmed and r2 will be removed. Expect the
2067  * counter to be updated.
2068  */
2069 static int memblock_trim_memory_too_small_check(void)
2070 {
2071 	struct memblock_region *rgn;
2072 	const phys_addr_t alignment = SMP_CACHE_BYTES;
2073 
2074 	rgn = &memblock.memory.regions[0];
2075 
2076 	struct region r1 = {
2077 		.base = alignment,
2078 		.size = alignment * 2
2079 	};
2080 	struct region r2 = {
2081 		.base = alignment * 4,
2082 		.size = alignment - SZ_2
2083 	};
2084 
2085 	PREFIX_PUSH();
2086 
2087 	reset_memblock_regions();
2088 	memblock_add(r1.base, r1.size);
2089 	memblock_add(r2.base, r2.size);
2090 	memblock_trim_memory(alignment);
2091 
2092 	ASSERT_EQ(rgn->base, r1.base);
2093 	ASSERT_EQ(rgn->size, r1.size);
2094 
2095 	ASSERT_EQ(memblock.memory.cnt, 1);
2096 
2097 	test_pass_pop();
2098 
2099 	return 0;
2100 }
2101 
2102 /*
2103  * A test that tries to trim memory when there are two available regions, r1 and
2104  * r2. Region r1 is aligned on both ends and region r2 is unaligned at the base
2105  * and aligned at the end:
2106  *
2107  *                               Unaligned address
2108  *                                       |
2109  *                                       v
2110  * |        +-----------------+          +---------------+   |
2111  * |        |        r1       |          |      r2       |   |
2112  * +--------+-----------------+----------+---------------+---+
2113  *          ^        ^        ^        ^        ^        ^
2114  *          |________|________|________|________|________|
2115  *                            |
2116  *                    Aligned addresses
2117  *
2118  * Expect that r1 will not be trimmed and r2 will be trimmed at the base.
2119  * Expect the counter to not be updated.
2120  */
2121 static int memblock_trim_memory_unaligned_base_check(void)
2122 {
2123 	struct memblock_region *rgn1, *rgn2;
2124 	const phys_addr_t alignment = SMP_CACHE_BYTES;
2125 	phys_addr_t offset = SZ_2;
2126 	phys_addr_t new_r2_base, new_r2_size;
2127 
2128 	rgn1 = &memblock.memory.regions[0];
2129 	rgn2 = &memblock.memory.regions[1];
2130 
2131 	struct region r1 = {
2132 		.base = alignment,
2133 		.size = alignment * 2
2134 	};
2135 	struct region r2 = {
2136 		.base = alignment * 4 + offset,
2137 		.size = alignment * 2 - offset
2138 	};
2139 
2140 	PREFIX_PUSH();
2141 
2142 	new_r2_base = r2.base + (alignment - offset);
2143 	new_r2_size = r2.size - (alignment - offset);
2144 
2145 	reset_memblock_regions();
2146 	memblock_add(r1.base, r1.size);
2147 	memblock_add(r2.base, r2.size);
2148 	memblock_trim_memory(alignment);
2149 
2150 	ASSERT_EQ(rgn1->base, r1.base);
2151 	ASSERT_EQ(rgn1->size, r1.size);
2152 
2153 	ASSERT_EQ(rgn2->base, new_r2_base);
2154 	ASSERT_EQ(rgn2->size, new_r2_size);
2155 
2156 	ASSERT_EQ(memblock.memory.cnt, 2);
2157 
2158 	test_pass_pop();
2159 
2160 	return 0;
2161 }
2162 
2163 /*
2164  * A test that tries to trim memory when there are two available regions, r1 and
2165  * r2. Region r1 is aligned on both ends and region r2 is aligned at the base
2166  * and unaligned at the end:
2167  *
2168  *                                             Unaligned address
2169  *                                                     |
2170  *                                                     v
2171  * |        +-----------------+        +---------------+   |
2172  * |        |        r1       |        |      r2       |   |
2173  * +--------+-----------------+--------+---------------+---+
2174  *          ^        ^        ^        ^        ^        ^
2175  *          |________|________|________|________|________|
2176  *                            |
2177  *                    Aligned addresses
2178  *
2179  * Expect that r1 will not be trimmed and r2 will be trimmed at the end.
2180  * Expect the counter to not be updated.
2181  */
2182 static int memblock_trim_memory_unaligned_end_check(void)
2183 {
2184 	struct memblock_region *rgn1, *rgn2;
2185 	const phys_addr_t alignment = SMP_CACHE_BYTES;
2186 	phys_addr_t offset = SZ_2;
2187 	phys_addr_t new_r2_size;
2188 
2189 	rgn1 = &memblock.memory.regions[0];
2190 	rgn2 = &memblock.memory.regions[1];
2191 
2192 	struct region r1 = {
2193 		.base = alignment,
2194 		.size = alignment * 2
2195 	};
2196 	struct region r2 = {
2197 		.base = alignment * 4,
2198 		.size = alignment * 2 - offset
2199 	};
2200 
2201 	PREFIX_PUSH();
2202 
2203 	new_r2_size = r2.size - (alignment - offset);
2204 
2205 	reset_memblock_regions();
2206 	memblock_add(r1.base, r1.size);
2207 	memblock_add(r2.base, r2.size);
2208 	memblock_trim_memory(alignment);
2209 
2210 	ASSERT_EQ(rgn1->base, r1.base);
2211 	ASSERT_EQ(rgn1->size, r1.size);
2212 
2213 	ASSERT_EQ(rgn2->base, r2.base);
2214 	ASSERT_EQ(rgn2->size, new_r2_size);
2215 
2216 	ASSERT_EQ(memblock.memory.cnt, 2);
2217 
2218 	test_pass_pop();
2219 
2220 	return 0;
2221 }
2222 
2223 static int memblock_trim_memory_checks(void)
2224 {
2225 	prefix_reset();
2226 	prefix_push(FUNC_TRIM);
2227 	test_print("Running %s tests...\n", FUNC_TRIM);
2228 
2229 	memblock_trim_memory_aligned_check();
2230 	memblock_trim_memory_too_small_check();
2231 	memblock_trim_memory_unaligned_base_check();
2232 	memblock_trim_memory_unaligned_end_check();
2233 
2234 	prefix_pop();
2235 
2236 	return 0;
2237 }
2238 
2239 int memblock_basic_checks(void)
2240 {
2241 	memblock_initialization_check();
2242 	memblock_add_checks();
2243 	memblock_reserve_checks();
2244 	memblock_remove_checks();
2245 	memblock_free_checks();
2246 	memblock_bottom_up_checks();
2247 	memblock_trim_memory_checks();
2248 
2249 	return 0;
2250 }
2251