xref: /linux/tools/testing/memblock/tests/basic_api.c (revision 3a38ef2b3cb6b63c105247b5ea4a9cf600e673f0)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 #include <string.h>
3 #include <linux/memblock.h>
4 #include "basic_api.h"
5 
6 #define EXPECTED_MEMBLOCK_REGIONS			128
7 #define FUNC_ADD					"memblock_add"
8 #define FUNC_RESERVE					"memblock_reserve"
9 #define FUNC_REMOVE					"memblock_remove"
10 #define FUNC_FREE					"memblock_free"
11 #define FUNC_TRIM					"memblock_trim_memory"
12 
13 static int memblock_initialization_check(void)
14 {
15 	PREFIX_PUSH();
16 
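	/*
	 * Both the memory and reserved region arrays are initialized with a
	 * single empty dummy region, which is why cnt starts at 1.
	 */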
17 	ASSERT_NE(memblock.memory.regions, NULL);
18 	ASSERT_EQ(memblock.memory.cnt, 1);
19 	ASSERT_EQ(memblock.memory.max, EXPECTED_MEMBLOCK_REGIONS);
20 	ASSERT_EQ(strcmp(memblock.memory.name, "memory"), 0);
21 
22 	ASSERT_NE(memblock.reserved.regions, NULL);
23 	ASSERT_EQ(memblock.reserved.cnt, 1);
24 	ASSERT_EQ(memblock.reserved.max, EXPECTED_MEMBLOCK_REGIONS);
25 	ASSERT_EQ(strcmp(memblock.reserved.name, "reserved"), 0);
26 
27 	ASSERT_EQ(memblock.bottom_up, false);
28 	ASSERT_EQ(memblock.current_limit, MEMBLOCK_ALLOC_ANYWHERE);
29 
30 	test_pass_pop();
31 
32 	return 0;
33 }
34 
35 /*
36  * A simple test that adds a memory block of a specified base address
37  * and size to the collection of available memory regions (memblock.memory).
38  * Expect to create a new entry. The region counter and total memory get
39  * updated.
40  */
41 static int memblock_add_simple_check(void)
42 {
43 	struct memblock_region *rgn;
44 
45 	rgn = &memblock.memory.regions[0];
46 
47 	struct region r = {
48 		.base = SZ_1G,
49 		.size = SZ_4M
50 	};
51 
52 	PREFIX_PUSH();
53 
54 	reset_memblock_regions();
55 	memblock_add(r.base, r.size);
56 
57 	ASSERT_EQ(rgn->base, r.base);
58 	ASSERT_EQ(rgn->size, r.size);
59 
60 	ASSERT_EQ(memblock.memory.cnt, 1);
61 	ASSERT_EQ(memblock.memory.total_size, r.size);
62 
63 	test_pass_pop();
64 
65 	return 0;
66 }
67 
68 /*
69  * A simple test that adds a memory block of a specified base address, size,
70  * NUMA node and memory flags to the collection of available memory regions.
71  * Expect to create a new entry. The region counter and total memory get
72  * updated.
73  */
74 static int memblock_add_node_simple_check(void)
75 {
76 	struct memblock_region *rgn;
77 
78 	rgn = &memblock.memory.regions[0];
79 
80 	struct region r = {
81 		.base = SZ_1M,
82 		.size = SZ_16M
83 	};
84 
85 	PREFIX_PUSH();
86 
87 	reset_memblock_regions();
88 	memblock_add_node(r.base, r.size, 1, MEMBLOCK_HOTPLUG);
89 
90 	ASSERT_EQ(rgn->base, r.base);
91 	ASSERT_EQ(rgn->size, r.size);
92 #ifdef CONFIG_NUMA
93 	ASSERT_EQ(rgn->nid, 1);
94 #endif
95 	ASSERT_EQ(rgn->flags, MEMBLOCK_HOTPLUG);
96 
97 	ASSERT_EQ(memblock.memory.cnt, 1);
98 	ASSERT_EQ(memblock.memory.total_size, r.size);
99 
100 	test_pass_pop();
101 
102 	return 0;
103 }
104 
105 /*
106  * A test that tries to add two memory blocks that don't overlap with one
107  * another:
108  *
109  *  |        +--------+        +--------+  |
110  *  |        |   r1   |        |   r2   |  |
111  *  +--------+--------+--------+--------+--+
112  *
113  * Expect to add two correctly initialized entries to the collection of
114  * available memory regions (memblock.memory). The total size and
115  * region counter fields get updated.
116  */
117 static int memblock_add_disjoint_check(void)
118 {
119 	struct memblock_region *rgn1, *rgn2;
120 
121 	rgn1 = &memblock.memory.regions[0];
122 	rgn2 = &memblock.memory.regions[1];
123 
124 	struct region r1 = {
125 		.base = SZ_1G,
126 		.size = SZ_8K
127 	};
128 	struct region r2 = {
129 		.base = SZ_1G + SZ_16K,
130 		.size = SZ_8K
131 	};
132 
133 	PREFIX_PUSH();
134 
135 	reset_memblock_regions();
136 	memblock_add(r1.base, r1.size);
137 	memblock_add(r2.base, r2.size);
138 
139 	ASSERT_EQ(rgn1->base, r1.base);
140 	ASSERT_EQ(rgn1->size, r1.size);
141 
142 	ASSERT_EQ(rgn2->base, r2.base);
143 	ASSERT_EQ(rgn2->size, r2.size);
144 
145 	ASSERT_EQ(memblock.memory.cnt, 2);
146 	ASSERT_EQ(memblock.memory.total_size, r1.size + r2.size);
147 
148 	test_pass_pop();
149 
150 	return 0;
151 }
152 
153 /*
154  * A test that tries to add two memory blocks r1 and r2, where r2 overlaps
155  * with the beginning of r1 (that is r1.base < r2.base + r2.size):
156  *
157  *  |    +----+----+------------+          |
158  *  |    |    |r2  |   r1       |          |
159  *  +----+----+----+------------+----------+
160  *       ^    ^
161  *       |    |
162  *       |    r1.base
163  *       |
164  *       r2.base
165  *
166  * Expect to merge the two entries into one region that starts at r2.base
167  * and has size of two regions minus their intersection. The total size of
168  * the available memory is updated, and the region counter stays the same.
169  */
170 static int memblock_add_overlap_top_check(void)
171 {
172 	struct memblock_region *rgn;
173 	phys_addr_t total_size;
174 
175 	rgn = &memblock.memory.regions[0];
176 
177 	struct region r1 = {
178 		.base = SZ_512M,
179 		.size = SZ_1G
180 	};
181 	struct region r2 = {
182 		.base = SZ_256M,
183 		.size = SZ_512M
184 	};
185 
186 	PREFIX_PUSH();
187 
188 	total_size = (r1.base - r2.base) + r1.size;
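	/*
	 * With the values above the merged region spans
	 * [r2.base, r1.base + r1.size) = [SZ_256M, SZ_512M + SZ_1G),
	 * so total_size = SZ_256M + SZ_1G.
	 */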
189 
190 	reset_memblock_regions();
191 	memblock_add(r1.base, r1.size);
192 	memblock_add(r2.base, r2.size);
193 
194 	ASSERT_EQ(rgn->base, r2.base);
195 	ASSERT_EQ(rgn->size, total_size);
196 
197 	ASSERT_EQ(memblock.memory.cnt, 1);
198 	ASSERT_EQ(memblock.memory.total_size, total_size);
199 
200 	test_pass_pop();
201 
202 	return 0;
203 }
204 
205 /*
206  * A test that tries to add two memory blocks r1 and r2, where r2 overlaps
207  * with the end of r1 (that is r2.base < r1.base + r1.size):
208  *
209  *  |  +--+------+----------+              |
210  *  |  |  | r1   | r2       |              |
211  *  +--+--+------+----------+--------------+
212  *     ^  ^
213  *     |  |
214  *     |  r2.base
215  *     |
216  *     r1.base
217  *
218  * Expect to merge the two entries into one region that starts at r1.base
219  * and has size of two regions minus their intersection. The total size of
220  * the available memory is updated, and the region counter stays the same.
221  */
222 static int memblock_add_overlap_bottom_check(void)
223 {
224 	struct memblock_region *rgn;
225 	phys_addr_t total_size;
226 
227 	rgn = &memblock.memory.regions[0];
228 
229 	struct region r1 = {
230 		.base = SZ_128M,
231 		.size = SZ_512M
232 	};
233 	struct region r2 = {
234 		.base = SZ_256M,
235 		.size = SZ_1G
236 	};
237 
238 	PREFIX_PUSH();
239 
240 	total_size = (r2.base - r1.base) + r2.size;
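	/*
	 * Here the merged region spans [r1.base, r2.base + r2.size) =
	 * [SZ_128M, SZ_256M + SZ_1G), so total_size = SZ_128M + SZ_1G.
	 */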
241 
242 	reset_memblock_regions();
243 	memblock_add(r1.base, r1.size);
244 	memblock_add(r2.base, r2.size);
245 
246 	ASSERT_EQ(rgn->base, r1.base);
247 	ASSERT_EQ(rgn->size, total_size);
248 
249 	ASSERT_EQ(memblock.memory.cnt, 1);
250 	ASSERT_EQ(memblock.memory.total_size, total_size);
251 
252 	test_pass_pop();
253 
254 	return 0;
255 }
256 
257 /*
258  * A test that tries to add two memory blocks r1 and r2, where r2 is
259  * within the range of r1 (that is r1.base < r2.base &&
260  * r2.base + r2.size < r1.base + r1.size):
261  *
262  *  |   +-------+--+-----------------------+
263  *  |   |       |r2|      r1               |
264  *  +---+-------+--+-----------------------+
265  *      ^
266  *      |
267  *      r1.base
268  *
269  * Expect the two entries to merge into one region identical to r1.
270  * The counter and total size of available memory are not updated.
271  */
272 static int memblock_add_within_check(void)
273 {
274 	struct memblock_region *rgn;
275 
276 	rgn = &memblock.memory.regions[0];
277 
278 	struct region r1 = {
279 		.base = SZ_8M,
280 		.size = SZ_32M
281 	};
282 	struct region r2 = {
283 		.base = SZ_16M,
284 		.size = SZ_1M
285 	};
286 
287 	PREFIX_PUSH();
288 
289 	reset_memblock_regions();
290 	memblock_add(r1.base, r1.size);
291 	memblock_add(r2.base, r2.size);
292 
293 	ASSERT_EQ(rgn->base, r1.base);
294 	ASSERT_EQ(rgn->size, r1.size);
295 
296 	ASSERT_EQ(memblock.memory.cnt, 1);
297 	ASSERT_EQ(memblock.memory.total_size, r1.size);
298 
299 	test_pass_pop();
300 
301 	return 0;
302 }
303 
304 /*
305  * A simple test that tries to add the same memory block twice. Expect
306  * the counter and total size of available memory to not be updated.
307  */
308 static int memblock_add_twice_check(void)
309 {
310 	struct region r = {
311 		.base = SZ_16K,
312 		.size = SZ_2M
313 	};
314 
315 	PREFIX_PUSH();
316 
317 	reset_memblock_regions();
318 
319 	memblock_add(r.base, r.size);
320 	memblock_add(r.base, r.size);
321 
322 	ASSERT_EQ(memblock.memory.cnt, 1);
323 	ASSERT_EQ(memblock.memory.total_size, r.size);
324 
325 	test_pass_pop();
326 
327 	return 0;
328 }
329 
330 /*
331  * A test that tries to add two memory blocks that don't overlap with one
332  * another and then add a third memory block in the space between the first two:
333  *
334  *  |        +--------+--------+--------+  |
335  *  |        |   r1   |   r3   |   r2   |  |
336  *  +--------+--------+--------+--------+--+
337  *
338  * Expect to merge the three entries into one region that starts at r1.base
339  * and has size of r1.size + r2.size + r3.size. The region counter and total
340  * size of the available memory are updated.
341  */
342 static int memblock_add_between_check(void)
343 {
344 	struct memblock_region *rgn;
345 	phys_addr_t total_size;
346 
347 	rgn = &memblock.memory.regions[0];
348 
349 	struct region r1 = {
350 		.base = SZ_1G,
351 		.size = SZ_8K
352 	};
353 	struct region r2 = {
354 		.base = SZ_1G + SZ_16K,
355 		.size = SZ_8K
356 	};
357 	struct region r3 = {
358 		.base = SZ_1G + SZ_8K,
359 		.size = SZ_8K
360 	};
361 
362 	PREFIX_PUSH();
363 
364 	total_size = r1.size + r2.size + r3.size;
365 
366 	reset_memblock_regions();
367 	memblock_add(r1.base, r1.size);
368 	memblock_add(r2.base, r2.size);
369 	memblock_add(r3.base, r3.size);
370 
371 	ASSERT_EQ(rgn->base, r1.base);
372 	ASSERT_EQ(rgn->size, total_size);
373 
374 	ASSERT_EQ(memblock.memory.cnt, 1);
375 	ASSERT_EQ(memblock.memory.total_size, total_size);
376 
377 	test_pass_pop();
378 
379 	return 0;
380 }
381 
382 /*
383  * A simple test that tries to add a memory block r when r extends past
384  * PHYS_ADDR_MAX:
385  *
386  *                               +--------+
387  *                               |    r   |
388  *                               +--------+
389  *  |                            +----+
390  *  |                            | rgn|
391  *  +----------------------------+----+
392  *
393  * Expect to add a memory block of size PHYS_ADDR_MAX - r.base. Expect the
394  * total size of available memory and the counter to be updated.
395  */
396 static int memblock_add_near_max_check(void)
397 {
398 	struct memblock_region *rgn;
399 	phys_addr_t total_size;
400 
401 	rgn = &memblock.memory.regions[0];
402 
403 	struct region r = {
404 		.base = PHYS_ADDR_MAX - SZ_1M,
405 		.size = SZ_2M
406 	};
407 
408 	PREFIX_PUSH();
409 
410 	total_size = PHYS_ADDR_MAX - r.base;
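	/*
	 * r extends SZ_1M past PHYS_ADDR_MAX, so memblock clamps it and only
	 * [r.base, PHYS_ADDR_MAX), i.e. SZ_1M, is actually added.
	 */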
411 
412 	reset_memblock_regions();
413 	memblock_add(r.base, r.size);
414 
415 	ASSERT_EQ(rgn->base, r.base);
416 	ASSERT_EQ(rgn->size, total_size);
417 
418 	ASSERT_EQ(memblock.memory.cnt, 1);
419 	ASSERT_EQ(memblock.memory.total_size, total_size);
420 
421 	test_pass_pop();
422 
423 	return 0;
424 }
425 
426 static int memblock_add_checks(void)
427 {
428 	prefix_reset();
429 	prefix_push(FUNC_ADD);
430 	test_print("Running %s tests...\n", FUNC_ADD);
431 
432 	memblock_add_simple_check();
433 	memblock_add_node_simple_check();
434 	memblock_add_disjoint_check();
435 	memblock_add_overlap_top_check();
436 	memblock_add_overlap_bottom_check();
437 	memblock_add_within_check();
438 	memblock_add_twice_check();
439 	memblock_add_between_check();
440 	memblock_add_near_max_check();
441 
442 	prefix_pop();
443 
444 	return 0;
445 }
446 
447 /*
448  * A simple test that marks a memory block of a specified base address
449  * and size as reserved, adding it to the collection of reserved memory
450  * regions (memblock.reserved). Expect to create a new entry. The region
451  * counter and total memory size are updated.
452  */
453 static int memblock_reserve_simple_check(void)
454 {
455 	struct memblock_region *rgn;
456 
457 	rgn =  &memblock.reserved.regions[0];
458 
459 	struct region r = {
460 		.base = SZ_2G,
461 		.size = SZ_128M
462 	};
463 
464 	PREFIX_PUSH();
465 
466 	reset_memblock_regions();
467 	memblock_reserve(r.base, r.size);
468 
469 	ASSERT_EQ(rgn->base, r.base);
470 	ASSERT_EQ(rgn->size, r.size);
471 
	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, r.size);

472 	test_pass_pop();
473 
474 	return 0;
475 }
476 
477 /*
478  * A test that tries to mark two memory blocks that don't overlap as reserved:
479  *
480  *  |        +--+      +----------------+  |
481  *  |        |r1|      |       r2       |  |
482  *  +--------+--+------+----------------+--+
483  *
484  * Expect to add two entries to the collection of reserved memory regions
485  * (memblock.reserved). The total size and region counter for
486  * memblock.reserved are updated.
487  */
488 static int memblock_reserve_disjoint_check(void)
489 {
490 	struct memblock_region *rgn1, *rgn2;
491 
492 	rgn1 = &memblock.reserved.regions[0];
493 	rgn2 = &memblock.reserved.regions[1];
494 
495 	struct region r1 = {
496 		.base = SZ_256M,
497 		.size = SZ_16M
498 	};
499 	struct region r2 = {
500 		.base = SZ_512M,
501 		.size = SZ_512M
502 	};
503 
504 	PREFIX_PUSH();
505 
506 	reset_memblock_regions();
507 	memblock_reserve(r1.base, r1.size);
508 	memblock_reserve(r2.base, r2.size);
509 
510 	ASSERT_EQ(rgn1->base, r1.base);
511 	ASSERT_EQ(rgn1->size, r1.size);
512 
513 	ASSERT_EQ(rgn2->base, r2.base);
514 	ASSERT_EQ(rgn2->size, r2.size);
515 
516 	ASSERT_EQ(memblock.reserved.cnt, 2);
517 	ASSERT_EQ(memblock.reserved.total_size, r1.size + r2.size);
518 
519 	test_pass_pop();
520 
521 	return 0;
522 }
523 
524 /*
525  * A test that tries to mark two memory blocks r1 and r2 as reserved,
526  * where r2 overlaps with the beginning of r1 (that is
527  * r1.base < r2.base + r2.size):
528  *
529  *  |  +--------------+--+--------------+  |
530  *  |  |       r2     |  |     r1       |  |
531  *  +--+--------------+--+--------------+--+
532  *     ^              ^
533  *     |              |
534  *     |              r1.base
535  *     |
536  *     r2.base
537  *
538  * Expect to merge two entries into one region that starts at r2.base and
539  * has size of two regions minus their intersection. The total size of the
540  * reserved memory is updated, and the region counter is not updated.
541  */
542 static int memblock_reserve_overlap_top_check(void)
543 {
544 	struct memblock_region *rgn;
545 	phys_addr_t total_size;
546 
547 	rgn = &memblock.reserved.regions[0];
548 
549 	struct region r1 = {
550 		.base = SZ_1G,
551 		.size = SZ_1G
552 	};
553 	struct region r2 = {
554 		.base = SZ_128M,
555 		.size = SZ_1G
556 	};
557 
558 	PREFIX_PUSH();
559 
560 	total_size = (r1.base - r2.base) + r1.size;
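	/*
	 * The merged region spans [r2.base, r1.base + r1.size) =
	 * [SZ_128M, SZ_2G), so total_size = SZ_2G - SZ_128M.
	 */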
561 
562 	reset_memblock_regions();
563 	memblock_reserve(r1.base, r1.size);
564 	memblock_reserve(r2.base, r2.size);
565 
566 	ASSERT_EQ(rgn->base, r2.base);
567 	ASSERT_EQ(rgn->size, total_size);
568 
569 	ASSERT_EQ(memblock.reserved.cnt, 1);
570 	ASSERT_EQ(memblock.reserved.total_size, total_size);
571 
572 	test_pass_pop();
573 
574 	return 0;
575 }
576 
577 /*
578  * A test that tries to mark two memory blocks r1 and r2 as reserved,
579  * where r2 overlaps with the end of r1 (that is
580  * r2.base < r1.base + r1.size):
581  *
582  *  |  +--------------+--+--------------+  |
583  *  |  |       r1     |  |     r2       |  |
584  *  +--+--------------+--+--------------+--+
585  *     ^              ^
586  *     |              |
587  *     |              r2.base
588  *     |
589  *     r1.base
590  *
591  * Expect to merge two entries into one region that starts at r1.base and
592  * has size of two regions minus their intersection. The total size of the
593  * reserved memory is updated, and the region counter is not updated.
594  */
595 static int memblock_reserve_overlap_bottom_check(void)
596 {
597 	struct memblock_region *rgn;
598 	phys_addr_t total_size;
599 
600 	rgn = &memblock.reserved.regions[0];
601 
602 	struct region r1 = {
603 		.base = SZ_2K,
604 		.size = SZ_128K
605 	};
606 	struct region r2 = {
607 		.base = SZ_128K,
608 		.size = SZ_128K
609 	};
610 
611 	PREFIX_PUSH();
612 
613 	total_size = (r2.base - r1.base) + r2.size;
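	/*
	 * The merged region spans [r1.base, r2.base + r2.size) =
	 * [SZ_2K, SZ_256K), so total_size = SZ_256K - SZ_2K.
	 */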
614 
615 	reset_memblock_regions();
616 	memblock_reserve(r1.base, r1.size);
617 	memblock_reserve(r2.base, r2.size);
618 
619 	ASSERT_EQ(rgn->base, r1.base);
620 	ASSERT_EQ(rgn->size, total_size);
621 
622 	ASSERT_EQ(memblock.reserved.cnt, 1);
623 	ASSERT_EQ(memblock.reserved.total_size, total_size);
624 
625 	test_pass_pop();
626 
627 	return 0;
628 }
629 
630 /*
631  * A test that tries to mark two memory blocks r1 and r2 as reserved,
632  * where r2 is within the range of r1 (that is
633  * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)):
634  *
635  *  | +-----+--+---------------------------|
636  *  | |     |r2|          r1               |
637  *  +-+-----+--+---------------------------+
638  *    ^     ^
639  *    |     |
640  *    |     r2.base
641  *    |
642  *    r1.base
643  *
644  * Expect the two entries to merge into one region identical to r1. The
645  * counter and total size of reserved memory are not updated.
646  */
647 static int memblock_reserve_within_check(void)
648 {
649 	struct memblock_region *rgn;
650 
651 	rgn = &memblock.reserved.regions[0];
652 
653 	struct region r1 = {
654 		.base = SZ_1M,
655 		.size = SZ_8M
656 	};
657 	struct region r2 = {
658 		.base = SZ_2M,
659 		.size = SZ_64K
660 	};
661 
662 	PREFIX_PUSH();
663 
664 	reset_memblock_regions();
665 	memblock_reserve(r1.base, r1.size);
666 	memblock_reserve(r2.base, r2.size);
667 
668 	ASSERT_EQ(rgn->base, r1.base);
669 	ASSERT_EQ(rgn->size, r1.size);
670 
671 	ASSERT_EQ(memblock.reserved.cnt, 1);
672 	ASSERT_EQ(memblock.reserved.total_size, r1.size);
673 
674 	test_pass_pop();
675 
676 	return 0;
677 }
678 
679 /*
680  * A simple test that tries to reserve the same memory block twice.
681  * Expect the region counter and total size of reserved memory to not
682  * be updated.
683  */
684 static int memblock_reserve_twice_check(void)
685 {
686 	struct region r = {
687 		.base = SZ_16K,
688 		.size = SZ_2M
689 	};
690 
691 	PREFIX_PUSH();
692 
693 	reset_memblock_regions();
694 
695 	memblock_reserve(r.base, r.size);
696 	memblock_reserve(r.base, r.size);
697 
698 	ASSERT_EQ(memblock.reserved.cnt, 1);
699 	ASSERT_EQ(memblock.reserved.total_size, r.size);
700 
701 	test_pass_pop();
702 
703 	return 0;
704 }
705 
706 /*
707  * A test that tries to mark two memory blocks that don't overlap as reserved
708  * and then reserve a third memory block in the space between the first two:
709  *
710  *  |        +--------+--------+--------+  |
711  *  |        |   r1   |   r3   |   r2   |  |
712  *  +--------+--------+--------+--------+--+
713  *
714  * Expect to merge the three entries into one reserved region that starts at
715  * r1.base and has size of r1.size + r2.size + r3.size. The region counter and
716  * total for memblock.reserved are updated.
717  */
718 static int memblock_reserve_between_check(void)
719 {
720 	struct memblock_region *rgn;
721 	phys_addr_t total_size;
722 
723 	rgn = &memblock.reserved.regions[0];
724 
725 	struct region r1 = {
726 		.base = SZ_1G,
727 		.size = SZ_8K
728 	};
729 	struct region r2 = {
730 		.base = SZ_1G + SZ_16K,
731 		.size = SZ_8K
732 	};
733 	struct region r3 = {
734 		.base = SZ_1G + SZ_8K,
735 		.size = SZ_8K
736 	};
737 
738 	PREFIX_PUSH();
739 
740 	total_size = r1.size + r2.size + r3.size;
741 
742 	reset_memblock_regions();
743 	memblock_reserve(r1.base, r1.size);
744 	memblock_reserve(r2.base, r2.size);
745 	memblock_reserve(r3.base, r3.size);
746 
747 	ASSERT_EQ(rgn->base, r1.base);
748 	ASSERT_EQ(rgn->size, total_size);
749 
750 	ASSERT_EQ(memblock.reserved.cnt, 1);
751 	ASSERT_EQ(memblock.reserved.total_size, total_size);
752 
753 	test_pass_pop();
754 
755 	return 0;
756 }
757 
758 /*
759  * A simple test that tries to reserve a memory block r when r extends past
760  * PHYS_ADDR_MAX:
761  *
762  *                               +--------+
763  *                               |    r   |
764  *                               +--------+
765  *  |                            +----+
766  *  |                            | rgn|
767  *  +----------------------------+----+
768  *
769  * Expect to reserve a memory block of size PHYS_ADDR_MAX - r.base. Expect the
770  * total size of reserved memory and the counter to be updated.
771  */
772 static int memblock_reserve_near_max_check(void)
773 {
774 	struct memblock_region *rgn;
775 	phys_addr_t total_size;
776 
777 	rgn = &memblock.reserved.regions[0];
778 
779 	struct region r = {
780 		.base = PHYS_ADDR_MAX - SZ_1M,
781 		.size = SZ_2M
782 	};
783 
784 	PREFIX_PUSH();
785 
786 	total_size = PHYS_ADDR_MAX - r.base;
787 
788 	reset_memblock_regions();
789 	memblock_reserve(r.base, r.size);
790 
791 	ASSERT_EQ(rgn->base, r.base);
792 	ASSERT_EQ(rgn->size, total_size);
793 
794 	ASSERT_EQ(memblock.reserved.cnt, 1);
795 	ASSERT_EQ(memblock.reserved.total_size, total_size);
796 
797 	test_pass_pop();
798 
799 	return 0;
800 }
801 
802 static int memblock_reserve_checks(void)
803 {
804 	prefix_reset();
805 	prefix_push(FUNC_RESERVE);
806 	test_print("Running %s tests...\n", FUNC_RESERVE);
807 
808 	memblock_reserve_simple_check();
809 	memblock_reserve_disjoint_check();
810 	memblock_reserve_overlap_top_check();
811 	memblock_reserve_overlap_bottom_check();
812 	memblock_reserve_within_check();
813 	memblock_reserve_twice_check();
814 	memblock_reserve_between_check();
815 	memblock_reserve_near_max_check();
816 
817 	prefix_pop();
818 
819 	return 0;
820 }
821 
822 /*
823  * A simple test that tries to remove a region r1 from the array of
824  * available memory regions. By "removing" a region we mean overwriting it
825  * with the next region r2 in memblock.memory:
826  *
827  *  |  ......          +----------------+  |
828  *  |  : r1 :          |       r2       |  |
829  *  +--+----+----------+----------------+--+
830  *                     ^
831  *                     |
832  *                     rgn.base
833  *
834  * Expect to add two memory blocks r1 and r2 and then remove r1 so that
835  * r2 is the first available region. The region counter and total size
836  * are updated.
837  */
838 static int memblock_remove_simple_check(void)
839 {
840 	struct memblock_region *rgn;
841 
842 	rgn = &memblock.memory.regions[0];
843 
844 	struct region r1 = {
845 		.base = SZ_2K,
846 		.size = SZ_4K
847 	};
848 	struct region r2 = {
849 		.base = SZ_128K,
850 		.size = SZ_4M
851 	};
852 
853 	PREFIX_PUSH();
854 
855 	reset_memblock_regions();
856 	memblock_add(r1.base, r1.size);
857 	memblock_add(r2.base, r2.size);
858 	memblock_remove(r1.base, r1.size);
859 
860 	ASSERT_EQ(rgn->base, r2.base);
861 	ASSERT_EQ(rgn->size, r2.size);
862 
863 	ASSERT_EQ(memblock.memory.cnt, 1);
864 	ASSERT_EQ(memblock.memory.total_size, r2.size);
865 
866 	test_pass_pop();
867 
868 	return 0;
869 }
870 
871 /*
872  * A test that tries to remove a region r2 that was not registered as
873  * available memory (i.e. has no corresponding entry in memblock.memory):
874  *
875  *                     +----------------+
876  *                     |       r2       |
877  *                     +----------------+
878  *  |  +----+                              |
879  *  |  | r1 |                              |
880  *  +--+----+------------------------------+
881  *     ^
882  *     |
883  *     rgn.base
884  *
885  * Expect the array, regions counter and total size to not be modified.
886  */
887 static int memblock_remove_absent_check(void)
888 {
889 	struct memblock_region *rgn;
890 
891 	rgn = &memblock.memory.regions[0];
892 
893 	struct region r1 = {
894 		.base = SZ_512K,
895 		.size = SZ_4M
896 	};
897 	struct region r2 = {
898 		.base = SZ_64M,
899 		.size = SZ_1G
900 	};
901 
902 	PREFIX_PUSH();
903 
904 	reset_memblock_regions();
905 	memblock_add(r1.base, r1.size);
906 	memblock_remove(r2.base, r2.size);
907 
908 	ASSERT_EQ(rgn->base, r1.base);
909 	ASSERT_EQ(rgn->size, r1.size);
910 
911 	ASSERT_EQ(memblock.memory.cnt, 1);
912 	ASSERT_EQ(memblock.memory.total_size, r1.size);
913 
914 	test_pass_pop();
915 
916 	return 0;
917 }
918 
919 /*
920  * A test that tries to remove a region r2 that overlaps with the
921  * beginning of the already existing entry r1
922  * (that is r1.base < r2.base + r2.size):
923  *
924  *           +-----------------+
925  *           |       r2        |
926  *           +-----------------+
927  *  |                 .........+--------+  |
928  *  |                 :     r1 |  rgn   |  |
929  *  +-----------------+--------+--------+--+
930  *                    ^        ^
931  *                    |        |
932  *                    |        rgn.base
933  *                    r1.base
934  *
935  * Expect that only the intersection of both regions is removed from the
936  * available memory pool. The regions counter and total size are updated.
937  */
938 static int memblock_remove_overlap_top_check(void)
939 {
940 	struct memblock_region *rgn;
941 	phys_addr_t r1_end, r2_end, total_size;
942 
943 	rgn = &memblock.memory.regions[0];
944 
945 	struct region r1 = {
946 		.base = SZ_32M,
947 		.size = SZ_32M
948 	};
949 	struct region r2 = {
950 		.base = SZ_16M,
951 		.size = SZ_32M
952 	};
953 
954 	PREFIX_PUSH();
955 
956 	r1_end = r1.base + r1.size;
957 	r2_end = r2.base + r2.size;
958 	total_size = r1_end - r2_end;
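	/*
	 * Removing r2 leaves [r2_end, r1_end) = [SZ_16M + SZ_32M, SZ_32M + SZ_32M),
	 * i.e. an SZ_16M region starting at r2_end.
	 */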
959 
960 	reset_memblock_regions();
961 	memblock_add(r1.base, r1.size);
962 	memblock_remove(r2.base, r2.size);
963 
964 	ASSERT_EQ(rgn->base, r2_end);
965 	ASSERT_EQ(rgn->size, total_size);
966 
967 	ASSERT_EQ(memblock.memory.cnt, 1);
968 	ASSERT_EQ(memblock.memory.total_size, total_size);
969 
970 	test_pass_pop();
971 
972 	return 0;
973 }
974 
975 /*
976  * A test that tries to remove a region r2 that overlaps with the end of
977  * the already existing region r1 (that is r2.base < r1.base + r1.size):
978  *
979  *        +--------------------------------+
980  *        |               r2               |
981  *        +--------------------------------+
982  *  | +---+.....                           |
983  *  | |rgn| r1 :                           |
984  *  +-+---+----+---------------------------+
985  *    ^
986  *    |
987  *    r1.base
988  *
989  * Expect that only the intersection of both regions is removed from the
990  * available memory pool. The regions counter and total size are updated.
991  */
992 static int memblock_remove_overlap_bottom_check(void)
993 {
994 	struct memblock_region *rgn;
995 	phys_addr_t total_size;
996 
997 	rgn = &memblock.memory.regions[0];
998 
999 	struct region r1 = {
1000 		.base = SZ_2M,
1001 		.size = SZ_64M
1002 	};
1003 	struct region r2 = {
1004 		.base = SZ_32M,
1005 		.size = SZ_256M
1006 	};
1007 
1008 	PREFIX_PUSH();
1009 
1010 	total_size = r2.base - r1.base;
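	/* Removing r2 leaves [r1.base, r2.base) = [SZ_2M, SZ_32M), i.e. SZ_32M - SZ_2M. */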
1011 
1012 	reset_memblock_regions();
1013 	memblock_add(r1.base, r1.size);
1014 	memblock_remove(r2.base, r2.size);
1015 
1016 	ASSERT_EQ(rgn->base, r1.base);
1017 	ASSERT_EQ(rgn->size, total_size);
1018 
1019 	ASSERT_EQ(memblock.memory.cnt, 1);
1020 	ASSERT_EQ(memblock.memory.total_size, total_size);
1021 
1022 	test_pass_pop();
1023 
1024 	return 0;
1025 }
1026 
1027 /*
1028  * A test that tries to remove a region r2 that is within the range of
1029  * the already existing entry r1 (that is
1030  * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)):
1031  *
1032  *                  +----+
1033  *                  | r2 |
1034  *                  +----+
1035  *  | +-------------+....+---------------+ |
1036  *  | |     rgn1    | r1 |     rgn2      | |
1037  *  +-+-------------+----+---------------+-+
1038  *    ^
1039  *    |
1040  *    r1.base
1041  *
1042  * Expect that the region is split into two - one that ends at r2.base and
1043  * another that starts at r2.base + r2.size, with appropriate sizes. The
1044  * region counter and total size are updated.
1045  */
1046 static int memblock_remove_within_check(void)
1047 {
1048 	struct memblock_region *rgn1, *rgn2;
1049 	phys_addr_t r1_size, r2_size, total_size;
1050 
1051 	rgn1 = &memblock.memory.regions[0];
1052 	rgn2 = &memblock.memory.regions[1];
1053 
1054 	struct region r1 = {
1055 		.base = SZ_1M,
1056 		.size = SZ_32M
1057 	};
1058 	struct region r2 = {
1059 		.base = SZ_16M,
1060 		.size = SZ_1M
1061 	};
1062 
1063 	PREFIX_PUSH();
1064 
1065 	r1_size = r2.base - r1.base;
1066 	r2_size = (r1.base + r1.size) - (r2.base + r2.size);
1067 	total_size = r1_size + r2_size;
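	/*
	 * Punching r2 out of r1 leaves [SZ_1M, SZ_16M) and
	 * [SZ_16M + SZ_1M, SZ_1M + SZ_32M), i.e. (SZ_16M - SZ_1M) + SZ_16M in total.
	 */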
1068 
1069 	reset_memblock_regions();
1070 	memblock_add(r1.base, r1.size);
1071 	memblock_remove(r2.base, r2.size);
1072 
1073 	ASSERT_EQ(rgn1->base, r1.base);
1074 	ASSERT_EQ(rgn1->size, r1_size);
1075 
1076 	ASSERT_EQ(rgn2->base, r2.base + r2.size);
1077 	ASSERT_EQ(rgn2->size, r2_size);
1078 
1079 	ASSERT_EQ(memblock.memory.cnt, 2);
1080 	ASSERT_EQ(memblock.memory.total_size, total_size);
1081 
1082 	test_pass_pop();
1083 
1084 	return 0;
1085 }
1086 
1087 /*
1088  * A simple test that tries to remove a region r1 from the array of
1089  * available memory regions when r1 is the only available region.
1090  * Expect to add a memory block r1 and then remove r1 so that a dummy
1091  * region is added. The region counter stays the same, and the total size
1092  * is updated.
1093  */
1094 static int memblock_remove_only_region_check(void)
1095 {
1096 	struct memblock_region *rgn;
1097 
1098 	rgn = &memblock.memory.regions[0];
1099 
1100 	struct region r1 = {
1101 		.base = SZ_2K,
1102 		.size = SZ_4K
1103 	};
1104 
1105 	PREFIX_PUSH();
1106 
1107 	reset_memblock_regions();
1108 	memblock_add(r1.base, r1.size);
1109 	memblock_remove(r1.base, r1.size);
1110 
1111 	ASSERT_EQ(rgn->base, 0);
1112 	ASSERT_EQ(rgn->size, 0);
1113 
1114 	ASSERT_EQ(memblock.memory.cnt, 1);
1115 	ASSERT_EQ(memblock.memory.total_size, 0);
1116 
1117 	test_pass_pop();
1118 
1119 	return 0;
1120 }
1121 
1122 /*
1123  * A simple test that tries to remove a region r2 from the array of available
1124  * memory regions when r2 extends past PHYS_ADDR_MAX:
1125  *
1126  *                               +--------+
1127  *                               |   r2   |
1128  *                               +--------+
1129  *  |                        +---+....+
1130  *  |                        |rgn|    |
1131  *  +------------------------+---+----+
1132  *
1133  * Expect that only the portion between PHYS_ADDR_MAX and r2.base is removed.
1134  * Expect the total size of available memory to be updated and the counter to
1135  * not be updated.
1136  */
1137 static int memblock_remove_near_max_check(void)
1138 {
1139 	struct memblock_region *rgn;
1140 	phys_addr_t total_size;
1141 
1142 	rgn = &memblock.memory.regions[0];
1143 
1144 	struct region r1 = {
1145 		.base = PHYS_ADDR_MAX - SZ_2M,
1146 		.size = SZ_2M
1147 	};
1148 
1149 	struct region r2 = {
1150 		.base = PHYS_ADDR_MAX - SZ_1M,
1151 		.size = SZ_2M
1152 	};
1153 
1154 	PREFIX_PUSH();
1155 
1156 	total_size = r1.size - (PHYS_ADDR_MAX - r2.base);
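	/*
	 * r2 is clamped at PHYS_ADDR_MAX, so only [r2.base, PHYS_ADDR_MAX) is
	 * removed and SZ_1M of r1 remains.
	 */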
1157 
1158 	reset_memblock_regions();
1159 	memblock_add(r1.base, r1.size);
1160 	memblock_remove(r2.base, r2.size);
1161 
1162 	ASSERT_EQ(rgn->base, r1.base);
1163 	ASSERT_EQ(rgn->size, total_size);
1164 
1165 	ASSERT_EQ(memblock.memory.cnt, 1);
1166 	ASSERT_EQ(memblock.memory.total_size, total_size);
1167 
1168 	test_pass_pop();
1169 
1170 	return 0;
1171 }
1172 
1173 /*
1174  * A test that tries to remove a region r3 that overlaps with two existing
1175  * regions r1 and r2:
1176  *
1177  *            +----------------+
1178  *            |       r3       |
1179  *            +----------------+
1180  *  |    +----+.....   ........+--------+
1181  *  |    |    |r1  :   :       |r2      |     |
1182  *  +----+----+----+---+-------+--------+-----+
1183  *
1184  * Expect that only the intersections of r1 with r3 and r2 with r3 are removed
1185  * from the available memory pool. Expect the total size of available memory to
1186  * be updated and the counter to not be updated.
1187  */
1188 static int memblock_remove_overlap_two_check(void)
1189 {
1190 	struct memblock_region *rgn1, *rgn2;
1191 	phys_addr_t new_r1_size, new_r2_size, r2_end, r3_end, total_size;
1192 
1193 	rgn1 = &memblock.memory.regions[0];
1194 	rgn2 = &memblock.memory.regions[1];
1195 
1196 	struct region r1 = {
1197 		.base = SZ_16M,
1198 		.size = SZ_32M
1199 	};
1200 	struct region r2 = {
1201 		.base = SZ_64M,
1202 		.size = SZ_64M
1203 	};
1204 	struct region r3 = {
1205 		.base = SZ_32M,
1206 		.size = SZ_64M
1207 	};
1208 
1209 	PREFIX_PUSH();
1210 
1211 	r2_end = r2.base + r2.size;
1212 	r3_end = r3.base + r3.size;
1213 	new_r1_size = r3.base - r1.base;
1214 	new_r2_size = r2_end - r3_end;
1215 	total_size = new_r1_size + new_r2_size;
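	/*
	 * r3 spans [SZ_32M, SZ_32M + SZ_64M), so r1 is cut back to
	 * [SZ_16M, SZ_32M) and r2 to [r3_end, r2_end), leaving
	 * SZ_16M + SZ_32M in total.
	 */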
1216 
1217 	reset_memblock_regions();
1218 	memblock_add(r1.base, r1.size);
1219 	memblock_add(r2.base, r2.size);
1220 	memblock_remove(r3.base, r3.size);
1221 
1222 	ASSERT_EQ(rgn1->base, r1.base);
1223 	ASSERT_EQ(rgn1->size, new_r1_size);
1224 
1225 	ASSERT_EQ(rgn2->base, r3_end);
1226 	ASSERT_EQ(rgn2->size, new_r2_size);
1227 
1228 	ASSERT_EQ(memblock.memory.cnt, 2);
1229 	ASSERT_EQ(memblock.memory.total_size, total_size);
1230 
1231 	test_pass_pop();
1232 
1233 	return 0;
1234 }
1235 
1236 static int memblock_remove_checks(void)
1237 {
1238 	prefix_reset();
1239 	prefix_push(FUNC_REMOVE);
1240 	test_print("Running %s tests...\n", FUNC_REMOVE);
1241 
1242 	memblock_remove_simple_check();
1243 	memblock_remove_absent_check();
1244 	memblock_remove_overlap_top_check();
1245 	memblock_remove_overlap_bottom_check();
1246 	memblock_remove_within_check();
1247 	memblock_remove_only_region_check();
1248 	memblock_remove_near_max_check();
1249 	memblock_remove_overlap_two_check();
1250 
1251 	prefix_pop();
1252 
1253 	return 0;
1254 }
1255 
1256 /*
1257  * A simple test that tries to free a memory block r1 that was marked
1258  * earlier as reserved. By "freeing" a region we mean overwriting it with
1259  * the next entry r2 in memblock.reserved:
1260  *
1261  *  |              ......           +----+ |
1262  *  |              : r1 :           | r2 | |
1263  *  +--------------+----+-----------+----+-+
1264  *                                  ^
1265  *                                  |
1266  *                                  rgn.base
1267  *
1268  * Expect to reserve two memory regions and then erase the r1 region by
1269  * overwriting it with r2. The region counter and total size are updated.
1270  */
1271 static int memblock_free_simple_check(void)
1272 {
1273 	struct memblock_region *rgn;
1274 
1275 	rgn = &memblock.reserved.regions[0];
1276 
1277 	struct region r1 = {
1278 		.base = SZ_4M,
1279 		.size = SZ_1M
1280 	};
1281 	struct region r2 = {
1282 		.base = SZ_8M,
1283 		.size = SZ_1M
1284 	};
1285 
1286 	PREFIX_PUSH();
1287 
1288 	reset_memblock_regions();
1289 	memblock_reserve(r1.base, r1.size);
1290 	memblock_reserve(r2.base, r2.size);
1291 	memblock_free((void *)r1.base, r1.size);
1292 
1293 	ASSERT_EQ(rgn->base, r2.base);
1294 	ASSERT_EQ(rgn->size, r2.size);
1295 
1296 	ASSERT_EQ(memblock.reserved.cnt, 1);
1297 	ASSERT_EQ(memblock.reserved.total_size, r2.size);
1298 
1299 	test_pass_pop();
1300 
1301 	return 0;
1302 }
1303 
1304 /*
1305  * A test that tries to free a region r2 that was not marked as reserved
1306  * (i.e. has no corresponding entry in memblock.reserved):
1307  *
1308  *                     +----------------+
1309  *                     |       r2       |
1310  *                     +----------------+
1311  *  |  +----+                              |
1312  *  |  | r1 |                              |
1313  *  +--+----+------------------------------+
1314  *     ^
1315  *     |
1316  *     rgn.base
1317  *
1318  * The array, regions counter and total size are not modified.
1319  */
1320 static int memblock_free_absent_check(void)
1321 {
1322 	struct memblock_region *rgn;
1323 
1324 	rgn = &memblock.reserved.regions[0];
1325 
1326 	struct region r1 = {
1327 		.base = SZ_2M,
1328 		.size = SZ_8K
1329 	};
1330 	struct region r2 = {
1331 		.base = SZ_16M,
1332 		.size = SZ_128M
1333 	};
1334 
1335 	PREFIX_PUSH();
1336 
1337 	reset_memblock_regions();
1338 	memblock_reserve(r1.base, r1.size);
1339 	memblock_free((void *)r2.base, r2.size);
1340 
1341 	ASSERT_EQ(rgn->base, r1.base);
1342 	ASSERT_EQ(rgn->size, r1.size);
1343 
1344 	ASSERT_EQ(memblock.reserved.cnt, 1);
1345 	ASSERT_EQ(memblock.reserved.total_size, r1.size);
1346 
1347 	test_pass_pop();
1348 
1349 	return 0;
1350 }
1351 
1352 /*
1353  * A test that tries to free a region r2 that overlaps with the beginning
1354  * of the already existing entry r1 (that is r1.base < r2.base + r2.size):
1355  *
1356  *     +----+
1357  *     | r2 |
1358  *     +----+
1359  *  |    ...+--------------+               |
1360  *  |    :  |    r1        |               |
1361  *  +----+--+--------------+---------------+
1362  *       ^  ^
1363  *       |  |
1364  *       |  rgn.base
1365  *       |
1366  *       r1.base
1367  *
1368  * Expect that only the intersection of both regions is freed. The
1369  * regions counter and total size are updated.
1370  */
1371 static int memblock_free_overlap_top_check(void)
1372 {
1373 	struct memblock_region *rgn;
1374 	phys_addr_t total_size;
1375 
1376 	rgn = &memblock.reserved.regions[0];
1377 
1378 	struct region r1 = {
1379 		.base = SZ_8M,
1380 		.size = SZ_32M
1381 	};
1382 	struct region r2 = {
1383 		.base = SZ_1M,
1384 		.size = SZ_8M
1385 	};
1386 
1387 	PREFIX_PUSH();
1388 
1389 	total_size = (r1.size + r1.base) - (r2.base + r2.size);
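	/*
	 * Freeing r2 clips r1 to [r2.base + r2.size, r1.base + r1.size) =
	 * [SZ_1M + SZ_8M, SZ_8M + SZ_32M), i.e. SZ_32M - SZ_1M.
	 */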
1390 
1391 	reset_memblock_regions();
1392 	memblock_reserve(r1.base, r1.size);
1393 	memblock_free((void *)r2.base, r2.size);
1394 
1395 	ASSERT_EQ(rgn->base, r2.base + r2.size);
1396 	ASSERT_EQ(rgn->size, total_size);
1397 
1398 	ASSERT_EQ(memblock.reserved.cnt, 1);
1399 	ASSERT_EQ(memblock.reserved.total_size, total_size);
1400 
1401 	test_pass_pop();
1402 
1403 	return 0;
1404 }
1405 
1406 /*
1407  * A test that tries to free a region r2 that overlaps with the end of
1408  * the already existing entry r1 (that is r2.base < r1.base + r1.size):
1409  *
1410  *                   +----------------+
1411  *                   |       r2       |
1412  *                   +----------------+
1413  *  |    +-----------+.....                |
1414  *  |    |       r1  |    :                |
1415  *  +----+-----------+----+----------------+
1416  *
1417  * Expect that only the intersection of both regions is freed. The
1418  * regions counter and total size are updated.
1419  */
1420 static int memblock_free_overlap_bottom_check(void)
1421 {
1422 	struct memblock_region *rgn;
1423 	phys_addr_t total_size;
1424 
1425 	rgn = &memblock.reserved.regions[0];
1426 
1427 	struct region r1 = {
1428 		.base = SZ_8M,
1429 		.size = SZ_32M
1430 	};
1431 	struct region r2 = {
1432 		.base = SZ_32M,
1433 		.size = SZ_32M
1434 	};
1435 
1436 	PREFIX_PUSH();
1437 
1438 	total_size = r2.base - r1.base;
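	/* Freeing r2 clips r1 to [r1.base, r2.base) = [SZ_8M, SZ_32M), i.e. SZ_32M - SZ_8M. */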
1439 
1440 	reset_memblock_regions();
1441 	memblock_reserve(r1.base, r1.size);
1442 	memblock_free((void *)r2.base, r2.size);
1443 
1444 	ASSERT_EQ(rgn->base, r1.base);
1445 	ASSERT_EQ(rgn->size, total_size);
1446 
1447 	ASSERT_EQ(memblock.reserved.cnt, 1);
1448 	ASSERT_EQ(memblock.reserved.total_size, total_size);
1449 
1450 	test_pass_pop();
1451 
1452 	return 0;
1453 }
1454 
1455 /*
1456  * A test that tries to free a region r2 that is within the range of the
1457  * already existing entry r1 (that is
1458  * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)):
1459  *
1460  *                    +----+
1461  *                    | r2 |
1462  *                    +----+
1463  *  |    +------------+....+---------------+
1464  *  |    |    rgn1    | r1 |     rgn2      |
1465  *  +----+------------+----+---------------+
1466  *       ^
1467  *       |
1468  *       r1.base
1469  *
1470  * Expect that the region is split into two - one that ends at r2.base and
1471  * another that starts at r2.base + r2.size, with appropriate sizes. The
1472  * region counter and total size fields are updated.
1473  */
1474 static int memblock_free_within_check(void)
1475 {
1476 	struct memblock_region *rgn1, *rgn2;
1477 	phys_addr_t r1_size, r2_size, total_size;
1478 
1479 	rgn1 = &memblock.reserved.regions[0];
1480 	rgn2 = &memblock.reserved.regions[1];
1481 
1482 	struct region r1 = {
1483 		.base = SZ_1M,
1484 		.size = SZ_8M
1485 	};
1486 	struct region r2 = {
1487 		.base = SZ_4M,
1488 		.size = SZ_1M
1489 	};
1490 
1491 	PREFIX_PUSH();
1492 
1493 	r1_size = r2.base - r1.base;
1494 	r2_size = (r1.base + r1.size) - (r2.base + r2.size);
1495 	total_size = r1_size + r2_size;
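	/*
	 * Freeing r2 splits r1 into [SZ_1M, SZ_4M) and
	 * [SZ_4M + SZ_1M, SZ_1M + SZ_8M), i.e. (SZ_4M - SZ_1M) + SZ_4M in total.
	 */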
1496 
1497 	reset_memblock_regions();
1498 	memblock_reserve(r1.base, r1.size);
1499 	memblock_free((void *)r2.base, r2.size);
1500 
1501 	ASSERT_EQ(rgn1->base, r1.base);
1502 	ASSERT_EQ(rgn1->size, r1_size);
1503 
1504 	ASSERT_EQ(rgn2->base, r2.base + r2.size);
1505 	ASSERT_EQ(rgn2->size, r2_size);
1506 
1507 	ASSERT_EQ(memblock.reserved.cnt, 2);
1508 	ASSERT_EQ(memblock.reserved.total_size, total_size);
1509 
1510 	test_pass_pop();
1511 
1512 	return 0;
1513 }
1514 
1515 /*
1516  * A simple test that tries to free a memory block r1 that was marked
1517  * earlier as reserved when r1 is the only reserved region.
1518  * Expect to reserve a memory block r1 and then free r1 so that r1 is
1519  * overwritten with a dummy region. The region counter stays the same,
1520  * and the total size is updated.
1521  */
1522 static int memblock_free_only_region_check(void)
1523 {
1524 	struct memblock_region *rgn;
1525 
1526 	rgn = &memblock.reserved.regions[0];
1527 
1528 	struct region r1 = {
1529 		.base = SZ_2K,
1530 		.size = SZ_4K
1531 	};
1532 
1533 	PREFIX_PUSH();
1534 
1535 	reset_memblock_regions();
1536 	memblock_reserve(r1.base, r1.size);
1537 	memblock_free((void *)r1.base, r1.size);
1538 
1539 	ASSERT_EQ(rgn->base, 0);
1540 	ASSERT_EQ(rgn->size, 0);
1541 
1542 	ASSERT_EQ(memblock.reserved.cnt, 1);
1543 	ASSERT_EQ(memblock.reserved.total_size, 0);
1544 
1545 	test_pass_pop();
1546 
1547 	return 0;
1548 }
1549 
1550 /*
1551  * A simple test that tries to free a region r2 when r2 extends past PHYS_ADDR_MAX:
1552  *
1553  *                               +--------+
1554  *                               |   r2   |
1555  *                               +--------+
1556  *  |                        +---+....+
1557  *  |                        |rgn|    |
1558  *  +------------------------+---+----+
1559  *
1560  * Expect that only the portion between PHYS_ADDR_MAX and r2.base is freed.
1561  * Expect the total size of reserved memory to be updated and the counter to
1562  * not be updated.
1563  */
1564 static int memblock_free_near_max_check(void)
1565 {
1566 	struct memblock_region *rgn;
1567 	phys_addr_t total_size;
1568 
1569 	rgn = &memblock.reserved.regions[0];
1570 
1571 	struct region r1 = {
1572 		.base = PHYS_ADDR_MAX - SZ_2M,
1573 		.size = SZ_2M
1574 	};
1575 
1576 	struct region r2 = {
1577 		.base = PHYS_ADDR_MAX - SZ_1M,
1578 		.size = SZ_2M
1579 	};
1580 
1581 	PREFIX_PUSH();
1582 
1583 	total_size = r1.size - (PHYS_ADDR_MAX - r2.base);
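	/* As in the remove test, r2 is clamped at PHYS_ADDR_MAX and only SZ_1M of r1 stays reserved. */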
1584 
1585 	reset_memblock_regions();
1586 	memblock_reserve(r1.base, r1.size);
1587 	memblock_free((void *)r2.base, r2.size);
1588 
1589 	ASSERT_EQ(rgn->base, r1.base);
1590 	ASSERT_EQ(rgn->size, total_size);
1591 
1592 	ASSERT_EQ(memblock.reserved.cnt, 1);
1593 	ASSERT_EQ(memblock.reserved.total_size, total_size);
1594 
1595 	test_pass_pop();
1596 
1597 	return 0;
1598 }
1599 
1600 /*
1601  * A test that tries to free a reserved region r3 that overlaps with two
1602  * existing reserved regions r1 and r2:
1603  *
1604  *            +----------------+
1605  *            |       r3       |
1606  *            +----------------+
1607  *  |    +----+.....   ........+--------+
1608  *  |    |    |r1  :   :       |r2      |     |
1609  *  +----+----+----+---+-------+--------+-----+
1610  *
1611  * Expect that only the intersections of r1 with r3 and r2 with r3 are freed
1612  * from the collection of reserved memory. Expect the total size of reserved
1613  * memory to be updated and the counter to not be updated.
1614  */
1615 static int memblock_free_overlap_two_check(void)
1616 {
1617 	struct memblock_region *rgn1, *rgn2;
1618 	phys_addr_t new_r1_size, new_r2_size, r2_end, r3_end, total_size;
1619 
1620 	rgn1 = &memblock.reserved.regions[0];
1621 	rgn2 = &memblock.reserved.regions[1];
1622 
1623 	struct region r1 = {
1624 		.base = SZ_16M,
1625 		.size = SZ_32M
1626 	};
1627 	struct region r2 = {
1628 		.base = SZ_64M,
1629 		.size = SZ_64M
1630 	};
1631 	struct region r3 = {
1632 		.base = SZ_32M,
1633 		.size = SZ_64M
1634 	};
1635 
1636 	PREFIX_PUSH();
1637 
1638 	r2_end = r2.base + r2.size;
1639 	r3_end = r3.base + r3.size;
1640 	new_r1_size = r3.base - r1.base;
1641 	new_r2_size = r2_end - r3_end;
1642 	total_size = new_r1_size + new_r2_size;
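	/*
	 * As in memblock_remove_overlap_two_check(): r1 is cut back to SZ_16M,
	 * r2 to SZ_32M, SZ_16M + SZ_32M in total.
	 */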
1643 
1644 	reset_memblock_regions();
1645 	memblock_reserve(r1.base, r1.size);
1646 	memblock_reserve(r2.base, r2.size);
1647 	memblock_free((void *)r3.base, r3.size);
1648 
1649 	ASSERT_EQ(rgn1->base, r1.base);
1650 	ASSERT_EQ(rgn1->size, new_r1_size);
1651 
1652 	ASSERT_EQ(rgn2->base, r3_end);
1653 	ASSERT_EQ(rgn2->size, new_r2_size);
1654 
1655 	ASSERT_EQ(memblock.reserved.cnt, 2);
1656 	ASSERT_EQ(memblock.reserved.total_size, total_size);
1657 
1658 	test_pass_pop();
1659 
1660 	return 0;
1661 }
1662 
1663 static int memblock_free_checks(void)
1664 {
1665 	prefix_reset();
1666 	prefix_push(FUNC_FREE);
1667 	test_print("Running %s tests...\n", FUNC_FREE);
1668 
1669 	memblock_free_simple_check();
1670 	memblock_free_absent_check();
1671 	memblock_free_overlap_top_check();
1672 	memblock_free_overlap_bottom_check();
1673 	memblock_free_within_check();
1674 	memblock_free_only_region_check();
1675 	memblock_free_near_max_check();
1676 	memblock_free_overlap_two_check();
1677 
1678 	prefix_pop();
1679 
1680 	return 0;
1681 }
1682 
1683 static int memblock_set_bottom_up_check(void)
1684 {
1685 	prefix_push("memblock_set_bottom_up");
1686 
1687 	memblock_set_bottom_up(false);
1688 	ASSERT_EQ(memblock.bottom_up, false);
1689 	memblock_set_bottom_up(true);
1690 	ASSERT_EQ(memblock.bottom_up, true);
1691 
1692 	reset_memblock_attributes();
1693 	test_pass_pop();
1694 
1695 	return 0;
1696 }
1697 
1698 static int memblock_bottom_up_check(void)
1699 {
1700 	prefix_push("memblock_bottom_up");
1701 
1702 	memblock_set_bottom_up(false);
1703 	ASSERT_EQ(memblock_bottom_up(), memblock.bottom_up);
1704 	ASSERT_EQ(memblock_bottom_up(), false);
1705 	memblock_set_bottom_up(true);
1706 	ASSERT_EQ(memblock_bottom_up(), memblock.bottom_up);
1707 	ASSERT_EQ(memblock_bottom_up(), true);
1708 
1709 	reset_memblock_attributes();
1710 	test_pass_pop();
1711 
1712 	return 0;
1713 }
1714 
1715 static int memblock_bottom_up_checks(void)
1716 {
1717 	test_print("Running memblock_*bottom_up tests...\n");
1718 
1719 	prefix_reset();
1720 	memblock_set_bottom_up_check();
1721 	prefix_reset();
1722 	memblock_bottom_up_check();
1723 
1724 	return 0;
1725 }
1726 
1727 /*
1728  * A test that tries to trim memory when both ends of the memory region are
1729  * aligned. Expect that the memory will not be trimmed. Expect the counter to
1730  * not be updated.
1731  */
1732 static int memblock_trim_memory_aligned_check(void)
1733 {
1734 	struct memblock_region *rgn;
1735 	const phys_addr_t alignment = SMP_CACHE_BYTES;
1736 
1737 	rgn = &memblock.memory.regions[0];
1738 
1739 	struct region r = {
1740 		.base = alignment,
1741 		.size = alignment * 4
1742 	};
1743 
1744 	PREFIX_PUSH();
1745 
1746 	reset_memblock_regions();
1747 	memblock_add(r.base, r.size);
1748 	memblock_trim_memory(alignment);
1749 
1750 	ASSERT_EQ(rgn->base, r.base);
1751 	ASSERT_EQ(rgn->size, r.size);
1752 
1753 	ASSERT_EQ(memblock.memory.cnt, 1);
1754 
1755 	test_pass_pop();
1756 
1757 	return 0;
1758 }
1759 
1760 /*
1761  * A test that tries to trim memory when there are two available regions, r1 and
1762  * r2. Region r1 is aligned on both ends and region r2 is unaligned on one end
1763  * and smaller than the alignment:
1764  *
1765  *                                     alignment
1766  *                                     |--------|
1767  * |        +-----------------+        +------+   |
1768  * |        |        r1       |        |  r2  |   |
1769  * +--------+-----------------+--------+------+---+
1770  *          ^        ^        ^        ^      ^
1771  *          |________|________|________|      |
1772  *                            |               Unaligned address
1773  *                Aligned addresses
1774  *
1775  * Expect that r1 will not be trimmed and r2 will be removed. Expect the
1776  * counter to be updated.
1777  */
1778 static int memblock_trim_memory_too_small_check(void)
1779 {
1780 	struct memblock_region *rgn;
1781 	const phys_addr_t alignment = SMP_CACHE_BYTES;
1782 
1783 	rgn = &memblock.memory.regions[0];
1784 
1785 	struct region r1 = {
1786 		.base = alignment,
1787 		.size = alignment * 2
1788 	};
1789 	struct region r2 = {
1790 		.base = alignment * 4,
1791 		.size = alignment - SZ_2
1792 	};
1793 
1794 	PREFIX_PUSH();
1795 
1796 	reset_memblock_regions();
1797 	memblock_add(r1.base, r1.size);
1798 	memblock_add(r2.base, r2.size);
1799 	memblock_trim_memory(alignment);
1800 
1801 	ASSERT_EQ(rgn->base, r1.base);
1802 	ASSERT_EQ(rgn->size, r1.size);
1803 
1804 	ASSERT_EQ(memblock.memory.cnt, 1);
1805 
1806 	test_pass_pop();
1807 
1808 	return 0;
1809 }
1810 
1811 /*
1812  * A test that tries to trim memory when there are two available regions, r1 and
1813  * r2. Region r1 is aligned on both ends and region r2 is unaligned at the base
1814  * and aligned at the end:
1815  *
1816  *                               Unaligned address
1817  *                                       |
1818  *                                       v
1819  * |        +-----------------+          +---------------+   |
1820  * |        |        r1       |          |      r2       |   |
1821  * +--------+-----------------+----------+---------------+---+
1822  *          ^        ^        ^        ^        ^        ^
1823  *          |________|________|________|________|________|
1824  *                            |
1825  *                    Aligned addresses
1826  *
1827  * Expect that r1 will not be trimmed and r2 will be trimmed at the base.
1828  * Expect the counter to not be updated.
1829  */
1830 static int memblock_trim_memory_unaligned_base_check(void)
1831 {
1832 	struct memblock_region *rgn1, *rgn2;
1833 	const phys_addr_t alignment = SMP_CACHE_BYTES;
1834 	phys_addr_t offset = SZ_2;
1835 	phys_addr_t new_r2_base, new_r2_size;
1836 
1837 	rgn1 = &memblock.memory.regions[0];
1838 	rgn2 = &memblock.memory.regions[1];
1839 
1840 	struct region r1 = {
1841 		.base = alignment,
1842 		.size = alignment * 2
1843 	};
1844 	struct region r2 = {
1845 		.base = alignment * 4 + offset,
1846 		.size = alignment * 2 - offset
1847 	};
1848 
1849 	PREFIX_PUSH();
1850 
1851 	new_r2_base = r2.base + (alignment - offset);
1852 	new_r2_size = r2.size - (alignment - offset);
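	/*
	 * Rounding r2.base up to the next SMP_CACHE_BYTES boundary moves it by
	 * alignment - offset bytes and shrinks the size by the same amount,
	 * leaving exactly one aligned block.
	 */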
1853 
1854 	reset_memblock_regions();
1855 	memblock_add(r1.base, r1.size);
1856 	memblock_add(r2.base, r2.size);
1857 	memblock_trim_memory(alignment);
1858 
1859 	ASSERT_EQ(rgn1->base, r1.base);
1860 	ASSERT_EQ(rgn1->size, r1.size);
1861 
1862 	ASSERT_EQ(rgn2->base, new_r2_base);
1863 	ASSERT_EQ(rgn2->size, new_r2_size);
1864 
1865 	ASSERT_EQ(memblock.memory.cnt, 2);
1866 
1867 	test_pass_pop();
1868 
1869 	return 0;
1870 }
1871 
1872 /*
1873  * A test that tries to trim memory when there are two available regions, r1 and
1874  * r2. Region r1 is aligned on both ends and region r2 is aligned at the base
1875  * and unaligned at the end:
1876  *
1877  *                                             Unaligned address
1878  *                                                     |
1879  *                                                     v
1880  * |        +-----------------+        +---------------+   |
1881  * |        |        r1       |        |      r2       |   |
1882  * +--------+-----------------+--------+---------------+---+
1883  *          ^        ^        ^        ^        ^        ^
1884  *          |________|________|________|________|________|
1885  *                            |
1886  *                    Aligned addresses
1887  *
1888  * Expect that r1 will not be trimmed and r2 will be trimmed at the end.
1889  * Expect the counter to not be updated.
1890  */
1891 static int memblock_trim_memory_unaligned_end_check(void)
1892 {
1893 	struct memblock_region *rgn1, *rgn2;
1894 	const phys_addr_t alignment = SMP_CACHE_BYTES;
1895 	phys_addr_t offset = SZ_2;
1896 	phys_addr_t new_r2_size;
1897 
1898 	rgn1 = &memblock.memory.regions[0];
1899 	rgn2 = &memblock.memory.regions[1];
1900 
1901 	struct region r1 = {
1902 		.base = alignment,
1903 		.size = alignment * 2
1904 	};
1905 	struct region r2 = {
1906 		.base = alignment * 4,
1907 		.size = alignment * 2 - offset
1908 	};
1909 
1910 	PREFIX_PUSH();
1911 
1912 	new_r2_size = r2.size - (alignment - offset);
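	/*
	 * Rounding the unaligned end of r2 down to an SMP_CACHE_BYTES boundary
	 * trims alignment - offset bytes off the end, again leaving one aligned
	 * block.
	 */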
1913 
1914 	reset_memblock_regions();
1915 	memblock_add(r1.base, r1.size);
1916 	memblock_add(r2.base, r2.size);
1917 	memblock_trim_memory(alignment);
1918 
1919 	ASSERT_EQ(rgn1->base, r1.base);
1920 	ASSERT_EQ(rgn1->size, r1.size);
1921 
1922 	ASSERT_EQ(rgn2->base, r2.base);
1923 	ASSERT_EQ(rgn2->size, new_r2_size);
1924 
1925 	ASSERT_EQ(memblock.memory.cnt, 2);
1926 
1927 	test_pass_pop();
1928 
1929 	return 0;
1930 }
1931 
1932 static int memblock_trim_memory_checks(void)
1933 {
1934 	prefix_reset();
1935 	prefix_push(FUNC_TRIM);
1936 	test_print("Running %s tests...\n", FUNC_TRIM);
1937 
1938 	memblock_trim_memory_aligned_check();
1939 	memblock_trim_memory_too_small_check();
1940 	memblock_trim_memory_unaligned_base_check();
1941 	memblock_trim_memory_unaligned_end_check();
1942 
1943 	prefix_pop();
1944 
1945 	return 0;
1946 }
1947 
1948 int memblock_basic_checks(void)
1949 {
1950 	memblock_initialization_check();
1951 	memblock_add_checks();
1952 	memblock_reserve_checks();
1953 	memblock_remove_checks();
1954 	memblock_free_checks();
1955 	memblock_bottom_up_checks();
1956 	memblock_trim_memory_checks();
1957 
1958 	return 0;
1959 }
1960