xref: /linux/tools/testing/memblock/tests/alloc_api.c (revision 21a233f68afe55aafa8b79705c97f7a1d37be3e1)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 #include "alloc_api.h"
3 
4 /*
5  * A simple test that tries to allocate a small memory region.
6  * Expect to allocate an aligned region near the end of the available memory.
7  */
8 static int alloc_top_down_simple_check(void)
9 {
10 	struct memblock_region *rgn = &memblock.reserved.regions[0];
11 	void *allocated_ptr = NULL;
12 
13 	PREFIX_PUSH();
14 
15 	phys_addr_t size = SZ_2;
16 	phys_addr_t expected_start;
17 
18 	setup_memblock();
19 
20 	expected_start = memblock_end_of_DRAM() - SMP_CACHE_BYTES;
21 
22 	allocated_ptr = memblock_alloc(size, SMP_CACHE_BYTES);
23 
24 	ASSERT_NE(allocated_ptr, NULL);
25 	ASSERT_MEM_EQ(allocated_ptr, 0, size);
26 
27 	ASSERT_EQ(rgn->size, size);
28 	ASSERT_EQ(rgn->base, expected_start);
29 
30 	ASSERT_EQ(memblock.reserved.cnt, 1);
31 	ASSERT_EQ(memblock.reserved.total_size, size);
32 
33 	test_pass_pop();
34 
35 	return 0;
36 }
37 
38 /*
39  * A test that tries to allocate memory next to a reserved region that starts at
40  * the misaligned address. Expect to create two separate entries, with the new
41  * entry aligned to the provided alignment:
42  *
43  *              +
44  * |            +--------+         +--------|
45  * |            |  rgn2  |         |  rgn1  |
46  * +------------+--------+---------+--------+
47  *              ^
48  *              |
49  *              Aligned address boundary
50  *
51  * The allocation direction is top-down and region arrays are sorted from lower
52  * to higher addresses, so the new region will be the first entry in
53  * memory.reserved array. The previously reserved region does not get modified.
54  * Region counter and total size get updated.
55  */
56 static int alloc_top_down_disjoint_check(void)
57 {
58 	/* After allocation, this will point to the "old" region */
59 	struct memblock_region *rgn1 = &memblock.reserved.regions[1];
60 	struct memblock_region *rgn2 = &memblock.reserved.regions[0];
61 	struct region r1;
62 	void *allocated_ptr = NULL;
63 
64 	PREFIX_PUSH();
65 
66 	phys_addr_t r2_size = SZ_16;
67 	/* Use custom alignment */
68 	phys_addr_t alignment = SMP_CACHE_BYTES * 2;
69 	phys_addr_t total_size;
70 	phys_addr_t expected_start;
71 
72 	setup_memblock();
73 
74 	r1.base = memblock_end_of_DRAM() - SZ_2;
75 	r1.size = SZ_2;
76 
77 	total_size = r1.size + r2_size;
78 	expected_start = memblock_end_of_DRAM() - alignment;
79 
80 	memblock_reserve(r1.base, r1.size);
81 
82 	allocated_ptr = memblock_alloc(r2_size, alignment);
83 
84 	ASSERT_NE(allocated_ptr, NULL);
85 	ASSERT_MEM_EQ(allocated_ptr, 0, r2_size);
86 
87 	ASSERT_EQ(rgn1->size, r1.size);
88 	ASSERT_EQ(rgn1->base, r1.base);
89 
90 	ASSERT_EQ(rgn2->size, r2_size);
91 	ASSERT_EQ(rgn2->base, expected_start);
92 
93 	ASSERT_EQ(memblock.reserved.cnt, 2);
94 	ASSERT_EQ(memblock.reserved.total_size, total_size);
95 
96 	test_pass_pop();
97 
98 	return 0;
99 }
100 
101 /*
102  * A test that tries to allocate memory when there is enough space at the end
103  * of the previously reserved block (i.e. first fit):
104  *
105  *  |              +--------+--------------|
106  *  |              |   r1   |      r2      |
107  *  +--------------+--------+--------------+
108  *
109  * Expect a merge of both regions. Only the region size gets updated.
110  */
111 static int alloc_top_down_before_check(void)
112 {
113 	struct memblock_region *rgn = &memblock.reserved.regions[0];
114 	void *allocated_ptr = NULL;
115 
116 	PREFIX_PUSH();
117 
118 	/*
119 	 * The first region ends at the aligned address to test region merging
120 	 */
121 	phys_addr_t r1_size = SMP_CACHE_BYTES;
122 	phys_addr_t r2_size = SZ_512;
123 	phys_addr_t total_size = r1_size + r2_size;
124 
125 	setup_memblock();
126 
127 	memblock_reserve(memblock_end_of_DRAM() - total_size, r1_size);
128 
129 	allocated_ptr = memblock_alloc(r2_size, SMP_CACHE_BYTES);
130 
131 	ASSERT_NE(allocated_ptr, NULL);
132 	ASSERT_MEM_EQ(allocated_ptr, 0, r2_size);
133 
134 	ASSERT_EQ(rgn->size, total_size);
135 	ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - total_size);
136 
137 	ASSERT_EQ(memblock.reserved.cnt, 1);
138 	ASSERT_EQ(memblock.reserved.total_size, total_size);
139 
140 	test_pass_pop();
141 
142 	return 0;
143 }
144 
145 /*
146  * A test that tries to allocate memory when there is not enough space at the
147  * end of the previously reserved block (i.e. second fit):
148  *
149  *  |            +-----------+------+     |
150  *  |            |     r2    |  r1  |     |
151  *  +------------+-----------+------+-----+
152  *
153  * Expect a merge of both regions. Both the base address and size of the region
154  * get updated.
155  */
156 static int alloc_top_down_after_check(void)
157 {
158 	struct memblock_region *rgn = &memblock.reserved.regions[0];
159 	struct region r1;
160 	void *allocated_ptr = NULL;
161 
162 	PREFIX_PUSH();
163 
164 	phys_addr_t r2_size = SZ_512;
165 	phys_addr_t total_size;
166 
167 	setup_memblock();
168 
169 	/*
170 	 * The first region starts at the aligned address to test region merging
171 	 */
172 	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES;
173 	r1.size = SZ_8;
174 
175 	total_size = r1.size + r2_size;
176 
177 	memblock_reserve(r1.base, r1.size);
178 
179 	allocated_ptr = memblock_alloc(r2_size, SMP_CACHE_BYTES);
180 
181 	ASSERT_NE(allocated_ptr, NULL);
182 	ASSERT_MEM_EQ(allocated_ptr, 0, r2_size);
183 
184 	ASSERT_EQ(rgn->size, total_size);
185 	ASSERT_EQ(rgn->base, r1.base - r2_size);
186 
187 	ASSERT_EQ(memblock.reserved.cnt, 1);
188 	ASSERT_EQ(memblock.reserved.total_size, total_size);
189 
190 	test_pass_pop();
191 
192 	return 0;
193 }
194 
195 /*
196  * A test that tries to allocate memory when there are two reserved regions with
197  * a gap too small to fit the new region:
198  *
199  *  |       +--------+----------+   +------|
200  *  |       |   r3   |    r2    |   |  r1  |
201  *  +-------+--------+----------+---+------+
202  *
203  * Expect to allocate a region before the one that starts at the lower address,
204  * and merge them into one. The region counter and total size fields get
205  * updated.
206  */
207 static int alloc_top_down_second_fit_check(void)
208 {
209 	struct memblock_region *rgn = &memblock.reserved.regions[0];
210 	struct region r1, r2;
211 	void *allocated_ptr = NULL;
212 
213 	PREFIX_PUSH();
214 
215 	phys_addr_t r3_size = SZ_1K;
216 	phys_addr_t total_size;
217 
218 	setup_memblock();
219 
220 	r1.base = memblock_end_of_DRAM() - SZ_512;
221 	r1.size = SZ_512;
222 
223 	r2.base = r1.base - SZ_512;
224 	r2.size = SZ_256;
225 
226 	total_size = r1.size + r2.size + r3_size;
227 
228 	memblock_reserve(r1.base, r1.size);
229 	memblock_reserve(r2.base, r2.size);
230 
231 	allocated_ptr = memblock_alloc(r3_size, SMP_CACHE_BYTES);
232 
233 	ASSERT_NE(allocated_ptr, NULL);
234 	ASSERT_MEM_EQ(allocated_ptr, 0, r3_size);
235 
236 	ASSERT_EQ(rgn->size, r2.size + r3_size);
237 	ASSERT_EQ(rgn->base, r2.base - r3_size);
238 
239 	ASSERT_EQ(memblock.reserved.cnt, 2);
240 	ASSERT_EQ(memblock.reserved.total_size, total_size);
241 
242 	test_pass_pop();
243 
244 	return 0;
245 }
246 
247 /*
248  * A test that tries to allocate memory when there are two reserved regions with
249  * a gap big enough to accommodate the new region:
250  *
251  *  |     +--------+--------+--------+     |
252  *  |     |   r2   |   r3   |   r1   |     |
253  *  +-----+--------+--------+--------+-----+
254  *
255  * Expect to merge all of them, creating one big entry in memblock.reserved
256  * array. The region counter and total size fields get updated.
257  */
258 static int alloc_in_between_generic_check(void)
259 {
260 	struct memblock_region *rgn = &memblock.reserved.regions[0];
261 	struct region r1, r2;
262 	void *allocated_ptr = NULL;
263 
264 	PREFIX_PUSH();
265 
266 	phys_addr_t gap_size = SMP_CACHE_BYTES;
267 	phys_addr_t r3_size = SZ_64;
268 	/*
269 	 * Calculate regions size so there's just enough space for the new entry
270 	 */
271 	phys_addr_t rgn_size = (MEM_SIZE - (2 * gap_size + r3_size)) / 2;
272 	phys_addr_t total_size;
273 
274 	setup_memblock();
275 
276 	r1.size = rgn_size;
277 	r1.base = memblock_end_of_DRAM() - (gap_size + rgn_size);
278 
279 	r2.size = rgn_size;
280 	r2.base = memblock_start_of_DRAM() + gap_size;
281 
282 	total_size = r1.size + r2.size + r3_size;
283 
284 	memblock_reserve(r1.base, r1.size);
285 	memblock_reserve(r2.base, r2.size);
286 
287 	allocated_ptr = memblock_alloc(r3_size, SMP_CACHE_BYTES);
288 
289 	ASSERT_NE(allocated_ptr, NULL);
290 	ASSERT_MEM_EQ(allocated_ptr, 0, r3_size);
291 
292 	ASSERT_EQ(rgn->size, total_size);
293 	ASSERT_EQ(rgn->base, r1.base - r2.size - r3_size);
294 
295 	ASSERT_EQ(memblock.reserved.cnt, 1);
296 	ASSERT_EQ(memblock.reserved.total_size, total_size);
297 
298 	test_pass_pop();
299 
300 	return 0;
301 }
302 
303 /*
304  * A test that tries to allocate memory when the memory is filled with reserved
305  * regions with memory gaps too small to fit the new region:
306  *
307  * +-------+
308  * |  new  |
309  * +--+----+
310  *    |    +-----+    +-----+    +-----+    |
311  *    |    | res |    | res |    | res |    |
312  *    +----+-----+----+-----+----+-----+----+
313  *
314  * Expect no allocation to happen.
315  */
316 static int alloc_small_gaps_generic_check(void)
317 {
318 	void *allocated_ptr = NULL;
319 
320 	PREFIX_PUSH();
321 
322 	phys_addr_t region_size = SZ_1K;
323 	phys_addr_t gap_size = SZ_256;
324 	phys_addr_t region_end;
325 
326 	setup_memblock();
327 
328 	region_end = memblock_start_of_DRAM();
329 
330 	while (region_end < memblock_end_of_DRAM()) {
331 		memblock_reserve(region_end + gap_size, region_size);
332 		region_end += gap_size + region_size;
333 	}
334 
335 	allocated_ptr = memblock_alloc(region_size, SMP_CACHE_BYTES);
336 
337 	ASSERT_EQ(allocated_ptr, NULL);
338 
339 	test_pass_pop();
340 
341 	return 0;
342 }
343 
344 /*
345  * A test that tries to allocate memory when all memory is reserved.
346  * Expect no allocation to happen.
347  */
348 static int alloc_all_reserved_generic_check(void)
349 {
350 	void *allocated_ptr = NULL;
351 
352 	PREFIX_PUSH();
353 
354 	setup_memblock();
355 
356 	/* Simulate full memory */
357 	memblock_reserve(memblock_start_of_DRAM(), MEM_SIZE);
358 
359 	allocated_ptr = memblock_alloc(SZ_256, SMP_CACHE_BYTES);
360 
361 	ASSERT_EQ(allocated_ptr, NULL);
362 
363 	test_pass_pop();
364 
365 	return 0;
366 }
367 
368 /*
369  * A test that tries to allocate memory when the memory is almost full,
370  * with not enough space left for the new region:
371  *
372  *                                +-------+
373  *                                |  new  |
374  *                                +-------+
375  *  |-----------------------------+   |
376  *  |          reserved           |   |
377  *  +-----------------------------+---+
378  *
379  * Expect no allocation to happen.
380  */
381 static int alloc_no_space_generic_check(void)
382 {
383 	void *allocated_ptr = NULL;
384 
385 	PREFIX_PUSH();
386 
387 	setup_memblock();
388 
389 	phys_addr_t available_size = SZ_256;
390 	phys_addr_t reserved_size = MEM_SIZE - available_size;
391 
392 	/* Simulate almost-full memory */
393 	memblock_reserve(memblock_start_of_DRAM(), reserved_size);
394 
395 	allocated_ptr = memblock_alloc(SZ_1K, SMP_CACHE_BYTES);
396 
397 	ASSERT_EQ(allocated_ptr, NULL);
398 
399 	test_pass_pop();
400 
401 	return 0;
402 }
403 
404 /*
405  * A test that tries to allocate memory when the memory is almost full,
406  * but there is just enough space left:
407  *
408  *  |---------------------------+---------|
409  *  |          reserved         |   new   |
410  *  +---------------------------+---------+
411  *
412  * Expect to allocate memory and merge all the regions. The total size field
413  * gets updated.
414  */
415 static int alloc_limited_space_generic_check(void)
416 {
417 	struct memblock_region *rgn = &memblock.reserved.regions[0];
418 	void *allocated_ptr = NULL;
419 
420 	PREFIX_PUSH();
421 
422 	phys_addr_t available_size = SZ_256;
423 	phys_addr_t reserved_size = MEM_SIZE - available_size;
424 
425 	setup_memblock();
426 
427 	/* Simulate almost-full memory */
428 	memblock_reserve(memblock_start_of_DRAM(), reserved_size);
429 
430 	allocated_ptr = memblock_alloc(available_size, SMP_CACHE_BYTES);
431 
432 	ASSERT_NE(allocated_ptr, NULL);
433 	ASSERT_MEM_EQ(allocated_ptr, 0, available_size);
434 
435 	ASSERT_EQ(rgn->size, MEM_SIZE);
436 	ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
437 
438 	ASSERT_EQ(memblock.reserved.cnt, 1);
439 	ASSERT_EQ(memblock.reserved.total_size, MEM_SIZE);
440 
441 	test_pass_pop();
442 
443 	return 0;
444 }
445 
446 /*
447  * A test that tries to allocate memory when there is no available memory
448  * registered (i.e. memblock.memory has only a dummy entry).
449  * Expect no allocation to happen.
450  */
451 static int alloc_no_memory_generic_check(void)
452 {
453 	struct memblock_region *rgn = &memblock.reserved.regions[0];
454 	void *allocated_ptr = NULL;
455 
456 	PREFIX_PUSH();
457 
458 	reset_memblock_regions();
459 
460 	allocated_ptr = memblock_alloc(SZ_1K, SMP_CACHE_BYTES);
461 
462 	ASSERT_EQ(allocated_ptr, NULL);
463 	ASSERT_EQ(rgn->size, 0);
464 	ASSERT_EQ(rgn->base, 0);
465 	ASSERT_EQ(memblock.reserved.total_size, 0);
466 
467 	test_pass_pop();
468 
469 	return 0;
470 }
471 
472 /*
473  * A test that tries to allocate a region that is larger than the total size of
474  * available memory (memblock.memory):
475  *
476  *  +-----------------------------------+
477  *  |                 new               |
478  *  +-----------------------------------+
479  *  |                                 |
480  *  |                                 |
481  *  +---------------------------------+
482  *
483  * Expect no allocation to happen.
484  */
485 static int alloc_too_large_generic_check(void)
486 {
487 	struct memblock_region *rgn = &memblock.reserved.regions[0];
488 	void *allocated_ptr = NULL;
489 
490 	PREFIX_PUSH();
491 
492 	setup_memblock();
493 
494 	allocated_ptr = memblock_alloc(MEM_SIZE + SZ_2, SMP_CACHE_BYTES);
495 
496 	ASSERT_EQ(allocated_ptr, NULL);
497 	ASSERT_EQ(rgn->size, 0);
498 	ASSERT_EQ(rgn->base, 0);
499 	ASSERT_EQ(memblock.reserved.total_size, 0);
500 
501 	test_pass_pop();
502 
503 	return 0;
504 }
505 
506 /*
507  * A simple test that tries to allocate a small memory region.
508  * Expect to allocate an aligned region at the beginning of the available
509  * memory.
510  */
511 static int alloc_bottom_up_simple_check(void)
512 {
513 	struct memblock_region *rgn = &memblock.reserved.regions[0];
514 	void *allocated_ptr = NULL;
515 
516 	PREFIX_PUSH();
517 
518 	setup_memblock();
519 
520 	allocated_ptr = memblock_alloc(SZ_2, SMP_CACHE_BYTES);
521 
522 	ASSERT_NE(allocated_ptr, NULL);
523 	ASSERT_MEM_EQ(allocated_ptr, 0, SZ_2);
524 
525 	ASSERT_EQ(rgn->size, SZ_2);
526 	ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
527 
528 	ASSERT_EQ(memblock.reserved.cnt, 1);
529 	ASSERT_EQ(memblock.reserved.total_size, SZ_2);
530 
531 	test_pass_pop();
532 
533 	return 0;
534 }
535 
536 /*
537  * A test that tries to allocate memory next to a reserved region that starts at
538  * the misaligned address. Expect to create two separate entries, with the new
539  * entry aligned to the provided alignment:
540  *
541  *                      +
542  *  |    +----------+   +----------+     |
543  *  |    |   rgn1   |   |   rgn2   |     |
544  *  +----+----------+---+----------+-----+
545  *                      ^
546  *                      |
547  *                      Aligned address boundary
548  *
549  * The allocation direction is bottom-up, so the new region will be the second
550  * entry in memory.reserved array. The previously reserved region does not get
551  * modified. Region counter and total size get updated.
552  */
553 static int alloc_bottom_up_disjoint_check(void)
554 {
555 	struct memblock_region *rgn1 = &memblock.reserved.regions[0];
556 	struct memblock_region *rgn2 = &memblock.reserved.regions[1];
557 	struct region r1;
558 	void *allocated_ptr = NULL;
559 
560 	PREFIX_PUSH();
561 
562 	phys_addr_t r2_size = SZ_16;
563 	/* Use custom alignment */
564 	phys_addr_t alignment = SMP_CACHE_BYTES * 2;
565 	phys_addr_t total_size;
566 	phys_addr_t expected_start;
567 
568 	setup_memblock();
569 
570 	r1.base = memblock_start_of_DRAM() + SZ_2;
571 	r1.size = SZ_2;
572 
573 	total_size = r1.size + r2_size;
574 	expected_start = memblock_start_of_DRAM() + alignment;
575 
576 	memblock_reserve(r1.base, r1.size);
577 
578 	allocated_ptr = memblock_alloc(r2_size, alignment);
579 
580 	ASSERT_NE(allocated_ptr, NULL);
581 	ASSERT_MEM_EQ(allocated_ptr, 0, r2_size);
582 
583 	ASSERT_EQ(rgn1->size, r1.size);
584 	ASSERT_EQ(rgn1->base, r1.base);
585 
586 	ASSERT_EQ(rgn2->size, r2_size);
587 	ASSERT_EQ(rgn2->base, expected_start);
588 
589 	ASSERT_EQ(memblock.reserved.cnt, 2);
590 	ASSERT_EQ(memblock.reserved.total_size, total_size);
591 
592 	test_pass_pop();
593 
594 	return 0;
595 }
596 
597 /*
598  * A test that tries to allocate memory when there is enough space at
599  * the beginning of the previously reserved block (i.e. first fit):
600  *
601  *  |------------------+--------+         |
602  *  |        r1        |   r2   |         |
603  *  +------------------+--------+---------+
604  *
605  * Expect a merge of both regions. Only the region size gets updated.
606  */
607 static int alloc_bottom_up_before_check(void)
608 {
609 	struct memblock_region *rgn = &memblock.reserved.regions[0];
610 	void *allocated_ptr = NULL;
611 
612 	PREFIX_PUSH();
613 
614 	phys_addr_t r1_size = SZ_512;
615 	phys_addr_t r2_size = SZ_128;
616 	phys_addr_t total_size = r1_size + r2_size;
617 
618 	setup_memblock();
619 
620 	memblock_reserve(memblock_start_of_DRAM() + r1_size, r2_size);
621 
622 	allocated_ptr = memblock_alloc(r1_size, SMP_CACHE_BYTES);
623 
624 	ASSERT_NE(allocated_ptr, NULL);
625 	ASSERT_MEM_EQ(allocated_ptr, 0, r1_size);
626 
627 	ASSERT_EQ(rgn->size, total_size);
628 	ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
629 
630 	ASSERT_EQ(memblock.reserved.cnt, 1);
631 	ASSERT_EQ(memblock.reserved.total_size, total_size);
632 
633 	test_pass_pop();
634 
635 	return 0;
636 }
637 
638 /*
639  * A test that tries to allocate memory when there is not enough space at
640  * the beginning of the previously reserved block (i.e. second fit):
641  *
642  *  |    +--------+--------------+         |
643  *  |    |   r1   |      r2      |         |
644  *  +----+--------+--------------+---------+
645  *
646  * Expect a merge of both regions. Only the region size gets updated.
647  */
648 static int alloc_bottom_up_after_check(void)
649 {
650 	struct memblock_region *rgn = &memblock.reserved.regions[0];
651 	struct region r1;
652 	void *allocated_ptr = NULL;
653 
654 	PREFIX_PUSH();
655 
656 	phys_addr_t r2_size = SZ_512;
657 	phys_addr_t total_size;
658 
659 	setup_memblock();
660 
661 	/*
662 	 * The first region starts at the aligned address to test region merging
663 	 */
664 	r1.base = memblock_start_of_DRAM() + SMP_CACHE_BYTES;
665 	r1.size = SZ_64;
666 
667 	total_size = r1.size + r2_size;
668 
669 	memblock_reserve(r1.base, r1.size);
670 
671 	allocated_ptr = memblock_alloc(r2_size, SMP_CACHE_BYTES);
672 
673 	ASSERT_NE(allocated_ptr, NULL);
674 	ASSERT_MEM_EQ(allocated_ptr, 0, r2_size);
675 
676 	ASSERT_EQ(rgn->size, total_size);
677 	ASSERT_EQ(rgn->base, r1.base);
678 
679 	ASSERT_EQ(memblock.reserved.cnt, 1);
680 	ASSERT_EQ(memblock.reserved.total_size, total_size);
681 
682 	test_pass_pop();
683 
684 	return 0;
685 }
686 
687 /*
688  * A test that tries to allocate memory when there are two reserved regions, the
689  * first one starting at the beginning of the available memory, with a gap too
690  * small to fit the new region:
691  *
692  *  |------------+     +--------+--------+  |
693  *  |     r1     |     |   r2   |   r3   |  |
694  *  +------------+-----+--------+--------+--+
695  *
696  * Expect to allocate after the second region, which starts at the higher
697  * address, and merge them into one. The region counter and total size fields
698  * get updated.
699  */
700 static int alloc_bottom_up_second_fit_check(void)
701 {
702 	struct memblock_region *rgn  = &memblock.reserved.regions[1];
703 	struct region r1, r2;
704 	void *allocated_ptr = NULL;
705 
706 	PREFIX_PUSH();
707 
708 	phys_addr_t r3_size = SZ_1K;
709 	phys_addr_t total_size;
710 
711 	setup_memblock();
712 
713 	r1.base = memblock_start_of_DRAM();
714 	r1.size = SZ_512;
715 
716 	r2.base = r1.base + r1.size + SZ_512;
717 	r2.size = SZ_256;
718 
719 	total_size = r1.size + r2.size + r3_size;
720 
721 	memblock_reserve(r1.base, r1.size);
722 	memblock_reserve(r2.base, r2.size);
723 
724 	allocated_ptr = memblock_alloc(r3_size, SMP_CACHE_BYTES);
725 
726 	ASSERT_NE(allocated_ptr, NULL);
727 	ASSERT_MEM_EQ(allocated_ptr, 0, r3_size);
728 
729 	ASSERT_EQ(rgn->size, r2.size + r3_size);
730 	ASSERT_EQ(rgn->base, r2.base);
731 
732 	ASSERT_EQ(memblock.reserved.cnt, 2);
733 	ASSERT_EQ(memblock.reserved.total_size, total_size);
734 
735 	test_pass_pop();
736 
737 	return 0;
738 }
739 
740 /* Test case wrappers */
/* Run the simple allocation test in both allocation directions */
static int alloc_simple_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_top_down_simple_check();
	memblock_set_bottom_up(true);
	alloc_bottom_up_simple_check();

	return 0;
}
751 
/* Run the disjoint-regions test in both allocation directions */
static int alloc_disjoint_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_top_down_disjoint_check();
	memblock_set_bottom_up(true);
	alloc_bottom_up_disjoint_check();

	return 0;
}
762 
/* Run the first-fit (merge before) test in both allocation directions */
static int alloc_before_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_top_down_before_check();
	memblock_set_bottom_up(true);
	alloc_bottom_up_before_check();

	return 0;
}
773 
/* Run the second-fit (merge after) test in both allocation directions */
static int alloc_after_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_top_down_after_check();
	memblock_set_bottom_up(true);
	alloc_bottom_up_after_check();

	return 0;
}
784 
/* Run the fits-in-the-gap test in both allocation directions */
static int alloc_in_between_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_in_between_generic_check);
	run_bottom_up(alloc_in_between_generic_check);

	return 0;
}
793 
/* Run the second-fit-with-gap test in both allocation directions */
static int alloc_second_fit_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_top_down_second_fit_check();
	memblock_set_bottom_up(true);
	alloc_bottom_up_second_fit_check();

	return 0;
}
804 
/* Run the too-small-gaps test in both allocation directions */
static int alloc_small_gaps_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_small_gaps_generic_check);
	run_bottom_up(alloc_small_gaps_generic_check);

	return 0;
}
813 
/* Run the all-memory-reserved test in both allocation directions */
static int alloc_all_reserved_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_all_reserved_generic_check);
	run_bottom_up(alloc_all_reserved_generic_check);

	return 0;
}
822 
/* Run the not-enough-space test in both allocation directions */
static int alloc_no_space_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_no_space_generic_check);
	run_bottom_up(alloc_no_space_generic_check);

	return 0;
}
831 
/* Run the just-enough-space test in both allocation directions */
static int alloc_limited_space_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_limited_space_generic_check);
	run_bottom_up(alloc_limited_space_generic_check);

	return 0;
}
840 
/* Run the no-available-memory test in both allocation directions */
static int alloc_no_memory_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_no_memory_generic_check);
	run_bottom_up(alloc_no_memory_generic_check);

	return 0;
}
849 
/* Run the larger-than-memory test in both allocation directions */
static int alloc_too_large_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_too_large_generic_check);
	run_bottom_up(alloc_too_large_generic_check);

	return 0;
}
858 
/*
 * Entry point for the memblock_alloc() test suite: runs every check in both
 * allocation directions against simulated physical memory. Returns 0
 * unconditionally; failures are reported by the ASSERT_* machinery inside
 * the individual checks.
 */
int memblock_alloc_checks(void)
{
	const char *func_testing = "memblock_alloc";

	prefix_reset();
	prefix_push(func_testing);
	test_print("Running %s tests...\n", func_testing);

	/* Start every run from a clean memblock state with backing memory */
	reset_memblock_attributes();
	dummy_physical_memory_init();

	alloc_simple_check();
	alloc_disjoint_check();
	alloc_before_check();
	alloc_after_check();
	alloc_second_fit_check();
	alloc_small_gaps_check();
	alloc_in_between_check();
	alloc_all_reserved_check();
	alloc_no_space_check();
	alloc_limited_space_check();
	alloc_no_memory_check();
	alloc_too_large_check();

	/* Release the buffer that simulates physical memory */
	dummy_physical_memory_cleanup();

	prefix_pop();

	return 0;
}
889