xref: /linux/tools/testing/memblock/tests/alloc_nid_api.c (revision ae544fd62c14265dc663a65b3f9c6c5a6134098a)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 #include "alloc_nid_api.h"
3 
/* Run mode: TEST_F_NONE or TEST_F_RAW — selects the *_raw API variant. */
static int alloc_nid_test_flags = TEST_F_NONE;
5 
6 static inline const char * const get_memblock_alloc_try_nid_name(int flags)
7 {
8 	if (flags & TEST_F_RAW)
9 		return "memblock_alloc_try_nid_raw";
10 	return "memblock_alloc_try_nid";
11 }
12 
13 static inline void *run_memblock_alloc_try_nid(phys_addr_t size,
14 					       phys_addr_t align,
15 					       phys_addr_t min_addr,
16 					       phys_addr_t max_addr, int nid)
17 {
18 	if (alloc_nid_test_flags & TEST_F_RAW)
19 		return memblock_alloc_try_nid_raw(size, align, min_addr,
20 						  max_addr, nid);
21 	return memblock_alloc_try_nid(size, align, min_addr, max_addr, nid);
22 }
23 
24 /*
25  * A simple test that tries to allocate a memory region within min_addr and
26  * max_addr range:
27  *
28  *        +                   +
29  *   |    +       +-----------+      |
30  *   |    |       |    rgn    |      |
31  *   +----+-------+-----------+------+
32  *        ^                   ^
33  *        |                   |
34  *        min_addr           max_addr
35  *
36  * Expect to allocate a cleared region that ends at max_addr.
37  */
38 static int alloc_try_nid_top_down_simple_check(void)
39 {
40 	struct memblock_region *rgn = &memblock.reserved.regions[0];
41 	void *allocated_ptr = NULL;
42 
43 	PREFIX_PUSH();
44 
45 	phys_addr_t size = SZ_128;
46 	phys_addr_t min_addr;
47 	phys_addr_t max_addr;
48 	phys_addr_t rgn_end;
49 
50 	setup_memblock();
51 
52 	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
53 	max_addr = min_addr + SZ_512;
54 
55 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
56 						   min_addr, max_addr,
57 						   NUMA_NO_NODE);
58 	rgn_end = rgn->base + rgn->size;
59 
60 	ASSERT_NE(allocated_ptr, NULL);
61 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
62 
63 	ASSERT_EQ(rgn->size, size);
64 	ASSERT_EQ(rgn->base, max_addr - size);
65 	ASSERT_EQ(rgn_end, max_addr);
66 
67 	ASSERT_EQ(memblock.reserved.cnt, 1);
68 	ASSERT_EQ(memblock.reserved.total_size, size);
69 
70 	test_pass_pop();
71 
72 	return 0;
73 }
74 
75 /*
76  * A simple test that tries to allocate a memory region within min_addr and
77  * max_addr range, where the end address is misaligned:
78  *
79  *         +       +            +
80  *  |      +       +---------+  +    |
81  *  |      |       |   rgn   |  |    |
82  *  +------+-------+---------+--+----+
83  *         ^       ^            ^
84  *         |       |            |
85  *       min_addr  |            max_addr
86  *                 |
87  *                 Aligned address
88  *                 boundary
89  *
90  * Expect to allocate a cleared, aligned region that ends before max_addr.
91  */
92 static int alloc_try_nid_top_down_end_misaligned_check(void)
93 {
94 	struct memblock_region *rgn = &memblock.reserved.regions[0];
95 	void *allocated_ptr = NULL;
96 
97 	PREFIX_PUSH();
98 
99 	phys_addr_t size = SZ_128;
100 	phys_addr_t misalign = SZ_2;
101 	phys_addr_t min_addr;
102 	phys_addr_t max_addr;
103 	phys_addr_t rgn_end;
104 
105 	setup_memblock();
106 
107 	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
108 	max_addr = min_addr + SZ_512 + misalign;
109 
110 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
111 						   min_addr, max_addr,
112 						   NUMA_NO_NODE);
113 	rgn_end = rgn->base + rgn->size;
114 
115 	ASSERT_NE(allocated_ptr, NULL);
116 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
117 
118 	ASSERT_EQ(rgn->size, size);
119 	ASSERT_EQ(rgn->base, max_addr - size - misalign);
120 	ASSERT_LT(rgn_end, max_addr);
121 
122 	ASSERT_EQ(memblock.reserved.cnt, 1);
123 	ASSERT_EQ(memblock.reserved.total_size, size);
124 
125 	test_pass_pop();
126 
127 	return 0;
128 }
129 
130 /*
131  * A simple test that tries to allocate a memory region, which spans over the
132  * min_addr and max_addr range:
133  *
134  *         +               +
135  *  |      +---------------+       |
136  *  |      |      rgn      |       |
137  *  +------+---------------+-------+
138  *         ^               ^
139  *         |               |
140  *         min_addr        max_addr
141  *
142  * Expect to allocate a cleared region that starts at min_addr and ends at
143  * max_addr, given that min_addr is aligned.
144  */
145 static int alloc_try_nid_exact_address_generic_check(void)
146 {
147 	struct memblock_region *rgn = &memblock.reserved.regions[0];
148 	void *allocated_ptr = NULL;
149 
150 	PREFIX_PUSH();
151 
152 	phys_addr_t size = SZ_1K;
153 	phys_addr_t min_addr;
154 	phys_addr_t max_addr;
155 	phys_addr_t rgn_end;
156 
157 	setup_memblock();
158 
159 	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES;
160 	max_addr = min_addr + size;
161 
162 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
163 						   min_addr, max_addr,
164 						   NUMA_NO_NODE);
165 	rgn_end = rgn->base + rgn->size;
166 
167 	ASSERT_NE(allocated_ptr, NULL);
168 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
169 
170 	ASSERT_EQ(rgn->size, size);
171 	ASSERT_EQ(rgn->base, min_addr);
172 	ASSERT_EQ(rgn_end, max_addr);
173 
174 	ASSERT_EQ(memblock.reserved.cnt, 1);
175 	ASSERT_EQ(memblock.reserved.total_size, size);
176 
177 	test_pass_pop();
178 
179 	return 0;
180 }
181 
182 /*
183  * A test that tries to allocate a memory region, which can't fit into
184  * min_addr and max_addr range:
185  *
186  *           +          +     +
187  *  |        +----------+-----+    |
188  *  |        |   rgn    +     |    |
189  *  +--------+----------+-----+----+
190  *           ^          ^     ^
191  *           |          |     |
192  *           Aligned    |    max_addr
193  *           address    |
194  *           boundary   min_addr
195  *
196  * Expect to drop the lower limit and allocate a cleared memory region which
197  * ends at max_addr (if the address is aligned).
198  */
199 static int alloc_try_nid_top_down_narrow_range_check(void)
200 {
201 	struct memblock_region *rgn = &memblock.reserved.regions[0];
202 	void *allocated_ptr = NULL;
203 
204 	PREFIX_PUSH();
205 
206 	phys_addr_t size = SZ_256;
207 	phys_addr_t min_addr;
208 	phys_addr_t max_addr;
209 
210 	setup_memblock();
211 
212 	min_addr = memblock_start_of_DRAM() + SZ_512;
213 	max_addr = min_addr + SMP_CACHE_BYTES;
214 
215 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
216 						   min_addr, max_addr,
217 						   NUMA_NO_NODE);
218 
219 	ASSERT_NE(allocated_ptr, NULL);
220 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
221 
222 	ASSERT_EQ(rgn->size, size);
223 	ASSERT_EQ(rgn->base, max_addr - size);
224 
225 	ASSERT_EQ(memblock.reserved.cnt, 1);
226 	ASSERT_EQ(memblock.reserved.total_size, size);
227 
228 	test_pass_pop();
229 
230 	return 0;
231 }
232 
233 /*
234  * A test that tries to allocate a memory region, which can't fit into
235  * min_addr and max_addr range, with the latter being too close to the beginning
236  * of the available memory:
237  *
238  *   +-------------+
239  *   |     new     |
240  *   +-------------+
241  *         +       +
242  *         |       +              |
243  *         |       |              |
244  *         +-------+--------------+
245  *         ^       ^
246  *         |       |
247  *         |       max_addr
248  *         |
249  *         min_addr
250  *
251  * Expect no allocation to happen.
252  */
253 static int alloc_try_nid_low_max_generic_check(void)
254 {
255 	void *allocated_ptr = NULL;
256 
257 	PREFIX_PUSH();
258 
259 	phys_addr_t size = SZ_1K;
260 	phys_addr_t min_addr;
261 	phys_addr_t max_addr;
262 
263 	setup_memblock();
264 
265 	min_addr = memblock_start_of_DRAM();
266 	max_addr = min_addr + SMP_CACHE_BYTES;
267 
268 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
269 						   min_addr, max_addr,
270 						   NUMA_NO_NODE);
271 
272 	ASSERT_EQ(allocated_ptr, NULL);
273 
274 	test_pass_pop();
275 
276 	return 0;
277 }
278 
279 /*
280  * A test that tries to allocate a memory region within min_addr and max_addr range,
281  * with min_addr being so close that it's next to an allocated region:
282  *
283  *          +                        +
284  *  |       +--------+---------------|
285  *  |       |   r1   |      rgn      |
286  *  +-------+--------+---------------+
287  *          ^                        ^
288  *          |                        |
289  *          min_addr                 max_addr
290  *
291  * Expect a merge of both regions. Only the region size gets updated.
292  */
293 static int alloc_try_nid_min_reserved_generic_check(void)
294 {
295 	struct memblock_region *rgn = &memblock.reserved.regions[0];
296 	void *allocated_ptr = NULL;
297 
298 	PREFIX_PUSH();
299 
300 	phys_addr_t r1_size = SZ_128;
301 	phys_addr_t r2_size = SZ_64;
302 	phys_addr_t total_size = r1_size + r2_size;
303 	phys_addr_t min_addr;
304 	phys_addr_t max_addr;
305 	phys_addr_t reserved_base;
306 
307 	setup_memblock();
308 
309 	max_addr = memblock_end_of_DRAM();
310 	min_addr = max_addr - r2_size;
311 	reserved_base = min_addr - r1_size;
312 
313 	memblock_reserve(reserved_base, r1_size);
314 
315 	allocated_ptr = run_memblock_alloc_try_nid(r2_size, SMP_CACHE_BYTES,
316 						   min_addr, max_addr,
317 						   NUMA_NO_NODE);
318 
319 	ASSERT_NE(allocated_ptr, NULL);
320 	assert_mem_content(allocated_ptr, r2_size, alloc_nid_test_flags);
321 
322 	ASSERT_EQ(rgn->size, total_size);
323 	ASSERT_EQ(rgn->base, reserved_base);
324 
325 	ASSERT_EQ(memblock.reserved.cnt, 1);
326 	ASSERT_EQ(memblock.reserved.total_size, total_size);
327 
328 	test_pass_pop();
329 
330 	return 0;
331 }
332 
333 /*
334  * A test that tries to allocate a memory region within min_addr and max_addr,
335  * with max_addr being so close that it's next to an allocated region:
336  *
337  *             +             +
338  *  |          +-------------+--------|
339  *  |          |     rgn     |   r1   |
340  *  +----------+-------------+--------+
341  *             ^             ^
342  *             |             |
343  *             min_addr      max_addr
344  *
345  * Expect a merge of regions. Only the region size gets updated.
346  */
347 static int alloc_try_nid_max_reserved_generic_check(void)
348 {
349 	struct memblock_region *rgn = &memblock.reserved.regions[0];
350 	void *allocated_ptr = NULL;
351 
352 	PREFIX_PUSH();
353 
354 	phys_addr_t r1_size = SZ_64;
355 	phys_addr_t r2_size = SZ_128;
356 	phys_addr_t total_size = r1_size + r2_size;
357 	phys_addr_t min_addr;
358 	phys_addr_t max_addr;
359 
360 	setup_memblock();
361 
362 	max_addr = memblock_end_of_DRAM() - r1_size;
363 	min_addr = max_addr - r2_size;
364 
365 	memblock_reserve(max_addr, r1_size);
366 
367 	allocated_ptr = run_memblock_alloc_try_nid(r2_size, SMP_CACHE_BYTES,
368 						   min_addr, max_addr,
369 						   NUMA_NO_NODE);
370 
371 	ASSERT_NE(allocated_ptr, NULL);
372 	assert_mem_content(allocated_ptr, r2_size, alloc_nid_test_flags);
373 
374 	ASSERT_EQ(rgn->size, total_size);
375 	ASSERT_EQ(rgn->base, min_addr);
376 
377 	ASSERT_EQ(memblock.reserved.cnt, 1);
378 	ASSERT_EQ(memblock.reserved.total_size, total_size);
379 
380 	test_pass_pop();
381 
382 	return 0;
383 }
384 
385 /*
386  * A test that tries to allocate memory within min_addr and max_addr range, when
387  * there are two reserved regions at the borders, with a gap big enough to fit
388  * a new region:
389  *
390  *                +           +
391  *  |    +--------+   +-------+------+  |
392  *  |    |   r2   |   |  rgn  |  r1  |  |
393  *  +----+--------+---+-------+------+--+
394  *                ^           ^
395  *                |           |
396  *                min_addr    max_addr
397  *
398  * Expect to merge the new region with r1. The second region does not get
399  * updated. The total size field gets updated.
400  */
401 
402 static int alloc_try_nid_top_down_reserved_with_space_check(void)
403 {
404 	struct memblock_region *rgn1 = &memblock.reserved.regions[1];
405 	struct memblock_region *rgn2 = &memblock.reserved.regions[0];
406 	void *allocated_ptr = NULL;
407 	struct region r1, r2;
408 
409 	PREFIX_PUSH();
410 
411 	phys_addr_t r3_size = SZ_64;
412 	phys_addr_t gap_size = SMP_CACHE_BYTES;
413 	phys_addr_t total_size;
414 	phys_addr_t max_addr;
415 	phys_addr_t min_addr;
416 
417 	setup_memblock();
418 
419 	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
420 	r1.size = SMP_CACHE_BYTES;
421 
422 	r2.size = SZ_128;
423 	r2.base = r1.base - (r3_size + gap_size + r2.size);
424 
425 	total_size = r1.size + r2.size + r3_size;
426 	min_addr = r2.base + r2.size;
427 	max_addr = r1.base;
428 
429 	memblock_reserve(r1.base, r1.size);
430 	memblock_reserve(r2.base, r2.size);
431 
432 	allocated_ptr = run_memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
433 						   min_addr, max_addr,
434 						   NUMA_NO_NODE);
435 
436 	ASSERT_NE(allocated_ptr, NULL);
437 	assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);
438 
439 	ASSERT_EQ(rgn1->size, r1.size + r3_size);
440 	ASSERT_EQ(rgn1->base, max_addr - r3_size);
441 
442 	ASSERT_EQ(rgn2->size, r2.size);
443 	ASSERT_EQ(rgn2->base, r2.base);
444 
445 	ASSERT_EQ(memblock.reserved.cnt, 2);
446 	ASSERT_EQ(memblock.reserved.total_size, total_size);
447 
448 	test_pass_pop();
449 
450 	return 0;
451 }
452 
453 /*
454  * A test that tries to allocate memory within min_addr and max_addr range, when
455  * there are two reserved regions at the borders, with a gap of a size equal to
456  * the size of the new region:
457  *
458  *                 +        +
459  *  |     +--------+--------+--------+     |
460  *  |     |   r2   |   r3   |   r1   |     |
461  *  +-----+--------+--------+--------+-----+
462  *                 ^        ^
463  *                 |        |
464  *                 min_addr max_addr
465  *
466  * Expect to merge all of the regions into one. The region counter and total
467  * size fields get updated.
468  */
469 static int alloc_try_nid_reserved_full_merge_generic_check(void)
470 {
471 	struct memblock_region *rgn = &memblock.reserved.regions[0];
472 	void *allocated_ptr = NULL;
473 	struct region r1, r2;
474 
475 	PREFIX_PUSH();
476 
477 	phys_addr_t r3_size = SZ_64;
478 	phys_addr_t total_size;
479 	phys_addr_t max_addr;
480 	phys_addr_t min_addr;
481 
482 	setup_memblock();
483 
484 	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
485 	r1.size = SMP_CACHE_BYTES;
486 
487 	r2.size = SZ_128;
488 	r2.base = r1.base - (r3_size + r2.size);
489 
490 	total_size = r1.size + r2.size + r3_size;
491 	min_addr = r2.base + r2.size;
492 	max_addr = r1.base;
493 
494 	memblock_reserve(r1.base, r1.size);
495 	memblock_reserve(r2.base, r2.size);
496 
497 	allocated_ptr = run_memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
498 						   min_addr, max_addr,
499 						   NUMA_NO_NODE);
500 
501 	ASSERT_NE(allocated_ptr, NULL);
502 	assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);
503 
504 	ASSERT_EQ(rgn->size, total_size);
505 	ASSERT_EQ(rgn->base, r2.base);
506 
507 	ASSERT_EQ(memblock.reserved.cnt, 1);
508 	ASSERT_EQ(memblock.reserved.total_size, total_size);
509 
510 	test_pass_pop();
511 
512 	return 0;
513 }
514 
515 /*
516  * A test that tries to allocate memory within min_addr and max_addr range, when
517  * there are two reserved regions at the borders, with a gap that can't fit
518  * a new region:
519  *
520  *                       +    +
521  *  |  +----------+------+    +------+   |
522  *  |  |    r3    |  r2  |    |  r1  |   |
523  *  +--+----------+------+----+------+---+
524  *                       ^    ^
525  *                       |    |
526  *                       |    max_addr
527  *                       |
528  *                       min_addr
529  *
530  * Expect to merge the new region with r2. The second region does not get
531  * updated. The total size counter gets updated.
532  */
533 static int alloc_try_nid_top_down_reserved_no_space_check(void)
534 {
535 	struct memblock_region *rgn1 = &memblock.reserved.regions[1];
536 	struct memblock_region *rgn2 = &memblock.reserved.regions[0];
537 	void *allocated_ptr = NULL;
538 	struct region r1, r2;
539 
540 	PREFIX_PUSH();
541 
542 	phys_addr_t r3_size = SZ_256;
543 	phys_addr_t gap_size = SMP_CACHE_BYTES;
544 	phys_addr_t total_size;
545 	phys_addr_t max_addr;
546 	phys_addr_t min_addr;
547 
548 	setup_memblock();
549 
550 	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
551 	r1.size = SMP_CACHE_BYTES;
552 
553 	r2.size = SZ_128;
554 	r2.base = r1.base - (r2.size + gap_size);
555 
556 	total_size = r1.size + r2.size + r3_size;
557 	min_addr = r2.base + r2.size;
558 	max_addr = r1.base;
559 
560 	memblock_reserve(r1.base, r1.size);
561 	memblock_reserve(r2.base, r2.size);
562 
563 	allocated_ptr = run_memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
564 						   min_addr, max_addr,
565 						   NUMA_NO_NODE);
566 
567 	ASSERT_NE(allocated_ptr, NULL);
568 	assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);
569 
570 	ASSERT_EQ(rgn1->size, r1.size);
571 	ASSERT_EQ(rgn1->base, r1.base);
572 
573 	ASSERT_EQ(rgn2->size, r2.size + r3_size);
574 	ASSERT_EQ(rgn2->base, r2.base - r3_size);
575 
576 	ASSERT_EQ(memblock.reserved.cnt, 2);
577 	ASSERT_EQ(memblock.reserved.total_size, total_size);
578 
579 	test_pass_pop();
580 
581 	return 0;
582 }
583 
584 /*
585  * A test that tries to allocate memory within min_addr and max_addr range, but
586  * it's too narrow and everything else is reserved:
587  *
588  *            +-----------+
589  *            |    new    |
590  *            +-----------+
591  *                 +      +
592  *  |--------------+      +----------|
593  *  |      r2      |      |    r1    |
594  *  +--------------+------+----------+
595  *                 ^      ^
596  *                 |      |
597  *                 |      max_addr
598  *                 |
599  *                 min_addr
600  *
601  * Expect no allocation to happen.
602  */
603 
604 static int alloc_try_nid_reserved_all_generic_check(void)
605 {
606 	void *allocated_ptr = NULL;
607 	struct region r1, r2;
608 
609 	PREFIX_PUSH();
610 
611 	phys_addr_t r3_size = SZ_256;
612 	phys_addr_t gap_size = SMP_CACHE_BYTES;
613 	phys_addr_t max_addr;
614 	phys_addr_t min_addr;
615 
616 	setup_memblock();
617 
618 	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES;
619 	r1.size = SMP_CACHE_BYTES;
620 
621 	r2.size = MEM_SIZE - (r1.size + gap_size);
622 	r2.base = memblock_start_of_DRAM();
623 
624 	min_addr = r2.base + r2.size;
625 	max_addr = r1.base;
626 
627 	memblock_reserve(r1.base, r1.size);
628 	memblock_reserve(r2.base, r2.size);
629 
630 	allocated_ptr = run_memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
631 						   min_addr, max_addr,
632 						   NUMA_NO_NODE);
633 
634 	ASSERT_EQ(allocated_ptr, NULL);
635 
636 	test_pass_pop();
637 
638 	return 0;
639 }
640 
641 /*
642  * A test that tries to allocate a memory region, where max_addr is
643  * bigger than the end address of the available memory. Expect to allocate
644  * a cleared region that ends before the end of the memory.
645  */
646 static int alloc_try_nid_top_down_cap_max_check(void)
647 {
648 	struct memblock_region *rgn = &memblock.reserved.regions[0];
649 	void *allocated_ptr = NULL;
650 
651 	PREFIX_PUSH();
652 
653 	phys_addr_t size = SZ_256;
654 	phys_addr_t min_addr;
655 	phys_addr_t max_addr;
656 
657 	setup_memblock();
658 
659 	min_addr = memblock_end_of_DRAM() - SZ_1K;
660 	max_addr = memblock_end_of_DRAM() + SZ_256;
661 
662 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
663 						   min_addr, max_addr,
664 						   NUMA_NO_NODE);
665 
666 	ASSERT_NE(allocated_ptr, NULL);
667 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
668 
669 	ASSERT_EQ(rgn->size, size);
670 	ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - size);
671 
672 	ASSERT_EQ(memblock.reserved.cnt, 1);
673 	ASSERT_EQ(memblock.reserved.total_size, size);
674 
675 	test_pass_pop();
676 
677 	return 0;
678 }
679 
680 /*
681  * A test that tries to allocate a memory region, where min_addr is
682  * smaller than the start address of the available memory. Expect to allocate
683  * a cleared region that ends before the end of the memory.
684  */
685 static int alloc_try_nid_top_down_cap_min_check(void)
686 {
687 	struct memblock_region *rgn = &memblock.reserved.regions[0];
688 	void *allocated_ptr = NULL;
689 
690 	PREFIX_PUSH();
691 
692 	phys_addr_t size = SZ_1K;
693 	phys_addr_t min_addr;
694 	phys_addr_t max_addr;
695 
696 	setup_memblock();
697 
698 	min_addr = memblock_start_of_DRAM() - SZ_256;
699 	max_addr = memblock_end_of_DRAM();
700 
701 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
702 						   min_addr, max_addr,
703 						   NUMA_NO_NODE);
704 
705 	ASSERT_NE(allocated_ptr, NULL);
706 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
707 
708 	ASSERT_EQ(rgn->size, size);
709 	ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - size);
710 
711 	ASSERT_EQ(memblock.reserved.cnt, 1);
712 	ASSERT_EQ(memblock.reserved.total_size, size);
713 
714 	test_pass_pop();
715 
716 	return 0;
717 }
718 
719 /*
720  * A simple test that tries to allocate a memory region within min_addr and
721  * max_addr range:
722  *
723  *        +                       +
724  *   |    +-----------+           |      |
725  *   |    |    rgn    |           |      |
726  *   +----+-----------+-----------+------+
727  *        ^                       ^
728  *        |                       |
729  *        min_addr                max_addr
730  *
731  * Expect to allocate a cleared region that ends before max_addr.
732  */
733 static int alloc_try_nid_bottom_up_simple_check(void)
734 {
735 	struct memblock_region *rgn = &memblock.reserved.regions[0];
736 	void *allocated_ptr = NULL;
737 
738 	PREFIX_PUSH();
739 
740 	phys_addr_t size = SZ_128;
741 	phys_addr_t min_addr;
742 	phys_addr_t max_addr;
743 	phys_addr_t rgn_end;
744 
745 	setup_memblock();
746 
747 	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
748 	max_addr = min_addr + SZ_512;
749 
750 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
751 						   min_addr, max_addr,
752 						   NUMA_NO_NODE);
753 	rgn_end = rgn->base + rgn->size;
754 
755 	ASSERT_NE(allocated_ptr, NULL);
756 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
757 
758 	ASSERT_EQ(rgn->size, size);
759 	ASSERT_EQ(rgn->base, min_addr);
760 	ASSERT_LT(rgn_end, max_addr);
761 
762 	ASSERT_EQ(memblock.reserved.cnt, 1);
763 	ASSERT_EQ(memblock.reserved.total_size, size);
764 
765 	test_pass_pop();
766 
767 	return 0;
768 }
769 
770 /*
771  * A simple test that tries to allocate a memory region within min_addr and
772  * max_addr range, where the start address is misaligned:
773  *
774  *        +                     +
775  *  |     +   +-----------+     +     |
776  *  |     |   |    rgn    |     |     |
777  *  +-----+---+-----------+-----+-----+
778  *        ^   ^----.            ^
779  *        |        |            |
780  *     min_addr    |            max_addr
781  *                 |
782  *                 Aligned address
783  *                 boundary
784  *
785  * Expect to allocate a cleared, aligned region that ends before max_addr.
786  */
787 static int alloc_try_nid_bottom_up_start_misaligned_check(void)
788 {
789 	struct memblock_region *rgn = &memblock.reserved.regions[0];
790 	void *allocated_ptr = NULL;
791 
792 	PREFIX_PUSH();
793 
794 	phys_addr_t size = SZ_128;
795 	phys_addr_t misalign = SZ_2;
796 	phys_addr_t min_addr;
797 	phys_addr_t max_addr;
798 	phys_addr_t rgn_end;
799 
800 	setup_memblock();
801 
802 	min_addr = memblock_start_of_DRAM() + misalign;
803 	max_addr = min_addr + SZ_512;
804 
805 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
806 						   min_addr, max_addr,
807 						   NUMA_NO_NODE);
808 	rgn_end = rgn->base + rgn->size;
809 
810 	ASSERT_NE(allocated_ptr, NULL);
811 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
812 
813 	ASSERT_EQ(rgn->size, size);
814 	ASSERT_EQ(rgn->base, min_addr + (SMP_CACHE_BYTES - misalign));
815 	ASSERT_LT(rgn_end, max_addr);
816 
817 	ASSERT_EQ(memblock.reserved.cnt, 1);
818 	ASSERT_EQ(memblock.reserved.total_size, size);
819 
820 	test_pass_pop();
821 
822 	return 0;
823 }
824 
825 /*
826  * A test that tries to allocate a memory region, which can't fit into min_addr
827  * and max_addr range:
828  *
829  *                      +    +
830  *  |---------+         +    +      |
831  *  |   rgn   |         |    |      |
832  *  +---------+---------+----+------+
833  *                      ^    ^
834  *                      |    |
835  *                      |    max_addr
836  *                      |
837  *                      min_addr
838  *
839  * Expect to drop the lower limit and allocate a cleared memory region which
840  * starts at the beginning of the available memory.
841  */
842 static int alloc_try_nid_bottom_up_narrow_range_check(void)
843 {
844 	struct memblock_region *rgn = &memblock.reserved.regions[0];
845 	void *allocated_ptr = NULL;
846 
847 	PREFIX_PUSH();
848 
849 	phys_addr_t size = SZ_256;
850 	phys_addr_t min_addr;
851 	phys_addr_t max_addr;
852 
853 	setup_memblock();
854 
855 	min_addr = memblock_start_of_DRAM() + SZ_512;
856 	max_addr = min_addr + SMP_CACHE_BYTES;
857 
858 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
859 						   min_addr, max_addr,
860 						   NUMA_NO_NODE);
861 
862 	ASSERT_NE(allocated_ptr, NULL);
863 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
864 
865 	ASSERT_EQ(rgn->size, size);
866 	ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
867 
868 	ASSERT_EQ(memblock.reserved.cnt, 1);
869 	ASSERT_EQ(memblock.reserved.total_size, size);
870 
871 	test_pass_pop();
872 
873 	return 0;
874 }
875 
876 /*
877  * A test that tries to allocate memory within min_addr and max_addr range, when
878  * there are two reserved regions at the borders, with a gap big enough to fit
879  * a new region:
880  *
881  *                +           +
882  *  |    +--------+-------+   +------+  |
883  *  |    |   r2   |  rgn  |   |  r1  |  |
884  *  +----+--------+-------+---+------+--+
885  *                ^           ^
886  *                |           |
887  *                min_addr    max_addr
888  *
889  * Expect to merge the new region with r2. The second region does not get
890  * updated. The total size field gets updated.
891  */
892 
893 static int alloc_try_nid_bottom_up_reserved_with_space_check(void)
894 {
895 	struct memblock_region *rgn1 = &memblock.reserved.regions[1];
896 	struct memblock_region *rgn2 = &memblock.reserved.regions[0];
897 	void *allocated_ptr = NULL;
898 	struct region r1, r2;
899 
900 	PREFIX_PUSH();
901 
902 	phys_addr_t r3_size = SZ_64;
903 	phys_addr_t gap_size = SMP_CACHE_BYTES;
904 	phys_addr_t total_size;
905 	phys_addr_t max_addr;
906 	phys_addr_t min_addr;
907 
908 	setup_memblock();
909 
910 	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
911 	r1.size = SMP_CACHE_BYTES;
912 
913 	r2.size = SZ_128;
914 	r2.base = r1.base - (r3_size + gap_size + r2.size);
915 
916 	total_size = r1.size + r2.size + r3_size;
917 	min_addr = r2.base + r2.size;
918 	max_addr = r1.base;
919 
920 	memblock_reserve(r1.base, r1.size);
921 	memblock_reserve(r2.base, r2.size);
922 
923 	allocated_ptr = run_memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
924 						   min_addr, max_addr,
925 						   NUMA_NO_NODE);
926 
927 	ASSERT_NE(allocated_ptr, NULL);
928 	assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);
929 
930 	ASSERT_EQ(rgn1->size, r1.size);
931 	ASSERT_EQ(rgn1->base, max_addr);
932 
933 	ASSERT_EQ(rgn2->size, r2.size + r3_size);
934 	ASSERT_EQ(rgn2->base, r2.base);
935 
936 	ASSERT_EQ(memblock.reserved.cnt, 2);
937 	ASSERT_EQ(memblock.reserved.total_size, total_size);
938 
939 	test_pass_pop();
940 
941 	return 0;
942 }
943 
944 /*
945  * A test that tries to allocate memory within min_addr and max_addr range, when
946  * there are two reserved regions at the borders, with a gap of a size equal to
947  * the size of the new region:
948  *
949  *                         +   +
950  *  |----------+    +------+   +----+  |
951  *  |    r3    |    |  r2  |   | r1 |  |
952  *  +----------+----+------+---+----+--+
953  *                         ^   ^
954  *                         |   |
955  *                         |  max_addr
956  *                         |
957  *                         min_addr
958  *
959  * Expect to drop the lower limit and allocate memory at the beginning of the
960  * available memory. The region counter and total size fields get updated.
961  * Other regions are not modified.
962  */
963 
964 static int alloc_try_nid_bottom_up_reserved_no_space_check(void)
965 {
966 	struct memblock_region *rgn1 = &memblock.reserved.regions[2];
967 	struct memblock_region *rgn2 = &memblock.reserved.regions[1];
968 	struct memblock_region *rgn3 = &memblock.reserved.regions[0];
969 	void *allocated_ptr = NULL;
970 	struct region r1, r2;
971 
972 	PREFIX_PUSH();
973 
974 	phys_addr_t r3_size = SZ_256;
975 	phys_addr_t gap_size = SMP_CACHE_BYTES;
976 	phys_addr_t total_size;
977 	phys_addr_t max_addr;
978 	phys_addr_t min_addr;
979 
980 	setup_memblock();
981 
982 	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
983 	r1.size = SMP_CACHE_BYTES;
984 
985 	r2.size = SZ_128;
986 	r2.base = r1.base - (r2.size + gap_size);
987 
988 	total_size = r1.size + r2.size + r3_size;
989 	min_addr = r2.base + r2.size;
990 	max_addr = r1.base;
991 
992 	memblock_reserve(r1.base, r1.size);
993 	memblock_reserve(r2.base, r2.size);
994 
995 	allocated_ptr = run_memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
996 						   min_addr, max_addr,
997 						   NUMA_NO_NODE);
998 
999 	ASSERT_NE(allocated_ptr, NULL);
1000 	assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);
1001 
1002 	ASSERT_EQ(rgn3->size, r3_size);
1003 	ASSERT_EQ(rgn3->base, memblock_start_of_DRAM());
1004 
1005 	ASSERT_EQ(rgn2->size, r2.size);
1006 	ASSERT_EQ(rgn2->base, r2.base);
1007 
1008 	ASSERT_EQ(rgn1->size, r1.size);
1009 	ASSERT_EQ(rgn1->base, r1.base);
1010 
1011 	ASSERT_EQ(memblock.reserved.cnt, 3);
1012 	ASSERT_EQ(memblock.reserved.total_size, total_size);
1013 
1014 	test_pass_pop();
1015 
1016 	return 0;
1017 }
1018 
1019 /*
1020  * A test that tries to allocate a memory region, where max_addr is
1021  * bigger than the end address of the available memory. Expect to allocate
1022  * a cleared region that starts at the min_addr
1023  */
1024 static int alloc_try_nid_bottom_up_cap_max_check(void)
1025 {
1026 	struct memblock_region *rgn = &memblock.reserved.regions[0];
1027 	void *allocated_ptr = NULL;
1028 
1029 	PREFIX_PUSH();
1030 
1031 	phys_addr_t size = SZ_256;
1032 	phys_addr_t min_addr;
1033 	phys_addr_t max_addr;
1034 
1035 	setup_memblock();
1036 
1037 	min_addr = memblock_start_of_DRAM() + SZ_1K;
1038 	max_addr = memblock_end_of_DRAM() + SZ_256;
1039 
1040 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
1041 						   min_addr, max_addr,
1042 						   NUMA_NO_NODE);
1043 
1044 	ASSERT_NE(allocated_ptr, NULL);
1045 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1046 
1047 	ASSERT_EQ(rgn->size, size);
1048 	ASSERT_EQ(rgn->base, min_addr);
1049 
1050 	ASSERT_EQ(memblock.reserved.cnt, 1);
1051 	ASSERT_EQ(memblock.reserved.total_size, size);
1052 
1053 	test_pass_pop();
1054 
1055 	return 0;
1056 }
1057 
1058 /*
1059  * A test that tries to allocate a memory region, where min_addr is
1060  * smaller than the start address of the available memory. Expect to allocate
1061  * a cleared region at the beginning of the available memory.
1062  */
1063 static int alloc_try_nid_bottom_up_cap_min_check(void)
1064 {
1065 	struct memblock_region *rgn = &memblock.reserved.regions[0];
1066 	void *allocated_ptr = NULL;
1067 
1068 	PREFIX_PUSH();
1069 
1070 	phys_addr_t size = SZ_1K;
1071 	phys_addr_t min_addr;
1072 	phys_addr_t max_addr;
1073 
1074 	setup_memblock();
1075 
1076 	min_addr = memblock_start_of_DRAM();
1077 	max_addr = memblock_end_of_DRAM() - SZ_256;
1078 
1079 	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
1080 						   min_addr, max_addr,
1081 						   NUMA_NO_NODE);
1082 
1083 	ASSERT_NE(allocated_ptr, NULL);
1084 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1085 
1086 	ASSERT_EQ(rgn->size, size);
1087 	ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
1088 
1089 	ASSERT_EQ(memblock.reserved.cnt, 1);
1090 	ASSERT_EQ(memblock.reserved.total_size, size);
1091 
1092 	test_pass_pop();
1093 
1094 	return 0;
1095 }
1096 
1097 /* Test case wrappers */
static int alloc_try_nid_simple_check(void)
{
	/* Run the simple in-range test in both allocation directions. */
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_try_nid_top_down_simple_check();
	memblock_set_bottom_up(true);
	alloc_try_nid_bottom_up_simple_check();

	return 0;
}
1108 
static int alloc_try_nid_misaligned_check(void)
{
	/*
	 * Misaligned-boundary tests: top-down exercises a misaligned end
	 * address, bottom-up a misaligned start address.
	 */
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_try_nid_top_down_end_misaligned_check();
	memblock_set_bottom_up(true);
	alloc_try_nid_bottom_up_start_misaligned_check();

	return 0;
}
1119 
static int alloc_try_nid_narrow_range_check(void)
{
	/* Run the narrow-range test in both allocation directions. */
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_try_nid_top_down_narrow_range_check();
	memblock_set_bottom_up(true);
	alloc_try_nid_bottom_up_narrow_range_check();

	return 0;
}
1130 
static int alloc_try_nid_reserved_with_space_check(void)
{
	/* Reserved-regions-with-free-gap test, both allocation directions. */
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_try_nid_top_down_reserved_with_space_check();
	memblock_set_bottom_up(true);
	alloc_try_nid_bottom_up_reserved_with_space_check();

	return 0;
}
1141 
static int alloc_try_nid_reserved_no_space_check(void)
{
	/* Reserved-regions-with-no-free-gap test, both allocation directions. */
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_try_nid_top_down_reserved_no_space_check();
	memblock_set_bottom_up(true);
	alloc_try_nid_bottom_up_reserved_no_space_check();

	return 0;
}
1152 
static int alloc_try_nid_cap_max_check(void)
{
	/* max_addr beyond end of memory, both allocation directions. */
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_try_nid_top_down_cap_max_check();
	memblock_set_bottom_up(true);
	alloc_try_nid_bottom_up_cap_max_check();

	return 0;
}
1163 
static int alloc_try_nid_cap_min_check(void)
{
	/* min_addr at/below start of memory, both allocation directions. */
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_try_nid_top_down_cap_min_check();
	memblock_set_bottom_up(true);
	alloc_try_nid_bottom_up_cap_min_check();

	return 0;
}
1174 
static int alloc_try_nid_min_reserved_check(void)
{
	/* Direction-agnostic check; the helpers set the allocation direction. */
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_try_nid_min_reserved_generic_check);
	run_bottom_up(alloc_try_nid_min_reserved_generic_check);

	return 0;
}
1183 
static int alloc_try_nid_max_reserved_check(void)
{
	/* Direction-agnostic check; the helpers set the allocation direction. */
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_try_nid_max_reserved_generic_check);
	run_bottom_up(alloc_try_nid_max_reserved_generic_check);

	return 0;
}
1192 
static int alloc_try_nid_exact_address_check(void)
{
	/* Direction-agnostic check; the helpers set the allocation direction. */
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_try_nid_exact_address_generic_check);
	run_bottom_up(alloc_try_nid_exact_address_generic_check);

	return 0;
}
1201 
static int alloc_try_nid_reserved_full_merge_check(void)
{
	/* Direction-agnostic check; the helpers set the allocation direction. */
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_try_nid_reserved_full_merge_generic_check);
	run_bottom_up(alloc_try_nid_reserved_full_merge_generic_check);

	return 0;
}
1210 
static int alloc_try_nid_reserved_all_check(void)
{
	/* Direction-agnostic check; the helpers set the allocation direction. */
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_try_nid_reserved_all_generic_check);
	run_bottom_up(alloc_try_nid_reserved_all_generic_check);

	return 0;
}
1219 
static int alloc_try_nid_low_max_check(void)
{
	/* Direction-agnostic check; the helpers set the allocation direction. */
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_try_nid_low_max_generic_check);
	run_bottom_up(alloc_try_nid_low_max_generic_check);

	return 0;
}
1228 
1229 static int memblock_alloc_nid_checks_internal(int flags)
1230 {
1231 	const char *func = get_memblock_alloc_try_nid_name(flags);
1232 
1233 	alloc_nid_test_flags = flags;
1234 	prefix_reset();
1235 	prefix_push(func);
1236 	test_print("Running %s tests...\n", func);
1237 
1238 	reset_memblock_attributes();
1239 	dummy_physical_memory_init();
1240 
1241 	alloc_try_nid_simple_check();
1242 	alloc_try_nid_misaligned_check();
1243 	alloc_try_nid_narrow_range_check();
1244 	alloc_try_nid_reserved_with_space_check();
1245 	alloc_try_nid_reserved_no_space_check();
1246 	alloc_try_nid_cap_max_check();
1247 	alloc_try_nid_cap_min_check();
1248 
1249 	alloc_try_nid_min_reserved_check();
1250 	alloc_try_nid_max_reserved_check();
1251 	alloc_try_nid_exact_address_check();
1252 	alloc_try_nid_reserved_full_merge_check();
1253 	alloc_try_nid_reserved_all_check();
1254 	alloc_try_nid_low_max_check();
1255 
1256 	dummy_physical_memory_cleanup();
1257 
1258 	prefix_pop();
1259 
1260 	return 0;
1261 }
1262 
1263 int memblock_alloc_nid_checks(void)
1264 {
1265 	memblock_alloc_nid_checks_internal(TEST_F_NONE);
1266 	memblock_alloc_nid_checks_internal(TEST_F_RAW);
1267 
1268 	return 0;
1269 }
1270