// SPDX-License-Identifier: GPL-2.0-or-later
#include "alloc_nid_api.h"

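/* Controls whether the raw (i.e. non-zeroing) allocator variant is tested. */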
static int alloc_nid_test_flags = TEST_F_NONE;

/*
 * contains the fraction of MEM_SIZE contained in each node in basis point
 * units (one hundredth of 1% or 1/10000)
 */
static const unsigned int node_fractions[] = {
	2500, /* 1/4  */
	 625, /* 1/16 */
	1250, /* 1/8  */
	1250, /* 1/8  */
	 625, /* 1/16 */
	 625, /* 1/16 */
	2500, /* 1/4  */
	 625, /* 1/16 */
};
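
/*
 * For example, the first entry gives node 0 a quarter of the memory:
 * MEM_SIZE * 2500 / 10000 = MEM_SIZE / 4. The fractions sum to 10000,
 * so the nodes cover all of MEM_SIZE.
 */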

static inline const char * const get_memblock_alloc_try_nid_name(int flags)
{
	if (flags & TEST_F_RAW)
		return "memblock_alloc_try_nid_raw";
	return "memblock_alloc_try_nid";
}

static inline void *run_memblock_alloc_try_nid(phys_addr_t size,
					       phys_addr_t align,
					       phys_addr_t min_addr,
					       phys_addr_t max_addr, int nid)
{
	if (alloc_nid_test_flags & TEST_F_RAW)
		return memblock_alloc_try_nid_raw(size, align, min_addr,
						  max_addr, nid);
	return memblock_alloc_try_nid(size, align, min_addr, max_addr, nid);
}
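
/*
 * Every test below allocates through this wrapper, so each scenario is
 * exercised for both the zeroing and the raw variant. The raw variant does
 * not initialize the allocated memory; assert_mem_content() is assumed to
 * account for this by checking for zeroed contents only when TEST_F_RAW is
 * not set.
 */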

/*
 * A simple test that tries to allocate a memory region within min_addr and
 * max_addr range:
 *
 *        +                   +
 *   |    +       +-----------+      |
 *   |    |       |    rgn    |      |
 *   +----+-------+-----------+------+
 *        ^                   ^
 *        |                   |
 *        min_addr           max_addr
 *
 * Expect to allocate a region that ends at max_addr.
 */
static int alloc_try_nid_top_down_simple_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_128;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t rgn_end;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
	max_addr = min_addr + SZ_512;

	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
						   min_addr, max_addr,
						   NUMA_NO_NODE);
	rgn_end = rgn->base + rgn->size;

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, max_addr - size);
	ASSERT_EQ(rgn_end, max_addr);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
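
/*
 * All tests below follow the same skeleton: PREFIX_PUSH() and
 * setup_memblock() (or setup_numa_memblock() for the NUMA tests) prepare
 * the simulated memory, the wrapper above performs the allocation, and the
 * ASSERT_*() macros validate the resulting memblock.reserved state before
 * test_pass_pop() records the passing test.
 */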

/*
 * A simple test that tries to allocate a memory region within min_addr and
 * max_addr range, where the end address is misaligned:
 *
 *         +       +            +
 *  |      +       +---------+  +    |
 *  |      |       |   rgn   |  |    |
 *  +------+-------+---------+--+----+
 *         ^       ^            ^
 *         |       |            |
 *       min_addr  |            max_addr
 *                 |
 *                 Aligned address
 *                 boundary
 *
 * Expect to allocate an aligned region that ends before max_addr.
 */
static int alloc_try_nid_top_down_end_misaligned_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_128;
	phys_addr_t misalign = SZ_2;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t rgn_end;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
	max_addr = min_addr + SZ_512 + misalign;

	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
						   min_addr, max_addr,
						   NUMA_NO_NODE);
	rgn_end = rgn->base + rgn->size;

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, max_addr - size - misalign);
	ASSERT_LT(rgn_end, max_addr);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A simple test that tries to allocate a memory region, which spans over the
 * min_addr and max_addr range:
 *
 *         +               +
 *  |      +---------------+       |
 *  |      |      rgn      |       |
 *  +------+---------------+-------+
 *         ^               ^
 *         |               |
 *         min_addr        max_addr
 *
 * Expect to allocate a region that starts at min_addr and ends at
 * max_addr, given that min_addr is aligned.
 */
static int alloc_try_nid_exact_address_generic_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_1K;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t rgn_end;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES;
	max_addr = min_addr + size;

	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
						   min_addr, max_addr,
						   NUMA_NO_NODE);
	rgn_end = rgn->base + rgn->size;

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, min_addr);
	ASSERT_EQ(rgn_end, max_addr);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region, which can't fit into the
 * min_addr and max_addr range:
 *
 *           +          +     +
 *  |        +----------+-----+    |
 *  |        |   rgn    +     |    |
 *  +--------+----------+-----+----+
 *           ^          ^     ^
 *           |          |     |
 *           Aligned    |    max_addr
 *           address    |
 *           boundary   min_addr
 *
 * Expect to drop the lower limit and allocate a memory region which
 * ends at max_addr (if the address is aligned).
 */
static int alloc_try_nid_top_down_narrow_range_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_256;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM() + SZ_512;
	max_addr = min_addr + SMP_CACHE_BYTES;

	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
						   min_addr, max_addr,
						   NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, max_addr - size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region, which can't fit into the
 * min_addr and max_addr range, with the latter being too close to the beginning
 * of the available memory:
 *
 *   +-------------+
 *   |     new     |
 *   +-------------+
 *         +       +
 *         |       +              |
 *         |       |              |
 *         +-------+--------------+
 *         ^       ^
 *         |       |
 *         |       max_addr
 *         |
 *         min_addr
 *
 * Expect no allocation to happen.
 */
static int alloc_try_nid_low_max_generic_check(void)
{
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_1K;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM();
	max_addr = min_addr + SMP_CACHE_BYTES;

	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
						   min_addr, max_addr,
						   NUMA_NO_NODE);

	ASSERT_EQ(allocated_ptr, NULL);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region within min_addr and max_addr
 * range, with min_addr being so close that it's next to an allocated region:
 *
 *          +                        +
 *  |       +--------+---------------|
 *  |       |   r1   |      rgn      |
 *  +-------+--------+---------------+
 *          ^                        ^
 *          |                        |
 *          min_addr                 max_addr
 *
 * Expect a merge of both regions. Only the region size gets updated.
 */
static int alloc_try_nid_min_reserved_generic_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t r1_size = SZ_128;
	phys_addr_t r2_size = SZ_64;
	phys_addr_t total_size = r1_size + r2_size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t reserved_base;

	PREFIX_PUSH();
	setup_memblock();

	max_addr = memblock_end_of_DRAM();
	min_addr = max_addr - r2_size;
	reserved_base = min_addr - r1_size;

	memblock_reserve(reserved_base, r1_size);

	allocated_ptr = run_memblock_alloc_try_nid(r2_size, SMP_CACHE_BYTES,
						   min_addr, max_addr,
						   NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, r2_size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, total_size);
	ASSERT_EQ(rgn->base, reserved_base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region within min_addr and max_addr
 * range, with max_addr being so close that it's next to an allocated region:
 *
 *             +             +
 *  |          +-------------+--------|
 *  |          |     rgn     |   r1   |
 *  +----------+-------------+--------+
 *             ^             ^
 *             |             |
 *             min_addr      max_addr
 *
 * Expect a merge of both regions. Only the region size gets updated.
 */
static int alloc_try_nid_max_reserved_generic_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t r1_size = SZ_64;
	phys_addr_t r2_size = SZ_128;
	phys_addr_t total_size = r1_size + r2_size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_memblock();

	max_addr = memblock_end_of_DRAM() - r1_size;
	min_addr = max_addr - r2_size;

	memblock_reserve(max_addr, r1_size);

	allocated_ptr = run_memblock_alloc_try_nid(r2_size, SMP_CACHE_BYTES,
						   min_addr, max_addr,
						   NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, r2_size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, total_size);
	ASSERT_EQ(rgn->base, min_addr);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory within min_addr and max_addr range, when
 * there are two reserved regions at the borders, with a gap big enough to fit
 * a new region:
 *
 *                +           +
 *  |    +--------+   +-------+------+  |
 *  |    |   r2   |   |  rgn  |  r1  |  |
 *  +----+--------+---+-------+------+--+
 *                ^           ^
 *                |           |
 *                min_addr    max_addr
 *
 * Expect to merge the new region with r1. The second region (r2) does not
 * get updated. The total size field gets updated.
 */
static int alloc_try_nid_top_down_reserved_with_space_check(void)
{
	struct memblock_region *rgn1 = &memblock.reserved.regions[1];
	struct memblock_region *rgn2 = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	struct region r1, r2;
	phys_addr_t r3_size = SZ_64;
	phys_addr_t gap_size = SMP_CACHE_BYTES;
	phys_addr_t total_size;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_memblock();

	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
	r1.size = SMP_CACHE_BYTES;

	r2.size = SZ_128;
	r2.base = r1.base - (r3_size + gap_size + r2.size);

	total_size = r1.size + r2.size + r3_size;
	min_addr = r2.base + r2.size;
	max_addr = r1.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = run_memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
						   min_addr, max_addr,
						   NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);

	ASSERT_EQ(rgn1->size, r1.size + r3_size);
	ASSERT_EQ(rgn1->base, max_addr - r3_size);

	ASSERT_EQ(rgn2->size, r2.size);
	ASSERT_EQ(rgn2->base, r2.base);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory within min_addr and max_addr range, when
 * there are two reserved regions at the borders, with a gap of a size equal to
 * the size of the new region:
 *
 *                 +        +
 *  |     +--------+--------+--------+     |
 *  |     |   r2   |   r3   |   r1   |     |
 *  +-----+--------+--------+--------+-----+
 *                 ^        ^
 *                 |        |
 *                 min_addr max_addr
 *
 * Expect to merge all of the regions into one. The region counter and total
 * size fields get updated.
 */
static int alloc_try_nid_reserved_full_merge_generic_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	struct region r1, r2;
	phys_addr_t r3_size = SZ_64;
	phys_addr_t total_size;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_memblock();

	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
	r1.size = SMP_CACHE_BYTES;

	r2.size = SZ_128;
	r2.base = r1.base - (r3_size + r2.size);

	total_size = r1.size + r2.size + r3_size;
	min_addr = r2.base + r2.size;
	max_addr = r1.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = run_memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
						   min_addr, max_addr,
						   NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, total_size);
	ASSERT_EQ(rgn->base, r2.base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory within min_addr and max_addr range, when
 * there are two reserved regions at the borders, with a gap that can't fit
 * a new region:
 *
 *                       +    +
 *  |  +----------+------+    +------+   |
 *  |  |    r3    |  r2  |    |  r1  |   |
 *  +--+----------+------+----+------+---+
 *                       ^    ^
 *                       |    |
 *                       |    max_addr
 *                       |
 *                       min_addr
 *
 * Expect to merge the new region with r2. The second region (r1) does not
 * get updated. The total size counter gets updated.
 */
static int alloc_try_nid_top_down_reserved_no_space_check(void)
{
	struct memblock_region *rgn1 = &memblock.reserved.regions[1];
	struct memblock_region *rgn2 = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	struct region r1, r2;
	phys_addr_t r3_size = SZ_256;
	phys_addr_t gap_size = SMP_CACHE_BYTES;
	phys_addr_t total_size;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_memblock();

	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
	r1.size = SMP_CACHE_BYTES;

	r2.size = SZ_128;
	r2.base = r1.base - (r2.size + gap_size);

	total_size = r1.size + r2.size + r3_size;
	min_addr = r2.base + r2.size;
	max_addr = r1.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = run_memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
						   min_addr, max_addr,
						   NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);

	ASSERT_EQ(rgn1->size, r1.size);
	ASSERT_EQ(rgn1->base, r1.base);

	ASSERT_EQ(rgn2->size, r2.size + r3_size);
	ASSERT_EQ(rgn2->base, r2.base - r3_size);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory within min_addr and max_addr range, but
 * it's too narrow and everything else is reserved:
 *
 *            +-----------+
 *            |    new    |
 *            +-----------+
 *                 +      +
 *  |--------------+      +----------|
 *  |      r2      |      |    r1    |
 *  +--------------+------+----------+
 *                 ^      ^
 *                 |      |
 *                 |      max_addr
 *                 |
 *                 min_addr
 *
 * Expect no allocation to happen.
 */
static int alloc_try_nid_reserved_all_generic_check(void)
{
	void *allocated_ptr = NULL;
	struct region r1, r2;
	phys_addr_t r3_size = SZ_256;
	phys_addr_t gap_size = SMP_CACHE_BYTES;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_memblock();

	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES;
	r1.size = SMP_CACHE_BYTES;

	r2.size = MEM_SIZE - (r1.size + gap_size);
	r2.base = memblock_start_of_DRAM();

	min_addr = r2.base + r2.size;
	max_addr = r1.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = run_memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
						   min_addr, max_addr,
						   NUMA_NO_NODE);

	ASSERT_EQ(allocated_ptr, NULL);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region, where max_addr is
 * bigger than the end address of the available memory. Expect to allocate
 * a region that ends at the end of the available memory.
 */
static int alloc_try_nid_top_down_cap_max_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_256;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_end_of_DRAM() - SZ_1K;
	max_addr = memblock_end_of_DRAM() + SZ_256;

	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
						   min_addr, max_addr,
						   NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region, where min_addr is
 * smaller than the start address of the available memory. Expect to allocate
 * a region that ends at the end of the available memory.
 */
static int alloc_try_nid_top_down_cap_min_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_1K;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM() - SZ_256;
	max_addr = memblock_end_of_DRAM();

	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
						   min_addr, max_addr,
						   NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

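/*
 * The tests below mirror the top-down scenarios above with bottom-up
 * allocation enabled; the mode is switched by the test case wrappers
 * further down.
 */
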
/*
 * A simple test that tries to allocate a memory region within min_addr and
 * max_addr range:
 *
 *        +                       +
 *   |    +-----------+           |      |
 *   |    |    rgn    |           |      |
 *   +----+-----------+-----------+------+
 *        ^                       ^
 *        |                       |
 *        min_addr                max_addr
 *
 * Expect to allocate a region that starts at min_addr and ends before
 * max_addr.
 */
static int alloc_try_nid_bottom_up_simple_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_128;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t rgn_end;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
	max_addr = min_addr + SZ_512;

	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
						   min_addr, max_addr,
						   NUMA_NO_NODE);
	rgn_end = rgn->base + rgn->size;

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, min_addr);
	ASSERT_LT(rgn_end, max_addr);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A simple test that tries to allocate a memory region within min_addr and
 * max_addr range, where the start address is misaligned:
 *
 *        +                     +
 *  |     +   +-----------+     +     |
 *  |     |   |    rgn    |     |     |
 *  +-----+---+-----------+-----+-----+
 *        ^   ^----.            ^
 *        |        |            |
 *     min_addr    |            max_addr
 *                 |
 *                 Aligned address
 *                 boundary
 *
 * Expect to allocate an aligned region that ends before max_addr.
 */
static int alloc_try_nid_bottom_up_start_misaligned_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_128;
	phys_addr_t misalign = SZ_2;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t rgn_end;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM() + misalign;
	max_addr = min_addr + SZ_512;

	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
						   min_addr, max_addr,
						   NUMA_NO_NODE);
	rgn_end = rgn->base + rgn->size;

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, min_addr + (SMP_CACHE_BYTES - misalign));
	ASSERT_LT(rgn_end, max_addr);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region, which can't fit into the
 * min_addr and max_addr range:
 *
 *                      +    +
 *  |---------+         +    +      |
 *  |   rgn   |         |    |      |
 *  +---------+---------+----+------+
 *                      ^    ^
 *                      |    |
 *                      |    max_addr
 *                      |
 *                      min_addr
 *
 * Expect to drop the lower limit and allocate a memory region which
 * starts at the beginning of the available memory.
 */
static int alloc_try_nid_bottom_up_narrow_range_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_256;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM() + SZ_512;
	max_addr = min_addr + SMP_CACHE_BYTES;

	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
						   min_addr, max_addr,
						   NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, memblock_start_of_DRAM());

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory within min_addr and max_addr range, when
 * there are two reserved regions at the borders, with a gap big enough to fit
 * a new region:
 *
 *                +           +
 *  |    +--------+-------+   +------+  |
 *  |    |   r2   |  rgn  |   |  r1  |  |
 *  +----+--------+-------+---+------+--+
 *                ^           ^
 *                |           |
 *                min_addr    max_addr
 *
 * Expect to merge the new region with r2. The second region (r1) does not
 * get updated. The total size field gets updated.
 */
static int alloc_try_nid_bottom_up_reserved_with_space_check(void)
{
	struct memblock_region *rgn1 = &memblock.reserved.regions[1];
	struct memblock_region *rgn2 = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	struct region r1, r2;
	phys_addr_t r3_size = SZ_64;
	phys_addr_t gap_size = SMP_CACHE_BYTES;
	phys_addr_t total_size;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_memblock();

	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
	r1.size = SMP_CACHE_BYTES;

	r2.size = SZ_128;
	r2.base = r1.base - (r3_size + gap_size + r2.size);

	total_size = r1.size + r2.size + r3_size;
	min_addr = r2.base + r2.size;
	max_addr = r1.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = run_memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
						   min_addr, max_addr,
						   NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);

	ASSERT_EQ(rgn1->size, r1.size);
	ASSERT_EQ(rgn1->base, max_addr);

	ASSERT_EQ(rgn2->size, r2.size + r3_size);
	ASSERT_EQ(rgn2->base, r2.base);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory within min_addr and max_addr range, when
 * there are two reserved regions at the borders, with a gap of a size equal to
 * the size of the new region:
 *
 *                         +   +
 *  |----------+    +------+   +----+  |
 *  |    r3    |    |  r2  |   | r1 |  |
 *  +----------+----+------+---+----+--+
 *                         ^   ^
 *                         |   |
 *                         |  max_addr
 *                         |
 *                         min_addr
 *
 * Expect to drop the lower limit and allocate memory at the beginning of the
 * available memory. The region counter and total size fields get updated.
 * Other regions are not modified.
 */
static int alloc_try_nid_bottom_up_reserved_no_space_check(void)
{
	struct memblock_region *rgn1 = &memblock.reserved.regions[2];
	struct memblock_region *rgn2 = &memblock.reserved.regions[1];
	struct memblock_region *rgn3 = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	struct region r1, r2;
	phys_addr_t r3_size = SZ_256;
	phys_addr_t gap_size = SMP_CACHE_BYTES;
	phys_addr_t total_size;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_memblock();

	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
	r1.size = SMP_CACHE_BYTES;

	r2.size = SZ_128;
	r2.base = r1.base - (r2.size + gap_size);

	total_size = r1.size + r2.size + r3_size;
	min_addr = r2.base + r2.size;
	max_addr = r1.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = run_memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
						   min_addr, max_addr,
						   NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);

	ASSERT_EQ(rgn3->size, r3_size);
	ASSERT_EQ(rgn3->base, memblock_start_of_DRAM());

	ASSERT_EQ(rgn2->size, r2.size);
	ASSERT_EQ(rgn2->base, r2.base);

	ASSERT_EQ(rgn1->size, r1.size);
	ASSERT_EQ(rgn1->base, r1.base);

	ASSERT_EQ(memblock.reserved.cnt, 3);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region, where max_addr is
 * bigger than the end address of the available memory. Expect to allocate
 * a region that starts at min_addr.
 */
static int alloc_try_nid_bottom_up_cap_max_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_256;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM() + SZ_1K;
	max_addr = memblock_end_of_DRAM() + SZ_256;

	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
						   min_addr, max_addr,
						   NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, min_addr);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region, where min_addr is
 * smaller than the start address of the available memory. Expect to allocate
 * a region at the beginning of the available memory.
 */
static int alloc_try_nid_bottom_up_cap_min_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_1K;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM() - SZ_256;

	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
						   min_addr, max_addr,
						   NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, memblock_start_of_DRAM());

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/* Test case wrappers for range tests */
static int alloc_try_nid_simple_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_try_nid_top_down_simple_check();
	memblock_set_bottom_up(true);
	alloc_try_nid_bottom_up_simple_check();

	return 0;
}

static int alloc_try_nid_misaligned_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_try_nid_top_down_end_misaligned_check();
	memblock_set_bottom_up(true);
	alloc_try_nid_bottom_up_start_misaligned_check();

	return 0;
}

static int alloc_try_nid_narrow_range_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_try_nid_top_down_narrow_range_check();
	memblock_set_bottom_up(true);
	alloc_try_nid_bottom_up_narrow_range_check();

	return 0;
}

static int alloc_try_nid_reserved_with_space_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_try_nid_top_down_reserved_with_space_check();
	memblock_set_bottom_up(true);
	alloc_try_nid_bottom_up_reserved_with_space_check();

	return 0;
}

static int alloc_try_nid_reserved_no_space_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_try_nid_top_down_reserved_no_space_check();
	memblock_set_bottom_up(true);
	alloc_try_nid_bottom_up_reserved_no_space_check();

	return 0;
}

static int alloc_try_nid_cap_max_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_try_nid_top_down_cap_max_check();
	memblock_set_bottom_up(true);
	alloc_try_nid_bottom_up_cap_max_check();

	return 0;
}

static int alloc_try_nid_cap_min_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_try_nid_top_down_cap_min_check();
	memblock_set_bottom_up(true);
	alloc_try_nid_bottom_up_cap_min_check();

	return 0;
}

static int alloc_try_nid_min_reserved_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_try_nid_min_reserved_generic_check);
	run_bottom_up(alloc_try_nid_min_reserved_generic_check);

	return 0;
}

static int alloc_try_nid_max_reserved_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_try_nid_max_reserved_generic_check);
	run_bottom_up(alloc_try_nid_max_reserved_generic_check);

	return 0;
}

static int alloc_try_nid_exact_address_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_try_nid_exact_address_generic_check);
	run_bottom_up(alloc_try_nid_exact_address_generic_check);

	return 0;
}

static int alloc_try_nid_reserved_full_merge_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_try_nid_reserved_full_merge_generic_check);
	run_bottom_up(alloc_try_nid_reserved_full_merge_generic_check);

	return 0;
}

static int alloc_try_nid_reserved_all_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_try_nid_reserved_all_generic_check);
	run_bottom_up(alloc_try_nid_reserved_all_generic_check);

	return 0;
}

static int alloc_try_nid_low_max_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_try_nid_low_max_generic_check);
	run_bottom_up(alloc_try_nid_low_max_generic_check);

	return 0;
}

static int memblock_alloc_nid_range_checks(void)
{
	test_print("Running %s range tests...\n",
		   get_memblock_alloc_try_nid_name(alloc_nid_test_flags));

	alloc_try_nid_simple_check();
	alloc_try_nid_misaligned_check();
	alloc_try_nid_narrow_range_check();
	alloc_try_nid_reserved_with_space_check();
	alloc_try_nid_reserved_no_space_check();
	alloc_try_nid_cap_max_check();
	alloc_try_nid_cap_min_check();

	alloc_try_nid_min_reserved_check();
	alloc_try_nid_max_reserved_check();
	alloc_try_nid_exact_address_check();
	alloc_try_nid_reserved_full_merge_check();
	alloc_try_nid_reserved_all_check();
	alloc_try_nid_low_max_check();

	return 0;
}

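/*
 * The NUMA tests below run on a layout created by setup_numa_memblock(),
 * which is assumed to split the simulated memory across NUMA_NODES nodes
 * according to node_fractions. The region_end() helper is assumed to
 * return rgn->base + rgn->size.
 */
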
/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * has enough memory to allocate a region of the requested size.
 * Expect to allocate an aligned region at the end of the requested node.
 */
static int alloc_try_nid_top_down_numa_simple_check(void)
{
	int nid_req = 3;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	void *allocated_ptr = NULL;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	ASSERT_LE(SZ_4, req_node->size);
	size = req_node->size / SZ_4;
	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM();

	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
						   min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
	ASSERT_LE(req_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * does not have enough memory to allocate a region of the requested size:
 *
 *  |   +-----+          +------------------+     |
 *  |   | req |          |     expected     |     |
 *  +---+-----+----------+------------------+-----+
 *
 *  |                             +---------+     |
 *  |                             |   rgn   |     |
 *  +-----------------------------+---------+-----+
 *
 * Expect to allocate an aligned region at the end of the last node that has
 * enough memory (in this case, nid = 6) after falling back to NUMA_NO_NODE.
 */
static int alloc_try_nid_top_down_numa_small_node_check(void)
{
	int nid_req = 1;
	int nid_exp = 6;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
	void *allocated_ptr = NULL;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	size = SZ_2 * req_node->size;
	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM();

	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
						   min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, region_end(exp_node) - size);
	ASSERT_LE(exp_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * is fully reserved:
 *
 *  |              +---------+            +------------------+     |
 *  |              |requested|            |     expected     |     |
 *  +--------------+---------+------------+------------------+-----+
 *
 *  |              +---------+                     +---------+     |
 *  |              | reserved|                     |   new   |     |
 *  +--------------+---------+---------------------+---------+-----+
 *
 * Expect to allocate an aligned region at the end of the last node that is
 * large enough and has enough unreserved memory (in this case, nid = 6) after
 * falling back to NUMA_NO_NODE. The region count and total size get updated.
 */
static int alloc_try_nid_top_down_numa_node_reserved_check(void)
{
	int nid_req = 2;
	int nid_exp = 6;
	struct memblock_region *new_rgn = &memblock.reserved.regions[1];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
	void *allocated_ptr = NULL;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	size = req_node->size;
	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM();

	memblock_reserve(req_node->base, req_node->size);
	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
						   min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, region_end(exp_node) - size);
	ASSERT_LE(exp_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, size + req_node->size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * is partially reserved but has enough memory for the allocated region:
 *
 *  |           +---------------------------------------+          |
 *  |           |               requested               |          |
 *  +-----------+---------------------------------------+----------+
 *
 *  |           +------------------+              +-----+          |
 *  |           |     reserved     |              | new |          |
 *  +-----------+------------------+--------------+-----+----------+
 *
 * Expect to allocate an aligned region at the end of the requested node. The
 * region count and total size get updated.
 */
static int alloc_try_nid_top_down_numa_part_reserved_check(void)
{
	int nid_req = 4;
	struct memblock_region *new_rgn = &memblock.reserved.regions[1];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	void *allocated_ptr = NULL;
	struct region r1;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	ASSERT_LE(SZ_8, req_node->size);
	r1.base = req_node->base;
	r1.size = req_node->size / SZ_2;
	size = r1.size / SZ_4;
	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM();

	memblock_reserve(r1.base, r1.size);
	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
						   min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
	ASSERT_LE(req_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, size + r1.size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * is partially reserved and does not have enough contiguous memory for the
 * allocated region:
 *
 *  |           +-----------------------+         +----------------------|
 *  |           |       requested       |         |       expected       |
 *  +-----------+-----------------------+---------+----------------------+
 *
 *  |                 +----------+                           +-----------|
 *  |                 | reserved |                           |    new    |
 *  +-----------------+----------+---------------------------+-----------+
 *
 * Expect to allocate an aligned region at the end of the last node that is
 * large enough and has enough unreserved memory (in this case,
 * nid = NUMA_NODES - 1) after falling back to NUMA_NO_NODE. The region count
 * and total size get updated.
 */
static int alloc_try_nid_top_down_numa_part_reserved_fallback_check(void)
{
	int nid_req = 4;
	int nid_exp = NUMA_NODES - 1;
	struct memblock_region *new_rgn = &memblock.reserved.regions[1];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
	void *allocated_ptr = NULL;
	struct region r1;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	ASSERT_LE(SZ_4, req_node->size);
	size = req_node->size / SZ_2;
	r1.base = req_node->base + (size / SZ_2);
	r1.size = size;

	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM();

	memblock_reserve(r1.base, r1.size);
	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
						   min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, region_end(exp_node) - size);
	ASSERT_LE(exp_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, size + r1.size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region that spans over the min_addr
 * and max_addr range and overlaps with two different nodes, where the first
 * node is the requested node:
 *
 *                                min_addr
 *                                |           max_addr
 *                                |           |
 *                                v           v
 *  |           +-----------------------+-----------+              |
 *  |           |       requested       |   node3   |              |
 *  +-----------+-----------------------+-----------+--------------+
 *                                +           +
 *  |                       +-----------+                          |
 *  |                       |    rgn    |                          |
 *  +-----------------------+-----------+--------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region that ends at
 * the end of the requested node.
 */
static int alloc_try_nid_top_down_numa_split_range_low_check(void)
{
	int nid_req = 2;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_512;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t req_node_end;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	req_node_end = region_end(req_node);
	min_addr = req_node_end - SZ_256;
	max_addr = min_addr + size;

	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
						   min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, req_node_end - size);
	ASSERT_LE(req_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region that spans over the min_addr
 * and max_addr range and overlaps with two different nodes, where the second
 * node is the requested node:
 *
 *                               min_addr
 *                               |         max_addr
 *                               |         |
 *                               v         v
 *  |      +--------------------------+---------+                |
 *  |      |         expected         |requested|                |
 *  +------+--------------------------+---------+----------------+
 *                               +         +
 *  |                       +---------+                          |
 *  |                       |   rgn   |                          |
 *  +-----------------------+---------+--------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region that
 * ends at the end of the first node that overlaps with the range.
 */
static int alloc_try_nid_top_down_numa_split_range_high_check(void)
{
	int nid_req = 3;
	int nid_exp = nid_req - 1;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_512;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t exp_node_end;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	exp_node_end = region_end(exp_node);
	min_addr = exp_node_end - SZ_256;
	max_addr = min_addr + size;

	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
						   min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, exp_node_end - size);
	ASSERT_LE(exp_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region that spans over the min_addr
 * and max_addr range and overlaps with two different nodes, where the requested
 * node ends before min_addr:
 *
 *                                         min_addr
 *                                         |         max_addr
 *                                         |         |
 *                                         v         v
 *  |    +---------------+        +-------------+---------+          |
 *  |    |   requested   |        |    node1    |  node2  |          |
 *  +----+---------------+--------+-------------+---------+----------+
 *                                         +         +
 *  |          +---------+                                           |
 *  |          |   rgn   |                                           |
 *  +----------+---------+-------------------------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region that ends at
 * the end of the requested node.
 */
static int alloc_try_nid_top_down_numa_no_overlap_split_check(void)
{
	int nid_req = 2;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *node2 = &memblock.memory.regions[6];
	void *allocated_ptr = NULL;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	size = SZ_512;
	min_addr = node2->base - SZ_256;
	max_addr = min_addr + size;

	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
						   min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
	ASSERT_LE(req_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory within min_addr and max_addr range when
 * the requested node and the range do not overlap, and the requested node ends
 * before min_addr. The range overlaps with multiple nodes along node
 * boundaries:
 *
 *                          min_addr
 *                          |                                 max_addr
 *                          |                                 |
 *                          v                                 v
 *  |-----------+           +----------+----...----+----------+      |
 *  | requested |           | min node |    ...    | max node |      |
 *  +-----------+-----------+----------+----...----+----------+------+
 *                          +                                 +
 *  |                                                   +-----+      |
 *  |                                                   | rgn |      |
 *  +---------------------------------------------------+-----+------+
 *
 * Expect to allocate a memory region at the end of the final node in
 * the range after falling back to NUMA_NO_NODE.
 */
static int alloc_try_nid_top_down_numa_no_overlap_low_check(void)
{
	int nid_req = 0;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *min_node = &memblock.memory.regions[2];
	struct memblock_region *max_node = &memblock.memory.regions[5];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_64;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	min_addr = min_node->base;
	max_addr = region_end(max_node);

	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
						   min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, max_addr - size);
	ASSERT_LE(max_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory within min_addr and max_addr range when
 * the requested node and the range do not overlap, and the requested node
 * starts after max_addr. The range overlaps with multiple nodes along node
 * boundaries:
 *
 *        min_addr
 *        |                                 max_addr
 *        |                                 |
 *        v                                 v
 *  |     +----------+----...----+----------+        +-----------+   |
 *  |     | min node |    ...    | max node |        | requested |   |
 *  +-----+----------+----...----+----------+--------+-----------+---+
 *        +                                 +
 *  |                                 +-----+                        |
 *  |                                 | rgn |                        |
 *  +---------------------------------+-----+------------------------+
 *
 * Expect to allocate a memory region at the end of the final node in
 * the range after falling back to NUMA_NO_NODE.
 */
static int alloc_try_nid_top_down_numa_no_overlap_high_check(void)
{
	int nid_req = 7;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *min_node = &memblock.memory.regions[2];
	struct memblock_region *max_node = &memblock.memory.regions[5];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_64;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	min_addr = min_node->base;
	max_addr = region_end(max_node);

	allocated_ptr = run_memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
						   min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, max_addr - size);
	ASSERT_LE(max_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/* Test case wrappers for NUMA tests */
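/*
 * Note that these wrappers currently exercise only the top-down variants:
 * each one forces memblock_set_bottom_up(false) before running its check.
 */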
static int alloc_try_nid_numa_simple_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_try_nid_top_down_numa_simple_check();

	return 0;
}

static int alloc_try_nid_numa_small_node_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_try_nid_top_down_numa_small_node_check();

	return 0;
}

static int alloc_try_nid_numa_node_reserved_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_try_nid_top_down_numa_node_reserved_check();

	return 0;
}

static int alloc_try_nid_numa_part_reserved_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_try_nid_top_down_numa_part_reserved_check();

	return 0;
}

static int alloc_try_nid_numa_part_reserved_fallback_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_try_nid_top_down_numa_part_reserved_fallback_check();

	return 0;
}

static int alloc_try_nid_numa_split_range_low_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_try_nid_top_down_numa_split_range_low_check();

	return 0;
}

static int alloc_try_nid_numa_split_range_high_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_try_nid_top_down_numa_split_range_high_check();

	return 0;
}

static int alloc_try_nid_numa_no_overlap_split_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_try_nid_top_down_numa_no_overlap_split_check();

	return 0;
}

static int alloc_try_nid_numa_no_overlap_low_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_try_nid_top_down_numa_no_overlap_low_check();

	return 0;
}

static int alloc_try_nid_numa_no_overlap_high_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_try_nid_top_down_numa_no_overlap_high_check();

	return 0;
}

int __memblock_alloc_nid_numa_checks(void)
{
	test_print("Running %s NUMA tests...\n",
		   get_memblock_alloc_try_nid_name(alloc_nid_test_flags));

	alloc_try_nid_numa_simple_check();
	alloc_try_nid_numa_small_node_check();
	alloc_try_nid_numa_node_reserved_check();
	alloc_try_nid_numa_part_reserved_check();
	alloc_try_nid_numa_part_reserved_fallback_check();
	alloc_try_nid_numa_split_range_low_check();
	alloc_try_nid_numa_split_range_high_check();

	alloc_try_nid_numa_no_overlap_split_check();
	alloc_try_nid_numa_no_overlap_low_check();
	alloc_try_nid_numa_no_overlap_high_check();

	return 0;
}

static int memblock_alloc_nid_checks_internal(int flags)
{
	alloc_nid_test_flags = flags;

	prefix_reset();
	prefix_push(get_memblock_alloc_try_nid_name(flags));

	reset_memblock_attributes();
	dummy_physical_memory_init();

	memblock_alloc_nid_range_checks();
	memblock_alloc_nid_numa_checks();

	dummy_physical_memory_cleanup();

	prefix_pop();

	return 0;
}

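/*
 * Entry point: the full suite is run twice, once for the default (zeroing)
 * variant and once for the raw variant, so every scenario is checked
 * against both allocators.
 */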
int memblock_alloc_nid_checks(void)
{
	memblock_alloc_nid_checks_internal(TEST_F_NONE);
	memblock_alloc_nid_checks_internal(TEST_F_RAW);

	return 0;
}