xref: /linux/tools/testing/memblock/tests/alloc_nid_api.c (revision cdd5b5a9761fd66d17586e4f4ba6588c70e640ea)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 #include "alloc_nid_api.h"
3 
4 static int alloc_nid_test_flags = TEST_F_NONE;
5 
6 /*
7  * Contains the fraction of MEM_SIZE held by each node, expressed in
8  * basis point units (one hundredth of 1%, i.e. 1/10000).
9  */
10 static const unsigned int node_fractions[] = {
11 	2500, /* 1/4  */
12 	 625, /* 1/16 */
13 	1250, /* 1/8  */
14 	1250, /* 1/8  */
15 	 625, /* 1/16 */
16 	 625, /* 1/16 */
17 	2500, /* 1/4  */
18 	 625, /* 1/16 */
19 };
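/*
 * Worked example (illustrative numbers; the real MEM_SIZE constant
 * lives in the test harness headers): a node with a 2500 bp share of
 * a 16K MEM_SIZE would span (16384 * 2500) / 10000 = 4096 bytes,
 * i.e. 1/4 of the memory. The fractions above sum to 10000, so the
 * eight nodes tile MEM_SIZE exactly.
 */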
20 
21 static inline const char * const get_memblock_alloc_nid_name(int flags)
22 {
23 	if (flags & TEST_F_EXACT)
24 		return "memblock_alloc_exact_nid_raw";
25 	if (flags & TEST_F_RAW)
26 		return "memblock_alloc_try_nid_raw";
27 	return "memblock_alloc_try_nid";
28 }
29 
30 static inline void *run_memblock_alloc_nid(phys_addr_t size,
31 					   phys_addr_t align,
32 					   phys_addr_t min_addr,
33 					   phys_addr_t max_addr, int nid)
34 {
35 	assert(!(alloc_nid_test_flags & TEST_F_EXACT) ||
36 	       (alloc_nid_test_flags & TEST_F_RAW));
37 	/*
38 	 * TEST_F_EXACT should be checked before TEST_F_RAW since
39 	 * memblock_alloc_exact_nid_raw() performs raw allocations.
40 	 */
41 	if (alloc_nid_test_flags & TEST_F_EXACT)
42 		return memblock_alloc_exact_nid_raw(size, align, min_addr,
43 						    max_addr, nid);
44 	if (alloc_nid_test_flags & TEST_F_RAW)
45 		return memblock_alloc_try_nid_raw(size, align, min_addr,
46 						  max_addr, nid);
47 	return memblock_alloc_try_nid(size, align, min_addr, max_addr, nid);
48 }
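/*
 * Typical invocation, mirroring the checks below: request a 128-byte,
 * cache-line-aligned region between min_addr and max_addr with no
 * node preference:
 *
 *	ptr = run_memblock_alloc_nid(SZ_128, SMP_CACHE_BYTES,
 *				     min_addr, max_addr, NUMA_NO_NODE);
 */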
49 
50 /*
51  * A simple test that tries to allocate a memory region within min_addr and
52  * max_addr range:
53  *
54  *        +                   +
55  *   |    +       +-----------+      |
56  *   |    |       |    rgn    |      |
57  *   +----+-------+-----------+------+
58  *        ^                   ^
59  *        |                   |
60  *        min_addr           max_addr
61  *
62  * Expect to allocate a region that ends at max_addr.
63  */
64 static int alloc_nid_top_down_simple_check(void)
65 {
66 	struct memblock_region *rgn = &memblock.reserved.regions[0];
67 	void *allocated_ptr = NULL;
68 	phys_addr_t size = SZ_128;
69 	phys_addr_t min_addr;
70 	phys_addr_t max_addr;
71 	phys_addr_t rgn_end;
72 
73 	PREFIX_PUSH();
74 	setup_memblock();
75 
76 	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
77 	max_addr = min_addr + SZ_512;
78 
79 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
80 					       min_addr, max_addr,
81 					       NUMA_NO_NODE);
82 	rgn_end = rgn->base + rgn->size;
83 
84 	ASSERT_NE(allocated_ptr, NULL);
85 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
86 
87 	ASSERT_EQ(rgn->size, size);
88 	ASSERT_EQ(rgn->base, max_addr - size);
89 	ASSERT_EQ(rgn_end, max_addr);
90 
91 	ASSERT_EQ(memblock.reserved.cnt, 1);
92 	ASSERT_EQ(memblock.reserved.total_size, size);
93 
94 	test_pass_pop();
95 
96 	return 0;
97 }
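/*
 * assert_mem_content() is provided by the test headers. A sketch of
 * the assumed contract (not the verbatim helper): it branches on the
 * flags, since only the non-raw variant returns zeroed memory:
 *
 *	if (flags & TEST_F_RAW)
 *		check that the buffer was left unzeroed;
 *	else
 *		check that the buffer is fully zeroed;
 */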
98 
99 /*
100  * A simple test that tries to allocate a memory region within min_addr and
101  * max_addr range, where the end address is misaligned:
102  *
103  *         +       +            +
104  *  |      +       +---------+  +    |
105  *  |      |       |   rgn   |  |    |
106  *  +------+-------+---------+--+----+
107  *         ^       ^            ^
108  *         |       |            |
109  *       min_addr  |            max_addr
110  *                 |
111  *                 Aligned address
112  *                 boundary
113  *
114  * Expect to allocate an aligned region that ends before max_addr.
115  */
116 static int alloc_nid_top_down_end_misaligned_check(void)
117 {
118 	struct memblock_region *rgn = &memblock.reserved.regions[0];
119 	void *allocated_ptr = NULL;
120 	phys_addr_t size = SZ_128;
121 	phys_addr_t misalign = SZ_2;
122 	phys_addr_t min_addr;
123 	phys_addr_t max_addr;
124 	phys_addr_t rgn_end;
125 
126 	PREFIX_PUSH();
127 	setup_memblock();
128 
129 	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
130 	max_addr = min_addr + SZ_512 + misalign;
131 
132 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
133 					       min_addr, max_addr,
134 					       NUMA_NO_NODE);
135 	rgn_end = rgn->base + rgn->size;
136 
137 	ASSERT_NE(allocated_ptr, NULL);
138 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
139 
140 	ASSERT_EQ(rgn->size, size);
141 	ASSERT_EQ(rgn->base, max_addr - size - misalign);
142 	ASSERT_LT(rgn_end, max_addr);
143 
144 	ASSERT_EQ(memblock.reserved.cnt, 1);
145 	ASSERT_EQ(memblock.reserved.total_size, size);
146 
147 	test_pass_pop();
148 
149 	return 0;
150 }
151 
152 /*
153  * A simple test that tries to allocate a memory region, which spans over the
154  * min_addr and max_addr range:
155  *
156  *         +               +
157  *  |      +---------------+       |
158  *  |      |      rgn      |       |
159  *  +------+---------------+-------+
160  *         ^               ^
161  *         |               |
162  *         min_addr        max_addr
163  *
164  * Expect to allocate a region that starts at min_addr and ends at
165  * max_addr, given that min_addr is aligned.
166  */
167 static int alloc_nid_exact_address_generic_check(void)
168 {
169 	struct memblock_region *rgn = &memblock.reserved.regions[0];
170 	void *allocated_ptr = NULL;
171 	phys_addr_t size = SZ_1K;
172 	phys_addr_t min_addr;
173 	phys_addr_t max_addr;
174 	phys_addr_t rgn_end;
175 
176 	PREFIX_PUSH();
177 	setup_memblock();
178 
179 	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES;
180 	max_addr = min_addr + size;
181 
182 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
183 					       min_addr, max_addr,
184 					       NUMA_NO_NODE);
185 	rgn_end = rgn->base + rgn->size;
186 
187 	ASSERT_NE(allocated_ptr, NULL);
188 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
189 
190 	ASSERT_EQ(rgn->size, size);
191 	ASSERT_EQ(rgn->base, min_addr);
192 	ASSERT_EQ(rgn_end, max_addr);
193 
194 	ASSERT_EQ(memblock.reserved.cnt, 1);
195 	ASSERT_EQ(memblock.reserved.total_size, size);
196 
197 	test_pass_pop();
198 
199 	return 0;
200 }
201 
202 /*
203  * A test that tries to allocate a memory region, which can't fit into
204  * min_addr and max_addr range:
205  *
206  *           +          +     +
207  *  |        +----------+-----+    |
208  *  |        |   rgn    +     |    |
209  *  +--------+----------+-----+----+
210  *           ^          ^     ^
211  *           |          |     |
212  *           Aligned    |    max_addr
213  *           address    |
214  *           boundary   min_addr
215  *
216  * Expect to drop the lower limit and allocate a memory region which
217  * ends at max_addr (if the address is aligned).
218  */
219 static int alloc_nid_top_down_narrow_range_check(void)
220 {
221 	struct memblock_region *rgn = &memblock.reserved.regions[0];
222 	void *allocated_ptr = NULL;
223 	phys_addr_t size = SZ_256;
224 	phys_addr_t min_addr;
225 	phys_addr_t max_addr;
226 
227 	PREFIX_PUSH();
228 	setup_memblock();
229 
230 	min_addr = memblock_start_of_DRAM() + SZ_512;
231 	max_addr = min_addr + SMP_CACHE_BYTES;
232 
233 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
234 					       min_addr, max_addr,
235 					       NUMA_NO_NODE);
236 
237 	ASSERT_NE(allocated_ptr, NULL);
238 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
239 
240 	ASSERT_EQ(rgn->size, size);
241 	ASSERT_EQ(rgn->base, max_addr - size);
242 
243 	ASSERT_EQ(memblock.reserved.cnt, 1);
244 	ASSERT_EQ(memblock.reserved.total_size, size);
245 
246 	test_pass_pop();
247 
248 	return 0;
249 }
250 
251 /*
252  * A test that tries to allocate a memory region, which can't fit into
253  * min_addr and max_addr range, with the latter being too close to the beginning
254  * of the available memory:
255  *
256  *   +-------------+
257  *   |     new     |
258  *   +-------------+
259  *         +       +
260  *         |       +              |
261  *         |       |              |
262  *         +-------+--------------+
263  *         ^       ^
264  *         |       |
265  *         |       max_addr
266  *         |
267  *         min_addr
268  *
269  * Expect no allocation to happen.
270  */
271 static int alloc_nid_low_max_generic_check(void)
272 {
273 	void *allocated_ptr = NULL;
274 	phys_addr_t size = SZ_1K;
275 	phys_addr_t min_addr;
276 	phys_addr_t max_addr;
277 
278 	PREFIX_PUSH();
279 	setup_memblock();
280 
281 	min_addr = memblock_start_of_DRAM();
282 	max_addr = min_addr + SMP_CACHE_BYTES;
283 
284 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
285 					       min_addr, max_addr,
286 					       NUMA_NO_NODE);
287 
288 	ASSERT_EQ(allocated_ptr, NULL);
289 
290 	test_pass_pop();
291 
292 	return 0;
293 }
294 
295 /*
296  * A test that tries to allocate a memory region within min_addr and max_addr range,
297  * with min_addr being so close that it's next to an allocated region:
298  *
299  *          +                        +
300  *  |       +--------+---------------|
301  *  |       |   r1   |      rgn      |
302  *  +-------+--------+---------------+
303  *          ^                        ^
304  *          |                        |
305  *          min_addr                 max_addr
306  *
307  * Expect a merge of both regions. Only the region size gets updated.
308  */
309 static int alloc_nid_min_reserved_generic_check(void)
310 {
311 	struct memblock_region *rgn = &memblock.reserved.regions[0];
312 	void *allocated_ptr = NULL;
313 	phys_addr_t r1_size = SZ_128;
314 	phys_addr_t r2_size = SZ_64;
315 	phys_addr_t total_size = r1_size + r2_size;
316 	phys_addr_t min_addr;
317 	phys_addr_t max_addr;
318 	phys_addr_t reserved_base;
319 
320 	PREFIX_PUSH();
321 	setup_memblock();
322 
323 	max_addr = memblock_end_of_DRAM();
324 	min_addr = max_addr - r2_size;
325 	reserved_base = min_addr - r1_size;
326 
327 	memblock_reserve(reserved_base, r1_size);
328 
329 	allocated_ptr = run_memblock_alloc_nid(r2_size, SMP_CACHE_BYTES,
330 					       min_addr, max_addr,
331 					       NUMA_NO_NODE);
332 
333 	ASSERT_NE(allocated_ptr, NULL);
334 	assert_mem_content(allocated_ptr, r2_size, alloc_nid_test_flags);
335 
336 	ASSERT_EQ(rgn->size, total_size);
337 	ASSERT_EQ(rgn->base, reserved_base);
338 
339 	ASSERT_EQ(memblock.reserved.cnt, 1);
340 	ASSERT_EQ(memblock.reserved.total_size, total_size);
341 
342 	test_pass_pop();
343 
344 	return 0;
345 }
346 
347 /*
348  * A test that tries to allocate a memory region within min_addr and max_addr,
349  * with max_addr being so close that it's next to an allocated region:
350  *
351  *             +             +
352  *  |          +-------------+--------|
353  *  |          |     rgn     |   r1   |
354  *  +----------+-------------+--------+
355  *             ^             ^
356  *             |             |
357  *             min_addr      max_addr
358  *
359  * Expect a merge of both regions. Only the region size gets updated.
360  */
361 static int alloc_nid_max_reserved_generic_check(void)
362 {
363 	struct memblock_region *rgn = &memblock.reserved.regions[0];
364 	void *allocated_ptr = NULL;
365 	phys_addr_t r1_size = SZ_64;
366 	phys_addr_t r2_size = SZ_128;
367 	phys_addr_t total_size = r1_size + r2_size;
368 	phys_addr_t min_addr;
369 	phys_addr_t max_addr;
370 
371 	PREFIX_PUSH();
372 	setup_memblock();
373 
374 	max_addr = memblock_end_of_DRAM() - r1_size;
375 	min_addr = max_addr - r2_size;
376 
377 	memblock_reserve(max_addr, r1_size);
378 
379 	allocated_ptr = run_memblock_alloc_nid(r2_size, SMP_CACHE_BYTES,
380 					       min_addr, max_addr,
381 					       NUMA_NO_NODE);
382 
383 	ASSERT_NE(allocated_ptr, NULL);
384 	assert_mem_content(allocated_ptr, r2_size, alloc_nid_test_flags);
385 
386 	ASSERT_EQ(rgn->size, total_size);
387 	ASSERT_EQ(rgn->base, min_addr);
388 
389 	ASSERT_EQ(memblock.reserved.cnt, 1);
390 	ASSERT_EQ(memblock.reserved.total_size, total_size);
391 
392 	test_pass_pop();
393 
394 	return 0;
395 }
396 
397 /*
398  * A test that tries to allocate memory within min_addr and max_addr range, when
399  * there are two reserved regions at the borders, with a gap big enough to fit
400  * a new region:
401  *
402  *                +           +
403  *  |    +--------+   +-------+------+  |
404  *  |    |   r2   |   |  rgn  |  r1  |  |
405  *  +----+--------+---+-------+------+--+
406  *                ^           ^
407  *                |           |
408  *                min_addr    max_addr
409  *
410  * Expect to merge the new region with r1. The second region (r2) does not
411  * get updated. The total size field gets updated.
412  */
413 
414 static int alloc_nid_top_down_reserved_with_space_check(void)
415 {
416 	struct memblock_region *rgn1 = &memblock.reserved.regions[1];
417 	struct memblock_region *rgn2 = &memblock.reserved.regions[0];
418 	void *allocated_ptr = NULL;
419 	struct region r1, r2;
420 	phys_addr_t r3_size = SZ_64;
421 	phys_addr_t gap_size = SMP_CACHE_BYTES;
422 	phys_addr_t total_size;
423 	phys_addr_t max_addr;
424 	phys_addr_t min_addr;
425 
426 	PREFIX_PUSH();
427 	setup_memblock();
428 
429 	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
430 	r1.size = SMP_CACHE_BYTES;
431 
432 	r2.size = SZ_128;
433 	r2.base = r1.base - (r3_size + gap_size + r2.size);
434 
435 	total_size = r1.size + r2.size + r3_size;
436 	min_addr = r2.base + r2.size;
437 	max_addr = r1.base;
438 
439 	memblock_reserve(r1.base, r1.size);
440 	memblock_reserve(r2.base, r2.size);
441 
442 	allocated_ptr = run_memblock_alloc_nid(r3_size, SMP_CACHE_BYTES,
443 					       min_addr, max_addr,
444 					       NUMA_NO_NODE);
445 
446 	ASSERT_NE(allocated_ptr, NULL);
447 	assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);
448 
449 	ASSERT_EQ(rgn1->size, r1.size + r3_size);
450 	ASSERT_EQ(rgn1->base, max_addr - r3_size);
451 
452 	ASSERT_EQ(rgn2->size, r2.size);
453 	ASSERT_EQ(rgn2->base, r2.base);
454 
455 	ASSERT_EQ(memblock.reserved.cnt, 2);
456 	ASSERT_EQ(memblock.reserved.total_size, total_size);
457 
458 	test_pass_pop();
459 
460 	return 0;
461 }
462 
463 /*
464  * A test that tries to allocate memory within min_addr and max_addr range, when
465  * there are two reserved regions at the borders, with a gap of a size equal to
466  * the size of the new region:
467  *
468  *                 +        +
469  *  |     +--------+--------+--------+     |
470  *  |     |   r2   |   r3   |   r1   |     |
471  *  +-----+--------+--------+--------+-----+
472  *                 ^        ^
473  *                 |        |
474  *                 min_addr max_addr
475  *
476  * Expect to merge all of the regions into one. The region counter and total
477  * size fields get updated.
478  */
479 static int alloc_nid_reserved_full_merge_generic_check(void)
480 {
481 	struct memblock_region *rgn = &memblock.reserved.regions[0];
482 	void *allocated_ptr = NULL;
483 	struct region r1, r2;
484 	phys_addr_t r3_size = SZ_64;
485 	phys_addr_t total_size;
486 	phys_addr_t max_addr;
487 	phys_addr_t min_addr;
488 
489 	PREFIX_PUSH();
490 	setup_memblock();
491 
492 	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
493 	r1.size = SMP_CACHE_BYTES;
494 
495 	r2.size = SZ_128;
496 	r2.base = r1.base - (r3_size + r2.size);
497 
498 	total_size = r1.size + r2.size + r3_size;
499 	min_addr = r2.base + r2.size;
500 	max_addr = r1.base;
501 
502 	memblock_reserve(r1.base, r1.size);
503 	memblock_reserve(r2.base, r2.size);
504 
505 	allocated_ptr = run_memblock_alloc_nid(r3_size, SMP_CACHE_BYTES,
506 					       min_addr, max_addr,
507 					       NUMA_NO_NODE);
508 
509 	ASSERT_NE(allocated_ptr, NULL);
510 	assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);
511 
512 	ASSERT_EQ(rgn->size, total_size);
513 	ASSERT_EQ(rgn->base, r2.base);
514 
515 	ASSERT_EQ(memblock.reserved.cnt, 1);
516 	ASSERT_EQ(memblock.reserved.total_size, total_size);
517 
518 	test_pass_pop();
519 
520 	return 0;
521 }
522 
523 /*
524  * A test that tries to allocate memory within min_addr and max_addr range, when
525  * there are two reserved regions at the borders, with a gap that can't fit
526  * a new region:
527  *
528  *                       +    +
529  *  |  +----------+------+    +------+   |
530  *  |  |    r3    |  r2  |    |  r1  |   |
531  *  +--+----------+------+----+------+---+
532  *                       ^    ^
533  *                       |    |
534  *                       |    max_addr
535  *                       |
536  *                       min_addr
537  *
538  * Expect to merge the new region with r2. The other region (r1) does not
539  * get updated. The total size counter gets updated.
540  */
541 static int alloc_nid_top_down_reserved_no_space_check(void)
542 {
543 	struct memblock_region *rgn1 = &memblock.reserved.regions[1];
544 	struct memblock_region *rgn2 = &memblock.reserved.regions[0];
545 	void *allocated_ptr = NULL;
546 	struct region r1, r2;
547 	phys_addr_t r3_size = SZ_256;
548 	phys_addr_t gap_size = SMP_CACHE_BYTES;
549 	phys_addr_t total_size;
550 	phys_addr_t max_addr;
551 	phys_addr_t min_addr;
552 
553 	PREFIX_PUSH();
554 	setup_memblock();
555 
556 	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
557 	r1.size = SMP_CACHE_BYTES;
558 
559 	r2.size = SZ_128;
560 	r2.base = r1.base - (r2.size + gap_size);
561 
562 	total_size = r1.size + r2.size + r3_size;
563 	min_addr = r2.base + r2.size;
564 	max_addr = r1.base;
565 
566 	memblock_reserve(r1.base, r1.size);
567 	memblock_reserve(r2.base, r2.size);
568 
569 	allocated_ptr = run_memblock_alloc_nid(r3_size, SMP_CACHE_BYTES,
570 					       min_addr, max_addr,
571 					       NUMA_NO_NODE);
572 
573 	ASSERT_NE(allocated_ptr, NULL);
574 	assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);
575 
576 	ASSERT_EQ(rgn1->size, r1.size);
577 	ASSERT_EQ(rgn1->base, r1.base);
578 
579 	ASSERT_EQ(rgn2->size, r2.size + r3_size);
580 	ASSERT_EQ(rgn2->base, r2.base - r3_size);
581 
582 	ASSERT_EQ(memblock.reserved.cnt, 2);
583 	ASSERT_EQ(memblock.reserved.total_size, total_size);
584 
585 	test_pass_pop();
586 
587 	return 0;
588 }
589 
590 /*
591  * A test that tries to allocate memory within min_addr and max_addr range, but
592  * it's too narrow and everything else is reserved:
593  *
594  *            +-----------+
595  *            |    new    |
596  *            +-----------+
597  *                 +      +
598  *  |--------------+      +----------|
599  *  |      r2      |      |    r1    |
600  *  +--------------+------+----------+
601  *                 ^      ^
602  *                 |      |
603  *                 |      max_addr
604  *                 |
605  *                 min_addr
606  *
607  * Expect no allocation to happen.
608  */
609 
610 static int alloc_nid_reserved_all_generic_check(void)
611 {
612 	void *allocated_ptr = NULL;
613 	struct region r1, r2;
614 	phys_addr_t r3_size = SZ_256;
615 	phys_addr_t gap_size = SMP_CACHE_BYTES;
616 	phys_addr_t max_addr;
617 	phys_addr_t min_addr;
618 
619 	PREFIX_PUSH();
620 	setup_memblock();
621 
622 	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES;
623 	r1.size = SMP_CACHE_BYTES;
624 
625 	r2.size = MEM_SIZE - (r1.size + gap_size);
626 	r2.base = memblock_start_of_DRAM();
627 
628 	min_addr = r2.base + r2.size;
629 	max_addr = r1.base;
630 
631 	memblock_reserve(r1.base, r1.size);
632 	memblock_reserve(r2.base, r2.size);
633 
634 	allocated_ptr = run_memblock_alloc_nid(r3_size, SMP_CACHE_BYTES,
635 					       min_addr, max_addr,
636 					       NUMA_NO_NODE);
637 
638 	ASSERT_EQ(allocated_ptr, NULL);
639 
640 	test_pass_pop();
641 
642 	return 0;
643 }
644 
645 /*
646  * A test that tries to allocate a memory region, where max_addr is
647  * bigger than the end address of the available memory. Expect to allocate
648  * a region that ends at the end of the available memory.
649  */
650 static int alloc_nid_top_down_cap_max_check(void)
651 {
652 	struct memblock_region *rgn = &memblock.reserved.regions[0];
653 	void *allocated_ptr = NULL;
654 	phys_addr_t size = SZ_256;
655 	phys_addr_t min_addr;
656 	phys_addr_t max_addr;
657 
658 	PREFIX_PUSH();
659 	setup_memblock();
660 
661 	min_addr = memblock_end_of_DRAM() - SZ_1K;
662 	max_addr = memblock_end_of_DRAM() + SZ_256;
663 
664 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
665 					       min_addr, max_addr,
666 					       NUMA_NO_NODE);
667 
668 	ASSERT_NE(allocated_ptr, NULL);
669 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
670 
671 	ASSERT_EQ(rgn->size, size);
672 	ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - size);
673 
674 	ASSERT_EQ(memblock.reserved.cnt, 1);
675 	ASSERT_EQ(memblock.reserved.total_size, size);
676 
677 	test_pass_pop();
678 
679 	return 0;
680 }
681 
682 /*
683  * A test that tries to allocate a memory region, where min_addr is
684  * smaller than the start address of the available memory. Expect to allocate
685  * a region that ends at the end of the available memory.
686  */
687 static int alloc_nid_top_down_cap_min_check(void)
688 {
689 	struct memblock_region *rgn = &memblock.reserved.regions[0];
690 	void *allocated_ptr = NULL;
691 	phys_addr_t size = SZ_1K;
692 	phys_addr_t min_addr;
693 	phys_addr_t max_addr;
694 
695 	PREFIX_PUSH();
696 	setup_memblock();
697 
698 	min_addr = memblock_start_of_DRAM() - SZ_256;
699 	max_addr = memblock_end_of_DRAM();
700 
701 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
702 					       min_addr, max_addr,
703 					       NUMA_NO_NODE);
704 
705 	ASSERT_NE(allocated_ptr, NULL);
706 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
707 
708 	ASSERT_EQ(rgn->size, size);
709 	ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - size);
710 
711 	ASSERT_EQ(memblock.reserved.cnt, 1);
712 	ASSERT_EQ(memblock.reserved.total_size, size);
713 
714 	test_pass_pop();
715 
716 	return 0;
717 }
718 
719 /*
720  * A simple test that tries to allocate a memory region within min_addr and
721  * max_addr range:
722  *
723  *        +                       +
724  *   |    +-----------+           |      |
725  *   |    |    rgn    |           |      |
726  *   +----+-----------+-----------+------+
727  *        ^                       ^
728  *        |                       |
729  *        min_addr                max_addr
730  *
731  * Expect to allocate a region that ends before max_addr.
732  */
733 static int alloc_nid_bottom_up_simple_check(void)
734 {
735 	struct memblock_region *rgn = &memblock.reserved.regions[0];
736 	void *allocated_ptr = NULL;
737 	phys_addr_t size = SZ_128;
738 	phys_addr_t min_addr;
739 	phys_addr_t max_addr;
740 	phys_addr_t rgn_end;
741 
742 	PREFIX_PUSH();
743 	setup_memblock();
744 
745 	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
746 	max_addr = min_addr + SZ_512;
747 
748 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
749 					       min_addr, max_addr,
750 					       NUMA_NO_NODE);
751 	rgn_end = rgn->base + rgn->size;
752 
753 	ASSERT_NE(allocated_ptr, NULL);
754 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
755 
756 	ASSERT_EQ(rgn->size, size);
757 	ASSERT_EQ(rgn->base, min_addr);
758 	ASSERT_LT(rgn_end, max_addr);
759 
760 	ASSERT_EQ(memblock.reserved.cnt, 1);
761 	ASSERT_EQ(memblock.reserved.total_size, size);
762 
763 	test_pass_pop();
764 
765 	return 0;
766 }
767 
768 /*
769  * A simple test that tries to allocate a memory region within min_addr and
770  * max_addr range, where the start address is misaligned:
771  *
772  *        +                     +
773  *  |     +   +-----------+     +     |
774  *  |     |   |    rgn    |     |     |
775  *  +-----+---+-----------+-----+-----+
776  *        ^   ^----.            ^
777  *        |        |            |
778  *     min_addr    |            max_addr
779  *                 |
780  *                 Aligned address
781  *                 boundary
782  *
783  * Expect to allocate an aligned region that ends before max_addr.
784  */
785 static int alloc_nid_bottom_up_start_misaligned_check(void)
786 {
787 	struct memblock_region *rgn = &memblock.reserved.regions[0];
788 	void *allocated_ptr = NULL;
789 	phys_addr_t size = SZ_128;
790 	phys_addr_t misalign = SZ_2;
791 	phys_addr_t min_addr;
792 	phys_addr_t max_addr;
793 	phys_addr_t rgn_end;
794 
795 	PREFIX_PUSH();
796 	setup_memblock();
797 
798 	min_addr = memblock_start_of_DRAM() + misalign;
799 	max_addr = min_addr + SZ_512;
800 
801 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
802 					       min_addr, max_addr,
803 					       NUMA_NO_NODE);
804 	rgn_end = rgn->base + rgn->size;
805 
806 	ASSERT_NE(allocated_ptr, NULL);
807 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
808 
809 	ASSERT_EQ(rgn->size, size);
810 	ASSERT_EQ(rgn->base, min_addr + (SMP_CACHE_BYTES - misalign));
811 	ASSERT_LT(rgn_end, max_addr);
812 
813 	ASSERT_EQ(memblock.reserved.cnt, 1);
814 	ASSERT_EQ(memblock.reserved.total_size, size);
815 
816 	test_pass_pop();
817 
818 	return 0;
819 }
820 
821 /*
822  * A test that tries to allocate a memory region, which can't fit into min_addr
823  * and max_addr range:
824  *
825  *                      +    +
826  *  |---------+         +    +      |
827  *  |   rgn   |         |    |      |
828  *  +---------+---------+----+------+
829  *                      ^    ^
830  *                      |    |
831  *                      |    max_addr
832  *                      |
833  *                      min_addr
834  *
835  * Expect to drop the lower limit and allocate a memory region which
836  * starts at the beginning of the available memory.
837  */
838 static int alloc_nid_bottom_up_narrow_range_check(void)
839 {
840 	struct memblock_region *rgn = &memblock.reserved.regions[0];
841 	void *allocated_ptr = NULL;
842 	phys_addr_t size = SZ_256;
843 	phys_addr_t min_addr;
844 	phys_addr_t max_addr;
845 
846 	PREFIX_PUSH();
847 	setup_memblock();
848 
849 	min_addr = memblock_start_of_DRAM() + SZ_512;
850 	max_addr = min_addr + SMP_CACHE_BYTES;
851 
852 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
853 					       min_addr, max_addr,
854 					       NUMA_NO_NODE);
855 
856 	ASSERT_NE(allocated_ptr, NULL);
857 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
858 
859 	ASSERT_EQ(rgn->size, size);
860 	ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
861 
862 	ASSERT_EQ(memblock.reserved.cnt, 1);
863 	ASSERT_EQ(memblock.reserved.total_size, size);
864 
865 	test_pass_pop();
866 
867 	return 0;
868 }
869 
870 /*
871  * A test that tries to allocate memory within min_addr and max_addr range, when
872  * there are two reserved regions at the borders, with a gap big enough to fit
873  * a new region:
874  *
875  *                +           +
876  *  |    +--------+-------+   +------+  |
877  *  |    |   r2   |  rgn  |   |  r1  |  |
878  *  +----+--------+-------+---+------+--+
879  *                ^           ^
880  *                |           |
881  *                min_addr    max_addr
882  *
883  * Expect to merge the new region with r2. The other region (r1) does not
884  * get updated. The total size field gets updated.
885  */
886 
887 static int alloc_nid_bottom_up_reserved_with_space_check(void)
888 {
889 	struct memblock_region *rgn1 = &memblock.reserved.regions[1];
890 	struct memblock_region *rgn2 = &memblock.reserved.regions[0];
891 	void *allocated_ptr = NULL;
892 	struct region r1, r2;
893 	phys_addr_t r3_size = SZ_64;
894 	phys_addr_t gap_size = SMP_CACHE_BYTES;
895 	phys_addr_t total_size;
896 	phys_addr_t max_addr;
897 	phys_addr_t min_addr;
898 
899 	PREFIX_PUSH();
900 	setup_memblock();
901 
902 	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
903 	r1.size = SMP_CACHE_BYTES;
904 
905 	r2.size = SZ_128;
906 	r2.base = r1.base - (r3_size + gap_size + r2.size);
907 
908 	total_size = r1.size + r2.size + r3_size;
909 	min_addr = r2.base + r2.size;
910 	max_addr = r1.base;
911 
912 	memblock_reserve(r1.base, r1.size);
913 	memblock_reserve(r2.base, r2.size);
914 
915 	allocated_ptr = run_memblock_alloc_nid(r3_size, SMP_CACHE_BYTES,
916 					       min_addr, max_addr,
917 					       NUMA_NO_NODE);
918 
919 	ASSERT_NE(allocated_ptr, NULL);
920 	assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);
921 
922 	ASSERT_EQ(rgn1->size, r1.size);
923 	ASSERT_EQ(rgn1->base, max_addr);
924 
925 	ASSERT_EQ(rgn2->size, r2.size + r3_size);
926 	ASSERT_EQ(rgn2->base, r2.base);
927 
928 	ASSERT_EQ(memblock.reserved.cnt, 2);
929 	ASSERT_EQ(memblock.reserved.total_size, total_size);
930 
931 	test_pass_pop();
932 
933 	return 0;
934 }
935 
936 /*
937  * A test that tries to allocate memory within min_addr and max_addr range, when
938  * there are two reserved regions at the borders, with a gap of a size equal to
939  * the size of the new region:
940  *
941  *                         +   +
942  *  |----------+    +------+   +----+  |
943  *  |    r3    |    |  r2  |   | r1 |  |
944  *  +----------+----+------+---+----+--+
945  *                         ^   ^
946  *                         |   |
947  *                         |  max_addr
948  *                         |
949  *                         min_addr
950  *
951  * Expect to drop the lower limit and allocate memory at the beginning of the
952  * available memory. The region counter and total size fields get updated.
953  * Other regions are not modified.
954  */
955 
956 static int alloc_nid_bottom_up_reserved_no_space_check(void)
957 {
958 	struct memblock_region *rgn1 = &memblock.reserved.regions[2];
959 	struct memblock_region *rgn2 = &memblock.reserved.regions[1];
960 	struct memblock_region *rgn3 = &memblock.reserved.regions[0];
961 	void *allocated_ptr = NULL;
962 	struct region r1, r2;
963 	phys_addr_t r3_size = SZ_256;
964 	phys_addr_t gap_size = SMP_CACHE_BYTES;
965 	phys_addr_t total_size;
966 	phys_addr_t max_addr;
967 	phys_addr_t min_addr;
968 
969 	PREFIX_PUSH();
970 	setup_memblock();
971 
972 	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
973 	r1.size = SMP_CACHE_BYTES;
974 
975 	r2.size = SZ_128;
976 	r2.base = r1.base - (r2.size + gap_size);
977 
978 	total_size = r1.size + r2.size + r3_size;
979 	min_addr = r2.base + r2.size;
980 	max_addr = r1.base;
981 
982 	memblock_reserve(r1.base, r1.size);
983 	memblock_reserve(r2.base, r2.size);
984 
985 	allocated_ptr = run_memblock_alloc_nid(r3_size, SMP_CACHE_BYTES,
986 					       min_addr, max_addr,
987 					       NUMA_NO_NODE);
988 
989 	ASSERT_NE(allocated_ptr, NULL);
990 	assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);
991 
992 	ASSERT_EQ(rgn3->size, r3_size);
993 	ASSERT_EQ(rgn3->base, memblock_start_of_DRAM());
994 
995 	ASSERT_EQ(rgn2->size, r2.size);
996 	ASSERT_EQ(rgn2->base, r2.base);
997 
998 	ASSERT_EQ(rgn1->size, r1.size);
999 	ASSERT_EQ(rgn1->base, r1.base);
1000 
1001 	ASSERT_EQ(memblock.reserved.cnt, 3);
1002 	ASSERT_EQ(memblock.reserved.total_size, total_size);
1003 
1004 	test_pass_pop();
1005 
1006 	return 0;
1007 }
1008 
1009 /*
1010  * A test that tries to allocate a memory region, where max_addr is
1011  * bigger than the end address of the available memory. Expect to allocate
1012  * a region that starts at min_addr.
1013  */
1014 static int alloc_nid_bottom_up_cap_max_check(void)
1015 {
1016 	struct memblock_region *rgn = &memblock.reserved.regions[0];
1017 	void *allocated_ptr = NULL;
1018 	phys_addr_t size = SZ_256;
1019 	phys_addr_t min_addr;
1020 	phys_addr_t max_addr;
1021 
1022 	PREFIX_PUSH();
1023 	setup_memblock();
1024 
1025 	min_addr = memblock_start_of_DRAM() + SZ_1K;
1026 	max_addr = memblock_end_of_DRAM() + SZ_256;
1027 
1028 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
1029 					       min_addr, max_addr,
1030 					       NUMA_NO_NODE);
1031 
1032 	ASSERT_NE(allocated_ptr, NULL);
1033 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1034 
1035 	ASSERT_EQ(rgn->size, size);
1036 	ASSERT_EQ(rgn->base, min_addr);
1037 
1038 	ASSERT_EQ(memblock.reserved.cnt, 1);
1039 	ASSERT_EQ(memblock.reserved.total_size, size);
1040 
1041 	test_pass_pop();
1042 
1043 	return 0;
1044 }
1045 
1046 /*
1047  * A test that tries to allocate a memory region, where min_addr is
1048  * smaller than the start address of the available memory. Expect to allocate
1049  * a region at the beginning of the available memory.
1050  */
1051 static int alloc_nid_bottom_up_cap_min_check(void)
1052 {
1053 	struct memblock_region *rgn = &memblock.reserved.regions[0];
1054 	void *allocated_ptr = NULL;
1055 	phys_addr_t size = SZ_1K;
1056 	phys_addr_t min_addr;
1057 	phys_addr_t max_addr;
1058 
1059 	PREFIX_PUSH();
1060 	setup_memblock();
1061 
1062 	min_addr = memblock_start_of_DRAM();
1063 	max_addr = memblock_end_of_DRAM() - SZ_256;
1064 
1065 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
1066 					       min_addr, max_addr,
1067 					       NUMA_NO_NODE);
1068 
1069 	ASSERT_NE(allocated_ptr, NULL);
1070 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1071 
1072 	ASSERT_EQ(rgn->size, size);
1073 	ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
1074 
1075 	ASSERT_EQ(memblock.reserved.cnt, 1);
1076 	ASSERT_EQ(memblock.reserved.total_size, size);
1077 
1078 	test_pass_pop();
1079 
1080 	return 0;
1081 }
1082 
1083 /* Test case wrappers for range tests */
1084 static int alloc_nid_simple_check(void)
1085 {
1086 	test_print("\tRunning %s...\n", __func__);
1087 	memblock_set_bottom_up(false);
1088 	alloc_nid_top_down_simple_check();
1089 	memblock_set_bottom_up(true);
1090 	alloc_nid_bottom_up_simple_check();
1091 
1092 	return 0;
1093 }
1094 
1095 static int alloc_nid_misaligned_check(void)
1096 {
1097 	test_print("\tRunning %s...\n", __func__);
1098 	memblock_set_bottom_up(false);
1099 	alloc_nid_top_down_end_misaligned_check();
1100 	memblock_set_bottom_up(true);
1101 	alloc_nid_bottom_up_start_misaligned_check();
1102 
1103 	return 0;
1104 }
1105 
1106 static int alloc_nid_narrow_range_check(void)
1107 {
1108 	test_print("\tRunning %s...\n", __func__);
1109 	memblock_set_bottom_up(false);
1110 	alloc_nid_top_down_narrow_range_check();
1111 	memblock_set_bottom_up(true);
1112 	alloc_nid_bottom_up_narrow_range_check();
1113 
1114 	return 0;
1115 }
1116 
1117 static int alloc_nid_reserved_with_space_check(void)
1118 {
1119 	test_print("\tRunning %s...\n", __func__);
1120 	memblock_set_bottom_up(false);
1121 	alloc_nid_top_down_reserved_with_space_check();
1122 	memblock_set_bottom_up(true);
1123 	alloc_nid_bottom_up_reserved_with_space_check();
1124 
1125 	return 0;
1126 }
1127 
1128 static int alloc_nid_reserved_no_space_check(void)
1129 {
1130 	test_print("\tRunning %s...\n", __func__);
1131 	memblock_set_bottom_up(false);
1132 	alloc_nid_top_down_reserved_no_space_check();
1133 	memblock_set_bottom_up(true);
1134 	alloc_nid_bottom_up_reserved_no_space_check();
1135 
1136 	return 0;
1137 }
1138 
1139 static int alloc_nid_cap_max_check(void)
1140 {
1141 	test_print("\tRunning %s...\n", __func__);
1142 	memblock_set_bottom_up(false);
1143 	alloc_nid_top_down_cap_max_check();
1144 	memblock_set_bottom_up(true);
1145 	alloc_nid_bottom_up_cap_max_check();
1146 
1147 	return 0;
1148 }
1149 
1150 static int alloc_nid_cap_min_check(void)
1151 {
1152 	test_print("\tRunning %s...\n", __func__);
1153 	memblock_set_bottom_up(false);
1154 	alloc_nid_top_down_cap_min_check();
1155 	memblock_set_bottom_up(true);
1156 	alloc_nid_bottom_up_cap_min_check();
1157 
1158 	return 0;
1159 }
1160 
1161 static int alloc_nid_min_reserved_check(void)
1162 {
1163 	test_print("\tRunning %s...\n", __func__);
1164 	run_top_down(alloc_nid_min_reserved_generic_check);
1165 	run_bottom_up(alloc_nid_min_reserved_generic_check);
1166 
1167 	return 0;
1168 }
1169 
1170 static int alloc_nid_max_reserved_check(void)
1171 {
1172 	test_print("\tRunning %s...\n", __func__);
1173 	run_top_down(alloc_nid_max_reserved_generic_check);
1174 	run_bottom_up(alloc_nid_max_reserved_generic_check);
1175 
1176 	return 0;
1177 }
1178 
1179 static int alloc_nid_exact_address_check(void)
1180 {
1181 	test_print("\tRunning %s...\n", __func__);
1182 	run_top_down(alloc_nid_exact_address_generic_check);
1183 	run_bottom_up(alloc_nid_exact_address_generic_check);
1184 
1185 	return 0;
1186 }
1187 
1188 static int alloc_nid_reserved_full_merge_check(void)
1189 {
1190 	test_print("\tRunning %s...\n", __func__);
1191 	run_top_down(alloc_nid_reserved_full_merge_generic_check);
1192 	run_bottom_up(alloc_nid_reserved_full_merge_generic_check);
1193 
1194 	return 0;
1195 }
1196 
1197 static int alloc_nid_reserved_all_check(void)
1198 {
1199 	test_print("\tRunning %s...\n", __func__);
1200 	run_top_down(alloc_nid_reserved_all_generic_check);
1201 	run_bottom_up(alloc_nid_reserved_all_generic_check);
1202 
1203 	return 0;
1204 }
1205 
1206 static int alloc_nid_low_max_check(void)
1207 {
1208 	test_print("\tRunning %s...\n", __func__);
1209 	run_top_down(alloc_nid_low_max_generic_check);
1210 	run_bottom_up(alloc_nid_low_max_generic_check);
1211 
1212 	return 0;
1213 }
1214 
1215 static int memblock_alloc_nid_range_checks(void)
1216 {
1217 	test_print("Running %s range tests...\n",
1218 		   get_memblock_alloc_nid_name(alloc_nid_test_flags));
1219 
1220 	alloc_nid_simple_check();
1221 	alloc_nid_misaligned_check();
1222 	alloc_nid_narrow_range_check();
1223 	alloc_nid_reserved_with_space_check();
1224 	alloc_nid_reserved_no_space_check();
1225 	alloc_nid_cap_max_check();
1226 	alloc_nid_cap_min_check();
1227 
1228 	alloc_nid_min_reserved_check();
1229 	alloc_nid_max_reserved_check();
1230 	alloc_nid_exact_address_check();
1231 	alloc_nid_reserved_full_merge_check();
1232 	alloc_nid_reserved_all_check();
1233 	alloc_nid_low_max_check();
1234 
1235 	return 0;
1236 }
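/*
 * run_top_down() and run_bottom_up() used by the wrappers above come
 * from the shared test harness. A minimal sketch of the assumed
 * pattern (the real helpers may also manage the test-name prefix
 * stack):
 *
 *	static int run_top_down(int (*func)(void))
 *	{
 *		memblock_set_bottom_up(false);
 *		return func();
 *	}
 */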
1237 
1238 /*
1239  * A test that tries to allocate a memory region in a specific NUMA node that
1240  * has enough memory to allocate a region of the requested size.
1241  * Expect to allocate an aligned region at the end of the requested node.
1242  */
1243 static int alloc_nid_top_down_numa_simple_check(void)
1244 {
1245 	int nid_req = 3;
1246 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
1247 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
1248 	void *allocated_ptr = NULL;
1249 	phys_addr_t size;
1250 	phys_addr_t min_addr;
1251 	phys_addr_t max_addr;
1252 
1253 	PREFIX_PUSH();
1254 	setup_numa_memblock(node_fractions);
1255 
1256 	ASSERT_LE(SZ_4, req_node->size);
1257 	size = req_node->size / SZ_4;
1258 	min_addr = memblock_start_of_DRAM();
1259 	max_addr = memblock_end_of_DRAM();
1260 
1261 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
1262 					       min_addr, max_addr, nid_req);
1263 
1264 	ASSERT_NE(allocated_ptr, NULL);
1265 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1266 
1267 	ASSERT_EQ(new_rgn->size, size);
1268 	ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
1269 	ASSERT_LE(req_node->base, new_rgn->base);
1270 
1271 	ASSERT_EQ(memblock.reserved.cnt, 1);
1272 	ASSERT_EQ(memblock.reserved.total_size, size);
1273 
1274 	test_pass_pop();
1275 
1276 	return 0;
1277 }
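/*
 * region_end() is a harness helper; judging by the explicit rgn_end
 * computations in the range tests above, it is equivalent to:
 *
 *	static inline phys_addr_t region_end(struct memblock_region *rgn)
 *	{
 *		return rgn->base + rgn->size;
 *	}
 */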
1278 
1279 /*
1280  * A test that tries to allocate a memory region in a specific NUMA node that
1281  * does not have enough memory to allocate a region of the requested size:
1282  *
1283  *  |   +-----+          +------------------+     |
1284  *  |   | req |          |     expected     |     |
1285  *  +---+-----+----------+------------------+-----+
1286  *
1287  *  |                             +---------+     |
1288  *  |                             |   rgn   |     |
1289  *  +-----------------------------+---------+-----+
1290  *
1291  * Expect to allocate an aligned region at the end of the last node that has
1292  * enough memory (in this case, nid = 6) after falling back to NUMA_NO_NODE.
1293  */
1294 static int alloc_nid_top_down_numa_small_node_check(void)
1295 {
1296 	int nid_req = 1;
1297 	int nid_exp = 6;
1298 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
1299 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
1300 	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
1301 	void *allocated_ptr = NULL;
1302 	phys_addr_t size;
1303 	phys_addr_t min_addr;
1304 	phys_addr_t max_addr;
1305 
1306 	PREFIX_PUSH();
1307 	setup_numa_memblock(node_fractions);
1308 
1309 	size = SZ_2 * req_node->size;
1310 	min_addr = memblock_start_of_DRAM();
1311 	max_addr = memblock_end_of_DRAM();
1312 
1313 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
1314 					       min_addr, max_addr, nid_req);
1315 
1316 	ASSERT_NE(allocated_ptr, NULL);
1317 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1318 
1319 	ASSERT_EQ(new_rgn->size, size);
1320 	ASSERT_EQ(new_rgn->base, region_end(exp_node) - size);
1321 	ASSERT_LE(exp_node->base, new_rgn->base);
1322 
1323 	ASSERT_EQ(memblock.reserved.cnt, 1);
1324 	ASSERT_EQ(memblock.reserved.total_size, size);
1325 
1326 	test_pass_pop();
1327 
1328 	return 0;
1329 }
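/*
 * The NUMA_NO_NODE fallback exercised here happens inside memblock
 * itself: when a request cannot be satisfied from the requested node
 * and the exact-nid variant is not in use, the allocator widens the
 * search to all nodes, which is why the region lands in nid = 6 above.
 */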
1330 
1331 /*
1332  * A test that tries to allocate a memory region in a specific NUMA node that
1333  * is fully reserved:
1334  *
1335  *  |              +---------+            +------------------+     |
1336  *  |              |requested|            |     expected     |     |
1337  *  +--------------+---------+------------+------------------+-----+
1338  *
1339  *  |              +---------+                     +---------+     |
1340  *  |              | reserved|                     |   new   |     |
1341  *  +--------------+---------+---------------------+---------+-----+
1342  *
1343  * Expect to allocate an aligned region at the end of the last node that is
1344  * large enough and has enough unreserved memory (in this case, nid = 6) after
1345  * falling back to NUMA_NO_NODE. The region count and total size get updated.
1346  */
1347 static int alloc_nid_top_down_numa_node_reserved_check(void)
1348 {
1349 	int nid_req = 2;
1350 	int nid_exp = 6;
1351 	struct memblock_region *new_rgn = &memblock.reserved.regions[1];
1352 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
1353 	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
1354 	void *allocated_ptr = NULL;
1355 	phys_addr_t size;
1356 	phys_addr_t min_addr;
1357 	phys_addr_t max_addr;
1358 
1359 	PREFIX_PUSH();
1360 	setup_numa_memblock(node_fractions);
1361 
1362 	size = req_node->size;
1363 	min_addr = memblock_start_of_DRAM();
1364 	max_addr = memblock_end_of_DRAM();
1365 
1366 	memblock_reserve(req_node->base, req_node->size);
1367 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
1368 					       min_addr, max_addr, nid_req);
1369 
1370 	ASSERT_NE(allocated_ptr, NULL);
1371 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1372 
1373 	ASSERT_EQ(new_rgn->size, size);
1374 	ASSERT_EQ(new_rgn->base, region_end(exp_node) - size);
1375 	ASSERT_LE(exp_node->base, new_rgn->base);
1376 
1377 	ASSERT_EQ(memblock.reserved.cnt, 2);
1378 	ASSERT_EQ(memblock.reserved.total_size, size + req_node->size);
1379 
1380 	test_pass_pop();
1381 
1382 	return 0;
1383 }
1384 
1385 /*
1386  * A test that tries to allocate a memory region in a specific NUMA node that
1387  * is partially reserved but has enough memory for the allocated region:
1388  *
1389  *  |           +---------------------------------------+          |
1390  *  |           |               requested               |          |
1391  *  +-----------+---------------------------------------+----------+
1392  *
1393  *  |           +------------------+              +-----+          |
1394  *  |           |     reserved     |              | new |          |
1395  *  +-----------+------------------+--------------+-----+----------+
1396  *
1397  * Expect to allocate an aligned region at the end of the requested node. The
1398  * region count and total size get updated.
1399  */
1400 static int alloc_nid_top_down_numa_part_reserved_check(void)
1401 {
1402 	int nid_req = 4;
1403 	struct memblock_region *new_rgn = &memblock.reserved.regions[1];
1404 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
1405 	void *allocated_ptr = NULL;
1406 	struct region r1;
1407 	phys_addr_t size;
1408 	phys_addr_t min_addr;
1409 	phys_addr_t max_addr;
1410 
1411 	PREFIX_PUSH();
1412 	setup_numa_memblock(node_fractions);
1413 
1414 	ASSERT_LE(SZ_8, req_node->size);
1415 	r1.base = req_node->base;
1416 	r1.size = req_node->size / SZ_2;
1417 	size = r1.size / SZ_4;
1418 	min_addr = memblock_start_of_DRAM();
1419 	max_addr = memblock_end_of_DRAM();
1420 
1421 	memblock_reserve(r1.base, r1.size);
1422 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
1423 					       min_addr, max_addr, nid_req);
1424 
1425 	ASSERT_NE(allocated_ptr, NULL);
1426 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1427 
1428 	ASSERT_EQ(new_rgn->size, size);
1429 	ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
1430 	ASSERT_LE(req_node->base, new_rgn->base);
1431 
1432 	ASSERT_EQ(memblock.reserved.cnt, 2);
1433 	ASSERT_EQ(memblock.reserved.total_size, size + r1.size);
1434 
1435 	test_pass_pop();
1436 
1437 	return 0;
1438 }
1439 
1440 /*
1441  * A test that tries to allocate a memory region in a specific NUMA node that
1442  * is partially reserved and does not have enough contiguous memory for the
1443  * allocated region:
1444  *
1445  *  |           +-----------------------+         +----------------------|
1446  *  |           |       requested       |         |       expected       |
1447  *  +-----------+-----------------------+---------+----------------------+
1448  *
1449  *  |                 +----------+                           +-----------|
1450  *  |                 | reserved |                           |    new    |
1451  *  +-----------------+----------+---------------------------+-----------+
1452  *
1453  * Expect to allocate an aligned region at the end of the last node that is
1454  * large enough and has enough unreserved memory (in this case,
1455  * nid = NUMA_NODES - 1) after falling back to NUMA_NO_NODE. The region count
1456  * and total size get updated.
1457  */
1458 static int alloc_nid_top_down_numa_part_reserved_fallback_check(void)
1459 {
1460 	int nid_req = 4;
1461 	int nid_exp = NUMA_NODES - 1;
1462 	struct memblock_region *new_rgn = &memblock.reserved.regions[1];
1463 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
1464 	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
1465 	void *allocated_ptr = NULL;
1466 	struct region r1;
1467 	phys_addr_t size;
1468 	phys_addr_t min_addr;
1469 	phys_addr_t max_addr;
1470 
1471 	PREFIX_PUSH();
1472 	setup_numa_memblock(node_fractions);
1473 
1474 	ASSERT_LE(SZ_4, req_node->size);
1475 	size = req_node->size / SZ_2;
1476 	r1.base = req_node->base + (size / SZ_2);
1477 	r1.size = size;
1478 
1479 	min_addr = memblock_start_of_DRAM();
1480 	max_addr = memblock_end_of_DRAM();
1481 
1482 	memblock_reserve(r1.base, r1.size);
1483 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
1484 					       min_addr, max_addr, nid_req);
1485 
1486 	ASSERT_NE(allocated_ptr, NULL);
1487 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1488 
1489 	ASSERT_EQ(new_rgn->size, size);
1490 	ASSERT_EQ(new_rgn->base, region_end(exp_node) - size);
1491 	ASSERT_LE(exp_node->base, new_rgn->base);
1492 
1493 	ASSERT_EQ(memblock.reserved.cnt, 2);
1494 	ASSERT_EQ(memblock.reserved.total_size, size + r1.size);
1495 
1496 	test_pass_pop();
1497 
1498 	return 0;
1499 }
1500 
1501 /*
1502  * A test that tries to allocate a memory region that spans over the min_addr
1503  * and max_addr range and overlaps with two different nodes, where the first
1504  * node is the requested node:
1505  *
1506  *                                min_addr
1507  *                                |           max_addr
1508  *                                |           |
1509  *                                v           v
1510  *  |           +-----------------------+-----------+              |
1511  *  |           |       requested       |   node3   |              |
1512  *  +-----------+-----------------------+-----------+--------------+
1513  *                                +           +
1514  *  |                       +-----------+                          |
1515  *  |                       |    rgn    |                          |
1516  *  +-----------------------+-----------+--------------------------+
1517  *
1518  * Expect to drop the lower limit and allocate a memory region that ends at
1519  * the end of the requested node.
1520  */
1521 static int alloc_nid_top_down_numa_split_range_low_check(void)
1522 {
1523 	int nid_req = 2;
1524 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
1525 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
1526 	void *allocated_ptr = NULL;
1527 	phys_addr_t size = SZ_512;
1528 	phys_addr_t min_addr;
1529 	phys_addr_t max_addr;
1530 	phys_addr_t req_node_end;
1531 
1532 	PREFIX_PUSH();
1533 	setup_numa_memblock(node_fractions);
1534 
1535 	req_node_end = region_end(req_node);
1536 	min_addr = req_node_end - SZ_256;
1537 	max_addr = min_addr + size;
1538 
1539 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
1540 					       min_addr, max_addr, nid_req);
1541 
1542 	ASSERT_NE(allocated_ptr, NULL);
1543 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1544 
1545 	ASSERT_EQ(new_rgn->size, size);
1546 	ASSERT_EQ(new_rgn->base, req_node_end - size);
1547 	ASSERT_LE(req_node->base, new_rgn->base);
1548 
1549 	ASSERT_EQ(memblock.reserved.cnt, 1);
1550 	ASSERT_EQ(memblock.reserved.total_size, size);
1551 
1552 	test_pass_pop();
1553 
1554 	return 0;
1555 }
1556 
1557 /*
1558  * A test that tries to allocate a memory region that spans over the min_addr
1559  * and max_addr range and overlaps with two different nodes, where the second
1560  * node is the requested node:
1561  *
1562  *                               min_addr
1563  *                               |         max_addr
1564  *                               |         |
1565  *                               v         v
1566  *  |      +--------------------------+---------+                |
1567  *  |      |         expected         |requested|                |
1568  *  +------+--------------------------+---------+----------------+
1569  *                               +         +
1570  *  |                       +---------+                          |
1571  *  |                       |   rgn   |                          |
1572  *  +-----------------------+---------+--------------------------+
1573  *
1574  * Expect to drop the lower limit and allocate a memory region that
1575  * ends at the end of the first node that overlaps with the range.
1576  */
1577 static int alloc_nid_top_down_numa_split_range_high_check(void)
1578 {
1579 	int nid_req = 3;
1580 	int nid_exp = nid_req - 1;
1581 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
1582 	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
1583 	void *allocated_ptr = NULL;
1584 	phys_addr_t size = SZ_512;
1585 	phys_addr_t min_addr;
1586 	phys_addr_t max_addr;
1587 	phys_addr_t exp_node_end;
1588 
1589 	PREFIX_PUSH();
1590 	setup_numa_memblock(node_fractions);
1591 
1592 	exp_node_end = region_end(exp_node);
1593 	min_addr = exp_node_end - SZ_256;
1594 	max_addr = min_addr + size;
1595 
1596 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
1597 					       min_addr, max_addr, nid_req);
1598 
1599 	ASSERT_NE(allocated_ptr, NULL);
1600 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1601 
1602 	ASSERT_EQ(new_rgn->size, size);
1603 	ASSERT_EQ(new_rgn->base, exp_node_end - size);
1604 	ASSERT_LE(exp_node->base, new_rgn->base);
1605 
1606 	ASSERT_EQ(memblock.reserved.cnt, 1);
1607 	ASSERT_EQ(memblock.reserved.total_size, size);
1608 
1609 	test_pass_pop();
1610 
1611 	return 0;
1612 }
1613 
1614 /*
1615  * A test that tries to allocate a memory region that spans over the min_addr
1616  * and max_addr range and overlaps with two different nodes, where the requested
1617  * node ends before min_addr:
1618  *
1619  *                                         min_addr
1620  *                                         |         max_addr
1621  *                                         |         |
1622  *                                         v         v
1623  *  |    +---------------+        +-------------+---------+          |
1624  *  |    |   requested   |        |    node1    |  node2  |          |
1625  *  +----+---------------+--------+-------------+---------+----------+
1626  *                                         +         +
1627  *  |          +---------+                                           |
1628  *  |          |   rgn   |                                           |
1629  *  +----------+---------+-------------------------------------------+
1630  *
1631  * Expect to drop the lower limit and allocate a memory region that ends at
1632  * the end of the requested node.
1633  */
1634 static int alloc_nid_top_down_numa_no_overlap_split_check(void)
1635 {
1636 	int nid_req = 2;
1637 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
1638 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
1639 	struct memblock_region *node2 = &memblock.memory.regions[6];
1640 	void *allocated_ptr = NULL;
1641 	phys_addr_t size;
1642 	phys_addr_t min_addr;
1643 	phys_addr_t max_addr;
1644 
1645 	PREFIX_PUSH();
1646 	setup_numa_memblock(node_fractions);
1647 
1648 	size = SZ_512;
1649 	min_addr = node2->base - SZ_256;
1650 	max_addr = min_addr + size;
1651 
1652 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
1653 					       min_addr, max_addr, nid_req);
1654 
1655 	ASSERT_NE(allocated_ptr, NULL);
1656 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1657 
1658 	ASSERT_EQ(new_rgn->size, size);
1659 	ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
1660 	ASSERT_LE(req_node->base, new_rgn->base);
1661 
1662 	ASSERT_EQ(memblock.reserved.cnt, 1);
1663 	ASSERT_EQ(memblock.reserved.total_size, size);
1664 
1665 	test_pass_pop();
1666 
1667 	return 0;
1668 }
1669 
1670 /*
1671  * A test that tries to allocate memory within min_addr and max_addr range when
1672  * the requested node and the range do not overlap, and the requested node ends
1673  * before min_addr. The range overlaps with multiple nodes along node
1674  * boundaries:
1675  *
1676  *                          min_addr
1677  *                          |                                 max_addr
1678  *                          |                                 |
1679  *                          v                                 v
1680  *  |-----------+           +----------+----...----+----------+      |
1681  *  | requested |           | min node |    ...    | max node |      |
1682  *  +-----------+-----------+----------+----...----+----------+------+
1683  *                          +                                 +
1684  *  |                                                   +-----+      |
1685  *  |                                                   | rgn |      |
1686  *  +---------------------------------------------------+-----+------+
1687  *
1688  * Expect to allocate a memory region at the end of the final node in
1689  * the range after falling back to NUMA_NO_NODE.
1690  */
1691 static int alloc_nid_top_down_numa_no_overlap_low_check(void)
1692 {
1693 	int nid_req = 0;
1694 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
1695 	struct memblock_region *min_node = &memblock.memory.regions[2];
1696 	struct memblock_region *max_node = &memblock.memory.regions[5];
1697 	void *allocated_ptr = NULL;
1698 	phys_addr_t size = SZ_64;
1699 	phys_addr_t max_addr;
1700 	phys_addr_t min_addr;
1701 
1702 	PREFIX_PUSH();
1703 	setup_numa_memblock(node_fractions);
1704 
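	/* Make the range cover nodes 2 through 5 exactly */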
1705 	min_addr = min_node->base;
1706 	max_addr = region_end(max_node);
1707 
1708 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
1709 					       min_addr, max_addr, nid_req);
1710 
1711 	ASSERT_NE(allocated_ptr, NULL);
1712 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1713 
1714 	ASSERT_EQ(new_rgn->size, size);
1715 	ASSERT_EQ(new_rgn->base, max_addr - size);
1716 	ASSERT_LE(max_node->base, new_rgn->base);
1717 
1718 	ASSERT_EQ(memblock.reserved.cnt, 1);
1719 	ASSERT_EQ(memblock.reserved.total_size, size);
1720 
1721 	test_pass_pop();
1722 
1723 	return 0;
1724 }
1725 
1726 /*
1727  * A test that tries to allocate memory within the min_addr and max_addr range
1728  * when the requested node and the range do not overlap, and the requested node
1729  * starts after max_addr. The range overlaps with multiple nodes along node
1730  * boundaries:
1731  *
1732  *        min_addr
1733  *        |                                 max_addr
1734  *        |                                 |
1735  *        v                                 v
1736  *  |     +----------+----...----+----------+        +-----------+   |
1737  *  |     | min node |    ...    | max node |        | requested |   |
1738  *  +-----+----------+----...----+----------+--------+-----------+---+
1739  *        +                                 +
1740  *  |                                 +-----+                        |
1741  *  |                                 | rgn |                        |
1742  *  +---------------------------------+-----+------------------------+
1743  *
1744  * Expect to allocate a memory region at the end of the final node in
1745  * the range after falling back to NUMA_NO_NODE.
1746  */
1747 static int alloc_nid_top_down_numa_no_overlap_high_check(void)
1748 {
1749 	int nid_req = 7;
1750 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
1751 	struct memblock_region *min_node = &memblock.memory.regions[2];
1752 	struct memblock_region *max_node = &memblock.memory.regions[5];
1753 	void *allocated_ptr = NULL;
1754 	phys_addr_t size = SZ_64;
1755 	phys_addr_t max_addr;
1756 	phys_addr_t min_addr;
1757 
1758 	PREFIX_PUSH();
1759 	setup_numa_memblock(node_fractions);
1760 
1761 	min_addr = min_node->base;
1762 	max_addr = region_end(max_node);
1763 
1764 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
1765 					       min_addr, max_addr, nid_req);
1766 
1767 	ASSERT_NE(allocated_ptr, NULL);
1768 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1769 
1770 	ASSERT_EQ(new_rgn->size, size);
1771 	ASSERT_EQ(new_rgn->base, max_addr - size);
1772 	ASSERT_LE(max_node->base, new_rgn->base);
1773 
1774 	ASSERT_EQ(memblock.reserved.cnt, 1);
1775 	ASSERT_EQ(memblock.reserved.total_size, size);
1776 
1777 	test_pass_pop();
1778 
1779 	return 0;
1780 }
1781 
1782 /*
1783  * A test that tries to allocate a memory region in a specific NUMA node that
1784  * has enough memory to allocate a region of the requested size.
1785  * Expect to allocate an aligned region at the beginning of the requested node.
1786  */
1787 static int alloc_nid_bottom_up_numa_simple_check(void)
1788 {
1789 	int nid_req = 3;
1790 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
1791 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
1792 	void *allocated_ptr = NULL;
1793 	phys_addr_t size;
1794 	phys_addr_t min_addr;
1795 	phys_addr_t max_addr;
1796 
1797 	PREFIX_PUSH();
1798 	setup_numa_memblock(node_fractions);
1799 
1800 	ASSERT_LE(SZ_4, req_node->size);
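	/* Request a quarter of the node's size so the region is sure to fit */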
1801 	size = req_node->size / SZ_4;
1802 	min_addr = memblock_start_of_DRAM();
1803 	max_addr = memblock_end_of_DRAM();
1804 
1805 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
1806 					       min_addr, max_addr, nid_req);
1807 
1808 	ASSERT_NE(allocated_ptr, NULL);
1809 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1810 
1811 	ASSERT_EQ(new_rgn->size, size);
1812 	ASSERT_EQ(new_rgn->base, req_node->base);
1813 	ASSERT_LE(region_end(new_rgn), region_end(req_node));
1814 
1815 	ASSERT_EQ(memblock.reserved.cnt, 1);
1816 	ASSERT_EQ(memblock.reserved.total_size, size);
1817 
1818 	test_pass_pop();
1819 
1820 	return 0;
1821 }
1822 
1823 /*
1824  * A test that tries to allocate a memory region in a specific NUMA node that
1825  * does not have enough memory to allocate a region of the requested size:
1826  *
1827  *  |----------------------+-----+                |
1828  *  |       expected       | req |                |
1829  *  +----------------------+-----+----------------+
1830  *
1831  *  |---------+                                   |
1832  *  |   rgn   |                                   |
1833  *  +---------+-----------------------------------+
1834  *
1835  * Expect to allocate an aligned region at the beginning of the first node that
1836  * has enough memory (in this case, nid = 0) after falling back to NUMA_NO_NODE.
1837  */
1838 static int alloc_nid_bottom_up_numa_small_node_check(void)
1839 {
1840 	int nid_req = 1;
1841 	int nid_exp = 0;
1842 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
1843 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
1844 	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
1845 	void *allocated_ptr = NULL;
1846 	phys_addr_t size;
1847 	phys_addr_t min_addr;
1848 	phys_addr_t max_addr;
1849 
1850 	PREFIX_PUSH();
1851 	setup_numa_memblock(node_fractions);
1852 
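	/* Request twice the node's size so the allocation cannot fit in it */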
1853 	size = SZ_2 * req_node->size;
1854 	min_addr = memblock_start_of_DRAM();
1855 	max_addr = memblock_end_of_DRAM();
1856 
1857 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
1858 					       min_addr, max_addr, nid_req);
1859 
1860 	ASSERT_NE(allocated_ptr, NULL);
1861 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1862 
1863 	ASSERT_EQ(new_rgn->size, size);
1864 	ASSERT_EQ(new_rgn->base, exp_node->base);
1865 	ASSERT_LE(region_end(new_rgn), region_end(exp_node));
1866 
1867 	ASSERT_EQ(memblock.reserved.cnt, 1);
1868 	ASSERT_EQ(memblock.reserved.total_size, size);
1869 
1870 	test_pass_pop();
1871 
1872 	return 0;
1873 }
1874 
1875 /*
1876  * A test that tries to allocate a memory region in a specific NUMA node that
1877  * is fully reserved:
1878  *
1879  *  |----------------------+     +-----------+                    |
1880  *  |       expected       |     | requested |                    |
1881  *  +----------------------+-----+-----------+--------------------+
1882  *
1883  *  |-----------+                +-----------+                    |
1884  *  |    new    |                |  reserved |                    |
1885  *  +-----------+----------------+-----------+--------------------+
1886  *
1887  * Expect to allocate an aligned region at the beginning of the first node that
1888  * is large enough and has enough unreserved memory (in this case, nid = 0)
1889  * after falling back to NUMA_NO_NODE. The region count and total size get
1890  * updated.
1891  */
1892 static int alloc_nid_bottom_up_numa_node_reserved_check(void)
1893 {
1894 	int nid_req = 2;
1895 	int nid_exp = 0;
1896 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
1897 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
1898 	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
1899 	void *allocated_ptr = NULL;
1900 	phys_addr_t size;
1901 	phys_addr_t min_addr;
1902 	phys_addr_t max_addr;
1903 
1904 	PREFIX_PUSH();
1905 	setup_numa_memblock(node_fractions);
1906 
1907 	size = req_node->size;
1908 	min_addr = memblock_start_of_DRAM();
1909 	max_addr = memblock_end_of_DRAM();
1910 
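	/* Reserve the entire requested node to force the fallback */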
1911 	memblock_reserve(req_node->base, req_node->size);
1912 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
1913 					       min_addr, max_addr, nid_req);
1914 
1915 	ASSERT_NE(allocated_ptr, NULL);
1916 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1917 
1918 	ASSERT_EQ(new_rgn->size, size);
1919 	ASSERT_EQ(new_rgn->base, exp_node->base);
1920 	ASSERT_LE(region_end(new_rgn), region_end(exp_node));
1921 
1922 	ASSERT_EQ(memblock.reserved.cnt, 2);
1923 	ASSERT_EQ(memblock.reserved.total_size, size + req_node->size);
1924 
1925 	test_pass_pop();
1926 
1927 	return 0;
1928 }
1929 
1930 /*
1931  * A test that tries to allocate a memory region in a specific NUMA node that
1932  * is partially reserved but has enough memory for the allocated region:
1933  *
1934  *  |           +---------------------------------------+         |
1935  *  |           |               requested               |         |
1936  *  +-----------+---------------------------------------+---------+
1937  *
1938  *  |           +------------------+-----+                        |
1939  *  |           |     reserved     | new |                        |
1940  *  +-----------+------------------+-----+------------------------+
1941  *
1942  * Expect to allocate an aligned region in the requested node that merges with
1943  * the existing reserved region. The total size gets updated.
1944  */
1945 static int alloc_nid_bottom_up_numa_part_reserved_check(void)
1946 {
1947 	int nid_req = 4;
1948 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
1949 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
1950 	void *allocated_ptr = NULL;
1951 	struct region r1;
1952 	phys_addr_t size;
1953 	phys_addr_t min_addr;
1954 	phys_addr_t max_addr;
1955 	phys_addr_t total_size;
1956 
1957 	PREFIX_PUSH();
1958 	setup_numa_memblock(node_fractions);
1959 
1960 	ASSERT_LE(SZ_8, req_node->size);
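	/*
	 * Reserve the first half of the requested node; the new region, a
	 * quarter of the reserved size, should merge with it.
	 */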
1961 	r1.base = req_node->base;
1962 	r1.size = req_node->size / SZ_2;
1963 	size = r1.size / SZ_4;
1964 	min_addr = memblock_start_of_DRAM();
1965 	max_addr = memblock_end_of_DRAM();
1966 	total_size = size + r1.size;
1967 
1968 	memblock_reserve(r1.base, r1.size);
1969 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
1970 					       min_addr, max_addr, nid_req);
1971 
1972 	ASSERT_NE(allocated_ptr, NULL);
1973 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
1974 
1975 	ASSERT_EQ(new_rgn->size, total_size);
1976 	ASSERT_EQ(new_rgn->base, req_node->base);
1977 	ASSERT_LE(region_end(new_rgn), region_end(req_node));
1978 
1979 	ASSERT_EQ(memblock.reserved.cnt, 1);
1980 	ASSERT_EQ(memblock.reserved.total_size, total_size);
1981 
1982 	test_pass_pop();
1983 
1984 	return 0;
1985 }
1986 
1987 /*
1988  * A test that tries to allocate a memory region in a specific NUMA node that
1989  * is partially reserved and does not have enough contiguous memory for the
1990  * allocated region:
1991  *
1992  *  |----------------------+       +-----------------------+         |
1993  *  |       expected       |       |       requested       |         |
1994  *  +----------------------+-------+-----------------------+---------+
1995  *
1996  *  |-----------+                        +----------+                |
1997  *  |    new    |                        | reserved |                |
1998  *  +-----------+------------------------+----------+----------------+
1999  *
2000  * Expect to allocate an aligned region at the beginning of the first
2001  * node that is large enough and has enough unreserved memory (in this case,
2002  * nid = 0) after falling back to NUMA_NO_NODE. The region count and total size
2003  * get updated.
2004  */
2005 static int alloc_nid_bottom_up_numa_part_reserved_fallback_check(void)
2006 {
2007 	int nid_req = 4;
2008 	int nid_exp = 0;
2009 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
2010 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
2011 	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
2012 	void *allocated_ptr = NULL;
2013 	struct region r1;
2014 	phys_addr_t size;
2015 	phys_addr_t min_addr;
2016 	phys_addr_t max_addr;
2017 
2018 	PREFIX_PUSH();
2019 	setup_numa_memblock(node_fractions);
2020 
2021 	ASSERT_LE(SZ_4, req_node->size);
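	/*
	 * Reserve the middle half of the node, leaving only quarter-sized
	 * gaps at either end, so a half-node-sized region cannot fit.
	 */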
2022 	size = req_node->size / SZ_2;
2023 	r1.base = req_node->base + (size / SZ_2);
2024 	r1.size = size;
2025 
2026 	min_addr = memblock_start_of_DRAM();
2027 	max_addr = memblock_end_of_DRAM();
2028 
2029 	memblock_reserve(r1.base, r1.size);
2030 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
2031 					       min_addr, max_addr, nid_req);
2032 
2033 	ASSERT_NE(allocated_ptr, NULL);
2034 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
2035 
2036 	ASSERT_EQ(new_rgn->size, size);
2037 	ASSERT_EQ(new_rgn->base, exp_node->base);
2038 	ASSERT_LE(region_end(new_rgn), region_end(exp_node));
2039 
2040 	ASSERT_EQ(memblock.reserved.cnt, 2);
2041 	ASSERT_EQ(memblock.reserved.total_size, size + r1.size);
2042 
2043 	test_pass_pop();
2044 
2045 	return 0;
2046 }
2047 
2048 /*
2049  * A test that tries to allocate a memory region that spans over the min_addr
2050  * and max_addr range and overlaps with two different nodes, where the first
2051  * node is the requested node:
2052  *
2053  *                                min_addr
2054  *                                |           max_addr
2055  *                                |           |
2056  *                                v           v
2057  *  |           +-----------------------+-----------+              |
2058  *  |           |       requested       |   node3   |              |
2059  *  +-----------+-----------------------+-----------+--------------+
2060  *                                +           +
2061  *  |           +-----------+                                      |
2062  *  |           |    rgn    |                                      |
2063  *  +-----------+-----------+--------------------------------------+
2064  *
2065  * Expect to drop the lower limit and allocate a memory region at the beginning
2066  * of the requested node.
2067  */
2068 static int alloc_nid_bottom_up_numa_split_range_low_check(void)
2069 {
2070 	int nid_req = 2;
2071 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
2072 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
2073 	void *allocated_ptr = NULL;
2074 	phys_addr_t size = SZ_512;
2075 	phys_addr_t min_addr;
2076 	phys_addr_t max_addr;
2077 	phys_addr_t req_node_end;
2078 
2079 	PREFIX_PUSH();
2080 	setup_numa_memblock(node_fractions);
2081 
2082 	req_node_end = region_end(req_node);
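	/*
	 * Start the range SZ_256 below the requested node's end so it
	 * spills into the next node.
	 */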
2083 	min_addr = req_node_end - SZ_256;
2084 	max_addr = min_addr + size;
2085 
2086 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
2087 					       min_addr, max_addr, nid_req);
2088 
2089 	ASSERT_NE(allocated_ptr, NULL);
2090 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
2091 
2092 	ASSERT_EQ(new_rgn->size, size);
2093 	ASSERT_EQ(new_rgn->base, req_node->base);
2094 	ASSERT_LE(region_end(new_rgn), req_node_end);
2095 
2096 	ASSERT_EQ(memblock.reserved.cnt, 1);
2097 	ASSERT_EQ(memblock.reserved.total_size, size);
2098 
2099 	test_pass_pop();
2100 
2101 	return 0;
2102 }
2103 
2104 /*
2105  * A test that tries to allocate a memory region that spans over the min_addr
2106  * and max_addr range and overlaps with two different nodes, where the second
2107  * node is the requested node:
2108  *
2109  *                                                min_addr
2110  *                                                |         max_addr
2111  *                                                |         |
2112  *                                                v         v
2113  *  |------------------+        +----------------------+---------+      |
2114  *  |     expected     |        |       previous       |requested|      |
2115  *  +------------------+--------+----------------------+---------+------+
2116  *                                                +         +
2117  *  |---------+                                                         |
2118  *  |   rgn   |                                                         |
2119  *  +---------+---------------------------------------------------------+
2120  *
2121  * Expect to drop the lower limit and allocate a memory region at the beginning
2122  * of the first node that has enough memory.
2123  */
2124 static int alloc_nid_bottom_up_numa_split_range_high_check(void)
2125 {
2126 	int nid_req = 3;
2127 	int nid_exp = 0;
2128 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
2129 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
2130 	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
2131 	void *allocated_ptr = NULL;
2132 	phys_addr_t size = SZ_512;
2133 	phys_addr_t min_addr;
2134 	phys_addr_t max_addr;
2135 	phys_addr_t exp_node_end;
2136 
2137 	PREFIX_PUSH();
2138 	setup_numa_memblock(node_fractions);
2139 
2140 	exp_node_end = region_end(req_node);
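	/*
	 * Start the range SZ_256 below the requested node's base, inside
	 * the previous node.
	 */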
2141 	min_addr = req_node->base - SZ_256;
2142 	max_addr = min_addr + size;
2143 
2144 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
2145 					       min_addr, max_addr, nid_req);
2146 
2147 	ASSERT_NE(allocated_ptr, NULL);
2148 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
2149 
2150 	ASSERT_EQ(new_rgn->size, size);
2151 	ASSERT_EQ(new_rgn->base, exp_node->base);
2152 	ASSERT_LE(region_end(new_rgn), exp_node_end);
2153 
2154 	ASSERT_EQ(memblock.reserved.cnt, 1);
2155 	ASSERT_EQ(memblock.reserved.total_size, size);
2156 
2157 	test_pass_pop();
2158 
2159 	return 0;
2160 }
2161 
2162 /*
2163  * A test that tries to allocate a memory region that spans over the min_addr
2164  * and max_addr range and overlaps with two different nodes, where the requested
2165  * node ends before min_addr:
2166  *
2167  *                                         min_addr
2168  *                                         |         max_addr
2169  *                                         |         |
2170  *                                         v         v
2171  *  |    +---------------+        +-------------+---------+         |
2172  *  |    |   requested   |        |    node1    |  node2  |         |
2173  *  +----+---------------+--------+-------------+---------+---------+
2174  *                                         +         +
2175  *  |    +---------+                                                |
2176  *  |    |   rgn   |                                                |
2177  *  +----+---------+------------------------------------------------+
2178  *
2179  * Expect to drop the lower limit and allocate a memory region that starts at
2180  * the beginning of the requested node.
2181  */
2182 static int alloc_nid_bottom_up_numa_no_overlap_split_check(void)
2183 {
2184 	int nid_req = 2;
2185 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
2186 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
2187 	struct memblock_region *node2 = &memblock.memory.regions[6];
2188 	void *allocated_ptr = NULL;
2189 	phys_addr_t size;
2190 	phys_addr_t min_addr;
2191 	phys_addr_t max_addr;
2192 
2193 	PREFIX_PUSH();
2194 	setup_numa_memblock(node_fractions);
2195 
2196 	size = SZ_512;
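	/*
	 * As in the top-down variant, split the range across the
	 * node1/node2 boundary while the requested node ends below min_addr.
	 */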
2197 	min_addr = node2->base - SZ_256;
2198 	max_addr = min_addr + size;
2199 
2200 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
2201 					       min_addr, max_addr, nid_req);
2202 
2203 	ASSERT_NE(allocated_ptr, NULL);
2204 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
2205 
2206 	ASSERT_EQ(new_rgn->size, size);
2207 	ASSERT_EQ(new_rgn->base, req_node->base);
2208 	ASSERT_LE(region_end(new_rgn), region_end(req_node));
2209 
2210 	ASSERT_EQ(memblock.reserved.cnt, 1);
2211 	ASSERT_EQ(memblock.reserved.total_size, size);
2212 
2213 	test_pass_pop();
2214 
2215 	return 0;
2216 }
2217 
2218 /*
2219  * A test that tries to allocate memory within the min_addr and max_addr range
2220  * when the requested node and the range do not overlap, and the requested node
2221  * ends before min_addr. The range overlaps with multiple nodes along node
2222  * boundaries:
2223  *
2224  *                          min_addr
2225  *                          |                                 max_addr
2226  *                          |                                 |
2227  *                          v                                 v
2228  *  |-----------+           +----------+----...----+----------+      |
2229  *  | requested |           | min node |    ...    | max node |      |
2230  *  +-----------+-----------+----------+----...----+----------+------+
2231  *                          +                                 +
2232  *  |                       +-----+                                  |
2233  *  |                       | rgn |                                  |
2234  *  +-----------------------+-----+----------------------------------+
2235  *
2236  * Expect to allocate a memory region at the beginning of the first node
2237  * in the range after falling back to NUMA_NO_NODE.
2238  */
2239 static int alloc_nid_bottom_up_numa_no_overlap_low_check(void)
2240 {
2241 	int nid_req = 0;
2242 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
2243 	struct memblock_region *min_node = &memblock.memory.regions[2];
2244 	struct memblock_region *max_node = &memblock.memory.regions[5];
2245 	void *allocated_ptr = NULL;
2246 	phys_addr_t size = SZ_64;
2247 	phys_addr_t max_addr;
2248 	phys_addr_t min_addr;
2249 
2250 	PREFIX_PUSH();
2251 	setup_numa_memblock(node_fractions);
2252 
2253 	min_addr = min_node->base;
2254 	max_addr = region_end(max_node);
2255 
2256 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
2257 					       min_addr, max_addr, nid_req);
2258 
2259 	ASSERT_NE(allocated_ptr, NULL);
2260 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
2261 
2262 	ASSERT_EQ(new_rgn->size, size);
2263 	ASSERT_EQ(new_rgn->base, min_addr);
2264 	ASSERT_LE(region_end(new_rgn), region_end(min_node));
2265 
2266 	ASSERT_EQ(memblock.reserved.cnt, 1);
2267 	ASSERT_EQ(memblock.reserved.total_size, size);
2268 
2269 	test_pass_pop();
2270 
2271 	return 0;
2272 }
2273 
2274 /*
2275  * A test that tries to allocate memory within the min_addr and max_addr range
2276  * when the requested node and the range do not overlap, and the requested node
2277  * starts after max_addr. The range overlaps with multiple nodes along node
2278  * boundaries:
2279  *
2280  *        min_addr
2281  *        |                                 max_addr
2282  *        |                                 |
2283  *        v                                 v
2284  *  |     +----------+----...----+----------+         +---------+   |
2285  *  |     | min node |    ...    | max node |         |requested|   |
2286  *  +-----+----------+----...----+----------+---------+---------+---+
2287  *        +                                 +
2288  *  |     +-----+                                                   |
2289  *  |     | rgn |                                                   |
2290  *  +-----+-----+---------------------------------------------------+
2291  *
2292  * Expect to allocate a memory region at the beginning of the first node
2293  * in the range after falling back to NUMA_NO_NODE.
2294  */
2295 static int alloc_nid_bottom_up_numa_no_overlap_high_check(void)
2296 {
2297 	int nid_req = 7;
2298 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
2299 	struct memblock_region *min_node = &memblock.memory.regions[2];
2300 	struct memblock_region *max_node = &memblock.memory.regions[5];
2301 	void *allocated_ptr = NULL;
2302 	phys_addr_t size = SZ_64;
2303 	phys_addr_t max_addr;
2304 	phys_addr_t min_addr;
2305 
2306 	PREFIX_PUSH();
2307 	setup_numa_memblock(node_fractions);
2308 
2309 	min_addr = min_node->base;
2310 	max_addr = region_end(max_node);
2311 
2312 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
2313 					       min_addr, max_addr, nid_req);
2314 
2315 	ASSERT_NE(allocated_ptr, NULL);
2316 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
2317 
2318 	ASSERT_EQ(new_rgn->size, size);
2319 	ASSERT_EQ(new_rgn->base, min_addr);
2320 	ASSERT_LE(region_end(new_rgn), region_end(min_node));
2321 
2322 	ASSERT_EQ(memblock.reserved.cnt, 1);
2323 	ASSERT_EQ(memblock.reserved.total_size, size);
2324 
2325 	test_pass_pop();
2326 
2327 	return 0;
2328 }
2329 
2330 /*
2331  * A test that tries to allocate a memory region in a specific NUMA node that
2332  * does not have enough memory to allocate a region of the requested size.
2333  * Additionally, none of the nodes have enough memory to allocate the region:
2334  *
2335  * +-----------------------------------+
2336  * |                new                |
2337  * +-----------------------------------+
2338  *     |-------+-------+-------+-------+-------+-------+-------+-------|
2339  *     | node0 | node1 | node2 | node3 | node4 | node5 | node6 | node7 |
2340  *     +-------+-------+-------+-------+-------+-------+-------+-------+
2341  *
2342  * Expect no allocation to happen.
2343  */
2344 static int alloc_nid_numa_large_region_generic_check(void)
2345 {
2346 	int nid_req = 3;
2347 	void *allocated_ptr = NULL;
2348 	phys_addr_t size = MEM_SIZE / SZ_2;
2349 	phys_addr_t min_addr;
2350 	phys_addr_t max_addr;
2351 
2352 	PREFIX_PUSH();
2353 	setup_numa_memblock(node_fractions);
2354 
2355 	min_addr = memblock_start_of_DRAM();
2356 	max_addr = memblock_end_of_DRAM();
2357 
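	/* MEM_SIZE / 2 exceeds every node: the largest is only MEM_SIZE / 4 */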
2358 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
2359 					       min_addr, max_addr, nid_req);
2360 	ASSERT_EQ(allocated_ptr, NULL);
2361 
2362 	test_pass_pop();
2363 
2364 	return 0;
2365 }
2366 
2367 /*
2368  * A test that tries to allocate memory within the min_addr and max_addr range
2369  * when there are two reserved regions at the borders. The requested node starts
2370  * at min_addr, ends at max_addr, and is the same size as the region to be
2371  * allocated:
2372  *
2373  *                     min_addr
2374  *                     |                       max_addr
2375  *                     |                       |
2376  *                     v                       v
2377  *  |      +-----------+-----------------------+-----------------------|
2378  *  |      |   node5   |       requested       |         node7         |
2379  *  +------+-----------+-----------------------+-----------------------+
2380  *                     +                       +
2381  *  |             +----+-----------------------+----+                  |
2382  *  |             | r2 |          new          | r1 |                  |
2383  *  +-------------+----+-----------------------+----+------------------+
2384  *
2385  * Expect to merge all of the regions into one. The region counter and total
2386  * size fields get updated.
2387  */
2388 static int alloc_nid_numa_reserved_full_merge_generic_check(void)
2389 {
2390 	int nid_req = 6;
2391 	int nid_next = nid_req + 1;
2392 	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
2393 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
2394 	struct memblock_region *next_node = &memblock.memory.regions[nid_next];
2395 	void *allocated_ptr = NULL;
2396 	struct region r1, r2;
2397 	phys_addr_t size = req_node->size;
2398 	phys_addr_t total_size;
2399 	phys_addr_t max_addr;
2400 	phys_addr_t min_addr;
2401 
2402 	PREFIX_PUSH();
2403 	setup_numa_memblock(node_fractions);
2404 
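	/*
	 * Reserve SZ_128 at the base of the next node (r1) and SZ_128 just
	 * below the requested node (r2), leaving exactly size free bytes
	 * between them.
	 */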
2405 	r1.base = next_node->base;
2406 	r1.size = SZ_128;
2407 
2408 	r2.size = SZ_128;
2409 	r2.base = r1.base - (size + r2.size);
2410 
2411 	total_size = r1.size + r2.size + size;
2412 	min_addr = r2.base + r2.size;
2413 	max_addr = r1.base;
2414 
2415 	memblock_reserve(r1.base, r1.size);
2416 	memblock_reserve(r2.base, r2.size);
2417 
2418 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
2419 					       min_addr, max_addr, nid_req);
2420 
2421 	ASSERT_NE(allocated_ptr, NULL);
2422 	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
2423 
2424 	ASSERT_EQ(new_rgn->size, total_size);
2425 	ASSERT_EQ(new_rgn->base, r2.base);
2426 
2427 	ASSERT_LE(new_rgn->base, req_node->base);
2428 	ASSERT_LE(region_end(req_node), region_end(new_rgn));
2429 
2430 	ASSERT_EQ(memblock.reserved.cnt, 1);
2431 	ASSERT_EQ(memblock.reserved.total_size, total_size);
2432 
2433 	test_pass_pop();
2434 
2435 	return 0;
2436 }
2437 
2438 /*
2439  * A test that tries to allocate memory within the min_addr and max_addr range,
2440  * where the total range can fit the region, but it is split between two nodes
2441  * and everything else is reserved. Additionally, nid is set to NUMA_NO_NODE
2442  * instead of requesting a specific node:
2443  *
2444  *                         +-----------+
2445  *                         |    new    |
2446  *                         +-----------+
2447  *  |      +---------------------+-----------|
2448  *  |      |      prev node      | next node |
2449  *  +------+---------------------+-----------+
2450  *                         +           +
2451  *  |----------------------+           +-----|
2452  *  |          r1          |           |  r2 |
2453  *  +----------------------+-----------+-----+
2454  *                         ^           ^
2455  *                         |           |
2456  *                         |           max_addr
2457  *                         |
2458  *                         min_addr
2459  *
2460  * Expect no allocation to happen.
2461  */
2462 static int alloc_nid_numa_split_all_reserved_generic_check(void)
2463 {
2464 	void *allocated_ptr = NULL;
2465 	struct memblock_region *next_node = &memblock.memory.regions[7];
2466 	struct region r1, r2;
2467 	phys_addr_t size = SZ_256;
2468 	phys_addr_t max_addr;
2469 	phys_addr_t min_addr;
2470 
2471 	PREFIX_PUSH();
2472 	setup_numa_memblock(node_fractions);
2473 
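	/*
	 * Reserve all memory except a size-byte gap straddling the boundary
	 * between the last two nodes.
	 */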
2474 	r2.base = next_node->base + SZ_128;
2475 	r2.size = memblock_end_of_DRAM() - r2.base;
2476 
2477 	r1.size = MEM_SIZE - (r2.size + size);
2478 	r1.base = memblock_start_of_DRAM();
2479 
2480 	min_addr = r1.base + r1.size;
2481 	max_addr = r2.base;
2482 
2483 	memblock_reserve(r1.base, r1.size);
2484 	memblock_reserve(r2.base, r2.size);
2485 
2486 	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
2487 					       min_addr, max_addr,
2488 					       NUMA_NO_NODE);
2489 
2490 	ASSERT_EQ(allocated_ptr, NULL);
2491 
2492 	test_pass_pop();
2493 
2494 	return 0;
2495 }
2496 
2497 /*
2498  * A simple test that tries to allocate a memory region via
2499  * memblock_alloc_node() on a NUMA node with id `nid`. Expect the new region
2500  * to have the correct NUMA node set.
2501  */
2502 static int alloc_node_on_correct_nid(void)
2503 {
2504 	int nid_req = 2;
2505 	void *allocated_ptr = NULL;
2506 #ifdef CONFIG_NUMA
2507 	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
2508 #endif
2509 	phys_addr_t size = SZ_512;
2510 
2511 	PREFIX_PUSH();
2512 	setup_numa_memblock(node_fractions);
2513 
2514 	allocated_ptr = memblock_alloc_node(size, SMP_CACHE_BYTES, nid_req);
2515 
2516 	ASSERT_NE(allocated_ptr, NULL);
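	/* The region's node id can only be verified when CONFIG_NUMA is set */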
2517 #ifdef CONFIG_NUMA
2518 	ASSERT_EQ(nid_req, req_node->nid);
2519 #endif
2520 
2521 	test_pass_pop();
2522 
2523 	return 0;
2524 }
2525 
2526 /* Test case wrappers for NUMA tests */
2527 static int alloc_nid_numa_simple_check(void)
2528 {
2529 	test_print("\tRunning %s...\n", __func__);
2530 	memblock_set_bottom_up(false);
2531 	alloc_nid_top_down_numa_simple_check();
2532 	memblock_set_bottom_up(true);
2533 	alloc_nid_bottom_up_numa_simple_check();
2534 
2535 	return 0;
2536 }
2537 
2538 static int alloc_nid_numa_small_node_check(void)
2539 {
2540 	test_print("\tRunning %s...\n", __func__);
2541 	memblock_set_bottom_up(false);
2542 	alloc_nid_top_down_numa_small_node_check();
2543 	memblock_set_bottom_up(true);
2544 	alloc_nid_bottom_up_numa_small_node_check();
2545 
2546 	return 0;
2547 }
2548 
2549 static int alloc_nid_numa_node_reserved_check(void)
2550 {
2551 	test_print("\tRunning %s...\n", __func__);
2552 	memblock_set_bottom_up(false);
2553 	alloc_nid_top_down_numa_node_reserved_check();
2554 	memblock_set_bottom_up(true);
2555 	alloc_nid_bottom_up_numa_node_reserved_check();
2556 
2557 	return 0;
2558 }
2559 
2560 static int alloc_nid_numa_part_reserved_check(void)
2561 {
2562 	test_print("\tRunning %s...\n", __func__);
2563 	memblock_set_bottom_up(false);
2564 	alloc_nid_top_down_numa_part_reserved_check();
2565 	memblock_set_bottom_up(true);
2566 	alloc_nid_bottom_up_numa_part_reserved_check();
2567 
2568 	return 0;
2569 }
2570 
2571 static int alloc_nid_numa_part_reserved_fallback_check(void)
2572 {
2573 	test_print("\tRunning %s...\n", __func__);
2574 	memblock_set_bottom_up(false);
2575 	alloc_nid_top_down_numa_part_reserved_fallback_check();
2576 	memblock_set_bottom_up(true);
2577 	alloc_nid_bottom_up_numa_part_reserved_fallback_check();
2578 
2579 	return 0;
2580 }
2581 
2582 static int alloc_nid_numa_split_range_low_check(void)
2583 {
2584 	test_print("\tRunning %s...\n", __func__);
2585 	memblock_set_bottom_up(false);
2586 	alloc_nid_top_down_numa_split_range_low_check();
2587 	memblock_set_bottom_up(true);
2588 	alloc_nid_bottom_up_numa_split_range_low_check();
2589 
2590 	return 0;
2591 }
2592 
2593 static int alloc_nid_numa_split_range_high_check(void)
2594 {
2595 	test_print("\tRunning %s...\n", __func__);
2596 	memblock_set_bottom_up(false);
2597 	alloc_nid_top_down_numa_split_range_high_check();
2598 	memblock_set_bottom_up(true);
2599 	alloc_nid_bottom_up_numa_split_range_high_check();
2600 
2601 	return 0;
2602 }
2603 
2604 static int alloc_nid_numa_no_overlap_split_check(void)
2605 {
2606 	test_print("\tRunning %s...\n", __func__);
2607 	memblock_set_bottom_up(false);
2608 	alloc_nid_top_down_numa_no_overlap_split_check();
2609 	memblock_set_bottom_up(true);
2610 	alloc_nid_bottom_up_numa_no_overlap_split_check();
2611 
2612 	return 0;
2613 }
2614 
2615 static int alloc_nid_numa_no_overlap_low_check(void)
2616 {
2617 	test_print("\tRunning %s...\n", __func__);
2618 	memblock_set_bottom_up(false);
2619 	alloc_nid_top_down_numa_no_overlap_low_check();
2620 	memblock_set_bottom_up(true);
2621 	alloc_nid_bottom_up_numa_no_overlap_low_check();
2622 
2623 	return 0;
2624 }
2625 
2626 static int alloc_nid_numa_no_overlap_high_check(void)
2627 {
2628 	test_print("\tRunning %s...\n", __func__);
2629 	memblock_set_bottom_up(false);
2630 	alloc_nid_top_down_numa_no_overlap_high_check();
2631 	memblock_set_bottom_up(true);
2632 	alloc_nid_bottom_up_numa_no_overlap_high_check();
2633 
2634 	return 0;
2635 }
2636 
2637 static int alloc_nid_numa_large_region_check(void)
2638 {
2639 	test_print("\tRunning %s...\n", __func__);
2640 	run_top_down(alloc_nid_numa_large_region_generic_check);
2641 	run_bottom_up(alloc_nid_numa_large_region_generic_check);
2642 
2643 	return 0;
2644 }
2645 
2646 static int alloc_nid_numa_reserved_full_merge_check(void)
2647 {
2648 	test_print("\tRunning %s...\n", __func__);
2649 	run_top_down(alloc_nid_numa_reserved_full_merge_generic_check);
2650 	run_bottom_up(alloc_nid_numa_reserved_full_merge_generic_check);
2651 
2652 	return 0;
2653 }
2654 
2655 static int alloc_nid_numa_split_all_reserved_check(void)
2656 {
2657 	test_print("\tRunning %s...\n", __func__);
2658 	run_top_down(alloc_nid_numa_split_all_reserved_generic_check);
2659 	run_bottom_up(alloc_nid_numa_split_all_reserved_generic_check);
2660 
2661 	return 0;
2662 }
2663 
2664 static int alloc_node_numa_on_correct_nid(void)
2665 {
2666 	test_print("\tRunning %s...\n", __func__);
2667 	run_top_down(alloc_node_on_correct_nid);
2668 	run_bottom_up(alloc_node_on_correct_nid);
2669 
2670 	return 0;
2671 }
2672 
2673 int __memblock_alloc_nid_numa_checks(void)
2674 {
2675 	test_print("Running %s NUMA tests...\n",
2676 		   get_memblock_alloc_nid_name(alloc_nid_test_flags));
2677 
2678 	alloc_nid_numa_simple_check();
2679 	alloc_nid_numa_small_node_check();
2680 	alloc_nid_numa_node_reserved_check();
2681 	alloc_nid_numa_part_reserved_check();
2682 	alloc_nid_numa_part_reserved_fallback_check();
2683 	alloc_nid_numa_split_range_low_check();
2684 	alloc_nid_numa_split_range_high_check();
2685 
2686 	alloc_nid_numa_no_overlap_split_check();
2687 	alloc_nid_numa_no_overlap_low_check();
2688 	alloc_nid_numa_no_overlap_high_check();
2689 	alloc_nid_numa_large_region_check();
2690 	alloc_nid_numa_reserved_full_merge_check();
2691 	alloc_nid_numa_split_all_reserved_check();
2692 
2693 	alloc_node_numa_on_correct_nid();
2694 
2695 	return 0;
2696 }
2697 
2698 static int memblock_alloc_nid_checks_internal(int flags)
2699 {
2700 	alloc_nid_test_flags = flags;
2701 
2702 	prefix_reset();
2703 	prefix_push(get_memblock_alloc_nid_name(flags));
2704 
2705 	reset_memblock_attributes();
2706 	dummy_physical_memory_init();
2707 
2708 	memblock_alloc_nid_range_checks();
2709 	memblock_alloc_nid_numa_checks();
2710 
2711 	dummy_physical_memory_cleanup();
2712 
2713 	prefix_pop();
2714 
2715 	return 0;
2716 }
2717 
2718 int memblock_alloc_nid_checks(void)
2719 {
2720 	memblock_alloc_nid_checks_internal(TEST_F_NONE);
2721 	memblock_alloc_nid_checks_internal(TEST_F_RAW);
2722 
2723 	return 0;
2724 }
2725 
2726 int memblock_alloc_exact_nid_range_checks(void)
2727 {
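	/* TEST_F_EXACT implies raw allocations, so TEST_F_RAW is set as well */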
2728 	alloc_nid_test_flags = (TEST_F_RAW | TEST_F_EXACT);
2729 
2730 	memblock_alloc_nid_range_checks();
2731 
2732 	return 0;
2733 }
2734