// SPDX-License-Identifier: GPL-2.0-or-later
#include <string.h>
#include <linux/memblock.h>
#include "basic_api.h"

#define EXPECTED_MEMBLOCK_REGIONS	128
#define FUNC_ADD			"memblock_add"
#define FUNC_RESERVE			"memblock_reserve"
#define FUNC_REMOVE			"memblock_remove"
#define FUNC_FREE			"memblock_free"

static int memblock_initialization_check(void)
{
	PREFIX_PUSH();

	ASSERT_NE(memblock.memory.regions, NULL);
	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.max, EXPECTED_MEMBLOCK_REGIONS);
	ASSERT_EQ(strcmp(memblock.memory.name, "memory"), 0);

	ASSERT_NE(memblock.reserved.regions, NULL);
	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.max, EXPECTED_MEMBLOCK_REGIONS);
	ASSERT_EQ(strcmp(memblock.reserved.name, "reserved"), 0);

	ASSERT_EQ(memblock.bottom_up, false);
	ASSERT_EQ(memblock.current_limit, MEMBLOCK_ALLOC_ANYWHERE);

	test_pass_pop();

	return 0;
}

/*
 * A simple test that adds a memory block of a specified base address
 * and size to the collection of available memory regions (memblock.memory).
 * Expect to create a new entry. The region counter and total memory get
 * updated.
 */
static int memblock_add_simple_check(void)
{
	struct memblock_region *rgn;

	rgn = &memblock.memory.regions[0];

	struct region r = {
		.base = SZ_1G,
		.size = SZ_4M
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_add(r.base, r.size);

	ASSERT_EQ(rgn->base, r.base);
	ASSERT_EQ(rgn->size, r.size);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, r.size);

	test_pass_pop();

	return 0;
}

/*
 * A simple test that adds a memory block of a specified base address, size,
 * NUMA node and memory flags to the collection of available memory regions.
 * Expect to create a new entry. The region counter and total memory get
 * updated.
 */
static int memblock_add_node_simple_check(void)
{
	struct memblock_region *rgn;

	rgn = &memblock.memory.regions[0];

	struct region r = {
		.base = SZ_1M,
		.size = SZ_16M
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_add_node(r.base, r.size, 1, MEMBLOCK_HOTPLUG);

	ASSERT_EQ(rgn->base, r.base);
	ASSERT_EQ(rgn->size, r.size);
#ifdef CONFIG_NUMA
	ASSERT_EQ(rgn->nid, 1);
#endif
	ASSERT_EQ(rgn->flags, MEMBLOCK_HOTPLUG);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, r.size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to add two memory blocks that don't overlap with one
 * another:
 *
 *  |        +--------+        +--------+  |
 *  |        |   r1   |        |   r2   |  |
 *  +--------+--------+--------+--------+--+
 *
 * Expect to add two correctly initialized entries to the collection of
 * available memory regions (memblock.memory). The total size and
 * region counter fields get updated.
 */
static int memblock_add_disjoint_check(void)
{
	struct memblock_region *rgn1, *rgn2;

	rgn1 = &memblock.memory.regions[0];
	rgn2 = &memblock.memory.regions[1];

	struct region r1 = {
		.base = SZ_1G,
		.size = SZ_8K
	};
	struct region r2 = {
		.base = SZ_1G + SZ_16K,
		.size = SZ_8K
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_add(r2.base, r2.size);

	ASSERT_EQ(rgn1->base, r1.base);
	ASSERT_EQ(rgn1->size, r1.size);

	ASSERT_EQ(rgn2->base, r2.base);
	ASSERT_EQ(rgn2->size, r2.size);

	ASSERT_EQ(memblock.memory.cnt, 2);
	ASSERT_EQ(memblock.memory.total_size, r1.size + r2.size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to add two memory blocks r1 and r2, where r2 overlaps
 * with the beginning of r1 (that is r1.base < r2.base + r2.size):
 *
 *  |    +----+----+------------+          |
 *  |    |    |r2  |     r1     |          |
 *  +----+----+----+------------+----------+
 *       ^    ^
 *       |    |
 *       |    r1.base
 *       |
 *       r2.base
 *
 * Expect to merge the two entries into one region that starts at r2.base
 * and has size of two regions minus their intersection. The total size of
 * the available memory is updated, and the region counter stays the same.
 */
static int memblock_add_overlap_top_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.memory.regions[0];

	struct region r1 = {
		.base = SZ_512M,
		.size = SZ_1G
	};
	struct region r2 = {
		.base = SZ_256M,
		.size = SZ_512M
	};

	PREFIX_PUSH();

	total_size = (r1.base - r2.base) + r1.size;

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_add(r2.base, r2.size);

	ASSERT_EQ(rgn->base, r2.base);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, total_size);

	test_pass_pop();

	return 0;
}
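
/*
 * Note for the two overlap tests above and below: the expected size of the
 * merged region is the distance from the lower base to the upper region's
 * end, i.e. (r1.base - r2.base) + r1.size when r2 starts below r1, and
 * (r2.base - r1.base) + r2.size when r1 starts below r2.
 */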

/*
 * A test that tries to add two memory blocks r1 and r2, where r2 overlaps
 * with the end of r1 (that is r2.base < r1.base + r1.size):
 *
 *  |  +--+------+----------+              |
 *  |  |  | r1   |    r2    |              |
 *  +--+--+------+----------+--------------+
 *     ^  ^
 *     |  |
 *     |  r2.base
 *     |
 *     r1.base
 *
 * Expect to merge the two entries into one region that starts at r1.base
 * and has size of two regions minus their intersection. The total size of
 * the available memory is updated, and the region counter stays the same.
 */
static int memblock_add_overlap_bottom_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.memory.regions[0];

	struct region r1 = {
		.base = SZ_128M,
		.size = SZ_512M
	};
	struct region r2 = {
		.base = SZ_256M,
		.size = SZ_1G
	};

	PREFIX_PUSH();

	total_size = (r2.base - r1.base) + r2.size;

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_add(r2.base, r2.size);

	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to add two memory blocks r1 and r2, where r2 is
 * within the range of r1 (that is r1.base < r2.base &&
 * r2.base + r2.size < r1.base + r1.size):
 *
 *  |   +-------+--+-----------------------+
 *  |   |       |r2|           r1          |
 *  +---+-------+--+-----------------------+
 *      ^
 *      |
 *      r1.base
 *
 * Expect r2 to be merged into r1 so that the existing region is left
 * unchanged. The counter and total size of available memory are not
 * updated.
 */
static int memblock_add_within_check(void)
{
	struct memblock_region *rgn;

	rgn = &memblock.memory.regions[0];

	struct region r1 = {
		.base = SZ_8M,
		.size = SZ_32M
	};
	struct region r2 = {
		.base = SZ_16M,
		.size = SZ_1M
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_add(r2.base, r2.size);

	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, r1.size);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, r1.size);

	test_pass_pop();

	return 0;
}

/*
 * A simple test that tries to add the same memory block twice. Expect
 * the counter and total size of available memory to not be updated.
 */
static int memblock_add_twice_check(void)
{
	struct region r = {
		.base = SZ_16K,
		.size = SZ_2M
	};

	PREFIX_PUSH();

	reset_memblock_regions();

	memblock_add(r.base, r.size);
	memblock_add(r.base, r.size);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, r.size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to add two memory blocks that don't overlap with one
 * another and then add a third memory block in the space between the first two:
 *
 *  |        +--------+--------+--------+  |
 *  |        |   r1   |   r3   |   r2   |  |
 *  +--------+--------+--------+--------+--+
 *
 * Expect to merge the three entries into one region that starts at r1.base
 * and has size of r1.size + r2.size + r3.size. The region counter and total
 * size of the available memory are updated.
 */
static int memblock_add_between_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.memory.regions[0];

	struct region r1 = {
		.base = SZ_1G,
		.size = SZ_8K
	};
	struct region r2 = {
		.base = SZ_1G + SZ_16K,
		.size = SZ_8K
	};
	struct region r3 = {
		.base = SZ_1G + SZ_8K,
		.size = SZ_8K
	};

	PREFIX_PUSH();

	total_size = r1.size + r2.size + r3.size;

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_add(r2.base, r2.size);
	memblock_add(r3.base, r3.size);

	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A simple test that tries to add a memory block r when r extends past
 * PHYS_ADDR_MAX:
 *
 *                               +--------+
 *                               |   r    |
 *                               +--------+
 *  |                            +----+
 *  |                            | rgn|
 *  +----------------------------+----+
 *
 * Expect to add a memory block of size PHYS_ADDR_MAX - r.base. Expect the
 * total size of available memory and the counter to be updated.
 */
static int memblock_add_near_max_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.memory.regions[0];

	struct region r = {
		.base = PHYS_ADDR_MAX - SZ_1M,
		.size = SZ_2M
	};

	PREFIX_PUSH();

	total_size = PHYS_ADDR_MAX - r.base;

	reset_memblock_regions();
	memblock_add(r.base, r.size);

	ASSERT_EQ(rgn->base, r.base);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, total_size);

	test_pass_pop();

	return 0;
}

static int memblock_add_checks(void)
{
	prefix_reset();
	prefix_push(FUNC_ADD);
	test_print("Running %s tests...\n", FUNC_ADD);

	memblock_add_simple_check();
	memblock_add_node_simple_check();
	memblock_add_disjoint_check();
	memblock_add_overlap_top_check();
	memblock_add_overlap_bottom_check();
	memblock_add_within_check();
	memblock_add_twice_check();
	memblock_add_between_check();
	memblock_add_near_max_check();

	prefix_pop();

	return 0;
}

/*
 * A simple test that marks a memory block of a specified base address
 * and size as reserved and adds it to the collection of reserved memory
 * regions (memblock.reserved). Expect to create a new entry. The region
 * counter and total memory size are updated.
 */
static int memblock_reserve_simple_check(void)
{
	struct memblock_region *rgn;

	rgn = &memblock.reserved.regions[0];

	struct region r = {
		.base = SZ_2G,
		.size = SZ_128M
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_reserve(r.base, r.size);

	ASSERT_EQ(rgn->base, r.base);
	ASSERT_EQ(rgn->size, r.size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, r.size);

	test_pass_pop();

	return 0;
}
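
/*
 * The memblock_reserve tests that follow mirror the memblock_add cases
 * above, but operate on the collection of reserved memory regions
 * (memblock.reserved) rather than on memblock.memory.
 */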

/*
 * A test that tries to mark two memory blocks that don't overlap as reserved:
 *
 *  |        +--+      +----------------+  |
 *  |        |r1|      |       r2       |  |
 *  +--------+--+------+----------------+--+
 *
 * Expect to add two entries to the collection of reserved memory regions
 * (memblock.reserved). The total size and region counter for
 * memblock.reserved are updated.
 */
static int memblock_reserve_disjoint_check(void)
{
	struct memblock_region *rgn1, *rgn2;

	rgn1 = &memblock.reserved.regions[0];
	rgn2 = &memblock.reserved.regions[1];

	struct region r1 = {
		.base = SZ_256M,
		.size = SZ_16M
	};
	struct region r2 = {
		.base = SZ_512M,
		.size = SZ_512M
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	ASSERT_EQ(rgn1->base, r1.base);
	ASSERT_EQ(rgn1->size, r1.size);

	ASSERT_EQ(rgn2->base, r2.base);
	ASSERT_EQ(rgn2->size, r2.size);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, r1.size + r2.size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to mark two memory blocks r1 and r2 as reserved,
 * where r2 overlaps with the beginning of r1 (that is
 * r1.base < r2.base + r2.size):
 *
 *  |  +--------------+--+--------------+  |
 *  |  |      r2      |  |      r1      |  |
 *  +--+--------------+--+--------------+--+
 *     ^              ^
 *     |              |
 *     |              r1.base
 *     |
 *     r2.base
 *
 * Expect to merge two entries into one region that starts at r2.base and
 * has size of two regions minus their intersection. The total size of the
 * reserved memory is updated, and the region counter is not updated.
 */
static int memblock_reserve_overlap_top_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.reserved.regions[0];

	struct region r1 = {
		.base = SZ_1G,
		.size = SZ_1G
	};
	struct region r2 = {
		.base = SZ_128M,
		.size = SZ_1G
	};

	PREFIX_PUSH();

	total_size = (r1.base - r2.base) + r1.size;

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	ASSERT_EQ(rgn->base, r2.base);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to mark two memory blocks r1 and r2 as reserved,
 * where r2 overlaps with the end of r1 (that is
 * r2.base < r1.base + r1.size):
 *
 *  |  +--------------+--+--------------+  |
 *  |  |      r1      |  |      r2      |  |
 *  +--+--------------+--+--------------+--+
 *     ^              ^
 *     |              |
 *     |              r2.base
 *     |
 *     r1.base
 *
 * Expect to merge two entries into one region that starts at r1.base and
 * has size of two regions minus their intersection. The total size of the
 * reserved memory is updated, and the region counter is not updated.
 */
static int memblock_reserve_overlap_bottom_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.reserved.regions[0];

	struct region r1 = {
		.base = SZ_2K,
		.size = SZ_128K
	};
	struct region r2 = {
		.base = SZ_128K,
		.size = SZ_128K
	};

	PREFIX_PUSH();

	total_size = (r2.base - r1.base) + r2.size;

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to mark two memory blocks r1 and r2 as reserved,
 * where r2 is within the range of r1 (that is
 * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)):
 *
 *  | +-----+--+---------------------------|
 *  | |     |r2|            r1             |
 *  +-+-----+--+---------------------------+
 *    ^     ^
 *    |     |
 *    |     r2.base
 *    |
 *    r1.base
 *
 * Expect r2 to be merged into r1 so that the existing region is left
 * unchanged. The counter and total size of reserved memory are not
 * updated.
 */
static int memblock_reserve_within_check(void)
{
	struct memblock_region *rgn;

	rgn = &memblock.reserved.regions[0];

	struct region r1 = {
		.base = SZ_1M,
		.size = SZ_8M
	};
	struct region r2 = {
		.base = SZ_2M,
		.size = SZ_64K
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, r1.size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, r1.size);

	test_pass_pop();

	return 0;
}

/*
 * A simple test that tries to reserve the same memory block twice.
 * Expect the region counter and total size of reserved memory to not
 * be updated.
 */
static int memblock_reserve_twice_check(void)
{
	struct region r = {
		.base = SZ_16K,
		.size = SZ_2M
	};

	PREFIX_PUSH();

	reset_memblock_regions();

	memblock_reserve(r.base, r.size);
	memblock_reserve(r.base, r.size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, r.size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to mark two memory blocks that don't overlap as reserved
 * and then reserve a third memory block in the space between the first two:
 *
 *  |        +--------+--------+--------+  |
 *  |        |   r1   |   r3   |   r2   |  |
 *  +--------+--------+--------+--------+--+
 *
 * Expect to merge the three entries into one reserved region that starts at
 * r1.base and has size of r1.size + r2.size + r3.size. The region counter and
 * total size for memblock.reserved are updated.
 */
static int memblock_reserve_between_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.reserved.regions[0];

	struct region r1 = {
		.base = SZ_1G,
		.size = SZ_8K
	};
	struct region r2 = {
		.base = SZ_1G + SZ_16K,
		.size = SZ_8K
	};
	struct region r3 = {
		.base = SZ_1G + SZ_8K,
		.size = SZ_8K
	};

	PREFIX_PUSH();

	total_size = r1.size + r2.size + r3.size;

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);
	memblock_reserve(r3.base, r3.size);

	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A simple test that tries to reserve a memory block r when r extends past
 * PHYS_ADDR_MAX:
 *
 *                               +--------+
 *                               |   r    |
 *                               +--------+
 *  |                            +----+
 *  |                            | rgn|
 *  +----------------------------+----+
 *
 * Expect to reserve a memory block of size PHYS_ADDR_MAX - r.base. Expect the
 * total size of reserved memory and the counter to be updated.
 */
static int memblock_reserve_near_max_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.reserved.regions[0];

	struct region r = {
		.base = PHYS_ADDR_MAX - SZ_1M,
		.size = SZ_2M
	};

	PREFIX_PUSH();

	total_size = PHYS_ADDR_MAX - r.base;

	reset_memblock_regions();
	memblock_reserve(r.base, r.size);

	ASSERT_EQ(rgn->base, r.base);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

static int memblock_reserve_checks(void)
{
	prefix_reset();
	prefix_push(FUNC_RESERVE);
	test_print("Running %s tests...\n", FUNC_RESERVE);

	memblock_reserve_simple_check();
	memblock_reserve_disjoint_check();
	memblock_reserve_overlap_top_check();
	memblock_reserve_overlap_bottom_check();
	memblock_reserve_within_check();
	memblock_reserve_twice_check();
	memblock_reserve_between_check();
	memblock_reserve_near_max_check();

	prefix_pop();

	return 0;
}
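
/*
 * The memblock_remove tests below operate on the available memory in
 * memblock.memory; the memblock_free tests further down exercise the same
 * set of cases against memblock.reserved.
 */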

/*
 * A simple test that tries to remove a region r1 from the array of
 * available memory regions. By "removing" a region we mean overwriting it
 * with the next region r2 in memblock.memory:
 *
 *  |  ......          +----------------+  |
 *  |  : r1 :          |       r2       |  |
 *  +--+----+----------+----------------+--+
 *                     ^
 *                     |
 *                     rgn.base
 *
 * Expect to add two memory blocks r1 and r2 and then remove r1 so that
 * r2 is the first available region. The region counter and total size
 * are updated.
 */
static int memblock_remove_simple_check(void)
{
	struct memblock_region *rgn;

	rgn = &memblock.memory.regions[0];

	struct region r1 = {
		.base = SZ_2K,
		.size = SZ_4K
	};
	struct region r2 = {
		.base = SZ_128K,
		.size = SZ_4M
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_add(r2.base, r2.size);
	memblock_remove(r1.base, r1.size);

	ASSERT_EQ(rgn->base, r2.base);
	ASSERT_EQ(rgn->size, r2.size);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, r2.size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to remove a region r2 that was not registered as
 * available memory (i.e. has no corresponding entry in memblock.memory):
 *
 *                     +----------------+
 *                     |       r2       |
 *                     +----------------+
 *  |  +----+                              |
 *  |  | r1 |                              |
 *  +--+----+------------------------------+
 *     ^
 *     |
 *     rgn.base
 *
 * Expect the array, regions counter and total size to not be modified.
 */
static int memblock_remove_absent_check(void)
{
	struct memblock_region *rgn;

	rgn = &memblock.memory.regions[0];

	struct region r1 = {
		.base = SZ_512K,
		.size = SZ_4M
	};
	struct region r2 = {
		.base = SZ_64M,
		.size = SZ_1G
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_remove(r2.base, r2.size);

	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, r1.size);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, r1.size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to remove a region r2 that overlaps with the
 * beginning of the already existing entry r1
 * (that is r1.base < r2.base + r2.size):
 *
 *      +-----------------+
 *      |        r2       |
 *      +-----------------+
 *  |          .........+--------+        |
 *  |          :     r1 |  rgn   |        |
 *  +----------+--------+--------+--------+
 *             ^        ^
 *             |        |
 *             |        rgn.base
 *             r1.base
 *
 * Expect that only the intersection of both regions is removed from the
 * available memory pool. The regions counter and total size are updated.
 */
static int memblock_remove_overlap_top_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t r1_end, r2_end, total_size;

	rgn = &memblock.memory.regions[0];

	struct region r1 = {
		.base = SZ_32M,
		.size = SZ_32M
	};
	struct region r2 = {
		.base = SZ_16M,
		.size = SZ_32M
	};

	PREFIX_PUSH();

	r1_end = r1.base + r1.size;
	r2_end = r2.base + r2.size;
	total_size = r1_end - r2_end;

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_remove(r2.base, r2.size);

	ASSERT_EQ(rgn->base, r2_end);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to remove a region r2 that overlaps with the end of
 * the already existing region r1 (that is r2.base < r1.base + r1.size):
 *
 *        +--------------------------------+
 *        |               r2               |
 *        +--------------------------------+
 *  | +---+.....                               |
 *  | |rgn| r1 :                               |
 *  +-+---+----+-------------------------------+
 *    ^
 *    |
 *    r1.base
 *
 * Expect that only the intersection of both regions is removed from the
 * available memory pool. The regions counter and total size are updated.
 */
static int memblock_remove_overlap_bottom_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.memory.regions[0];

	struct region r1 = {
		.base = SZ_2M,
		.size = SZ_64M
	};
	struct region r2 = {
		.base = SZ_32M,
		.size = SZ_256M
	};

	PREFIX_PUSH();

	total_size = r2.base - r1.base;

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_remove(r2.base, r2.size);

	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to remove a region r2 that is within the range of
 * the already existing entry r1 (that is
 * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)):
 *
 *                  +----+
 *                  | r2 |
 *                  +----+
 *  | +-------------+....+---------------+ |
 *  | |     rgn1    | r1 |      rgn2     | |
 *  +-+-------------+----+---------------+-+
 *    ^
 *    |
 *    r1.base
 *
 * Expect that the region is split into two - one that ends at r2.base and
 * another that starts at r2.base + r2.size, with appropriate sizes. The
 * region counter and total size are updated.
 */
static int memblock_remove_within_check(void)
{
	struct memblock_region *rgn1, *rgn2;
	phys_addr_t r1_size, r2_size, total_size;

	rgn1 = &memblock.memory.regions[0];
	rgn2 = &memblock.memory.regions[1];

	struct region r1 = {
		.base = SZ_1M,
		.size = SZ_32M
	};
	struct region r2 = {
		.base = SZ_16M,
		.size = SZ_1M
	};

	PREFIX_PUSH();

	r1_size = r2.base - r1.base;
	r2_size = (r1.base + r1.size) - (r2.base + r2.size);
	total_size = r1_size + r2_size;

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_remove(r2.base, r2.size);

	ASSERT_EQ(rgn1->base, r1.base);
	ASSERT_EQ(rgn1->size, r1_size);

	ASSERT_EQ(rgn2->base, r2.base + r2.size);
	ASSERT_EQ(rgn2->size, r2_size);

	ASSERT_EQ(memblock.memory.cnt, 2);
	ASSERT_EQ(memblock.memory.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A simple test that tries to remove a region r1 from the array of
 * available memory regions when r1 is the only available region.
 * Expect to add a memory block r1 and then remove r1 so that a dummy
 * region is added. The region counter stays the same, and the total size
 * is updated.
 */
static int memblock_remove_only_region_check(void)
{
	struct memblock_region *rgn;

	rgn = &memblock.memory.regions[0];

	struct region r1 = {
		.base = SZ_2K,
		.size = SZ_4K
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_remove(r1.base, r1.size);

	ASSERT_EQ(rgn->base, 0);
	ASSERT_EQ(rgn->size, 0);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, 0);

	test_pass_pop();

	return 0;
}

/*
 * A simple test that tries to remove a region r2 from the array of
 * available memory regions when r2 extends past PHYS_ADDR_MAX:
 *
 *                               +--------+
 *                               |   r2   |
 *                               +--------+
 *  |                        +---+....+
 *  |                        |rgn|    |
 *  +------------------------+---+----+
 *
 * Expect that only the portion between PHYS_ADDR_MAX and r2.base is removed.
 * Expect the total size of available memory to be updated and the counter to
 * not be updated.
 */
static int memblock_remove_near_max_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.memory.regions[0];

	struct region r1 = {
		.base = PHYS_ADDR_MAX - SZ_2M,
		.size = SZ_2M
	};

	struct region r2 = {
		.base = PHYS_ADDR_MAX - SZ_1M,
		.size = SZ_2M
	};

	PREFIX_PUSH();

	total_size = r1.size - (PHYS_ADDR_MAX - r2.base);

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_remove(r2.base, r2.size);

	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, total_size);

	test_pass_pop();

	return 0;
}
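
/*
 * Note: the near_max test above relies on memblock clamping the end of the
 * requested range at PHYS_ADDR_MAX (memblock_cap_size() in mm/memblock.c),
 * which is why only the part of r2 below PHYS_ADDR_MAX is removed.
 */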

/*
 * A test that tries to remove a region r3 that overlaps with two existing
 * regions r1 and r2:
 *
 *            +----------------+
 *            |       r3       |
 *            +----------------+
 *  |    +----+.....   ........+--------+     |
 *  |    |    |r1  :   :       |r2      |     |
 *  +----+----+----+---+-------+--------+-----+
 *
 * Expect that only the intersections of r1 with r3 and r2 with r3 are removed
 * from the available memory pool. Expect the total size of available memory to
 * be updated and the counter to not be updated.
 */
static int memblock_remove_overlap_two_check(void)
{
	struct memblock_region *rgn1, *rgn2;
	phys_addr_t new_r1_size, new_r2_size, r2_end, r3_end, total_size;

	rgn1 = &memblock.memory.regions[0];
	rgn2 = &memblock.memory.regions[1];

	struct region r1 = {
		.base = SZ_16M,
		.size = SZ_32M
	};
	struct region r2 = {
		.base = SZ_64M,
		.size = SZ_64M
	};
	struct region r3 = {
		.base = SZ_32M,
		.size = SZ_64M
	};

	PREFIX_PUSH();

	r2_end = r2.base + r2.size;
	r3_end = r3.base + r3.size;
	new_r1_size = r3.base - r1.base;
	new_r2_size = r2_end - r3_end;
	total_size = new_r1_size + new_r2_size;

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_add(r2.base, r2.size);
	memblock_remove(r3.base, r3.size);

	ASSERT_EQ(rgn1->base, r1.base);
	ASSERT_EQ(rgn1->size, new_r1_size);

	ASSERT_EQ(rgn2->base, r3_end);
	ASSERT_EQ(rgn2->size, new_r2_size);

	ASSERT_EQ(memblock.memory.cnt, 2);
	ASSERT_EQ(memblock.memory.total_size, total_size);

	test_pass_pop();

	return 0;
}

static int memblock_remove_checks(void)
{
	prefix_reset();
	prefix_push(FUNC_REMOVE);
	test_print("Running %s tests...\n", FUNC_REMOVE);

	memblock_remove_simple_check();
	memblock_remove_absent_check();
	memblock_remove_overlap_top_check();
	memblock_remove_overlap_bottom_check();
	memblock_remove_within_check();
	memblock_remove_only_region_check();
	memblock_remove_near_max_check();
	memblock_remove_overlap_two_check();

	prefix_pop();

	return 0;
}

/*
 * A simple test that tries to free a memory block r1 that was marked
 * earlier as reserved. By "freeing" a region we mean overwriting it with
 * the next entry r2 in memblock.reserved:
 *
 *  |              ......           +----+  |
 *  |              : r1 :           | r2 |  |
 *  +--------------+----+-----------+----+--+
 *                                  ^
 *                                  |
 *                                  rgn.base
 *
 * Expect to reserve two memory regions and then overwrite the r1 entry
 * with the value of r2. The region counter and total size are updated.
 */
static int memblock_free_simple_check(void)
{
	struct memblock_region *rgn;

	rgn = &memblock.reserved.regions[0];

	struct region r1 = {
		.base = SZ_4M,
		.size = SZ_1M
	};
	struct region r2 = {
		.base = SZ_8M,
		.size = SZ_1M
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);
	memblock_free((void *)r1.base, r1.size);

	ASSERT_EQ(rgn->base, r2.base);
	ASSERT_EQ(rgn->size, r2.size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, r2.size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to free a region r2 that was not marked as reserved
 * (i.e. has no corresponding entry in memblock.reserved):
 *
 *                     +----------------+
 *                     |       r2       |
 *                     +----------------+
 *  |  +----+                              |
 *  |  | r1 |                              |
 *  +--+----+------------------------------+
 *     ^
 *     |
 *     rgn.base
 *
 * The array, regions counter and total size are not modified.
 */
static int memblock_free_absent_check(void)
{
	struct memblock_region *rgn;

	rgn = &memblock.reserved.regions[0];

	struct region r1 = {
		.base = SZ_2M,
		.size = SZ_8K
	};
	struct region r2 = {
		.base = SZ_16M,
		.size = SZ_128M
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_free((void *)r2.base, r2.size);

	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, r1.size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, r1.size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to free a region r2 that overlaps with the beginning
 * of the already existing entry r1 (that is r1.base < r2.base + r2.size):
 *
 *       +----+
 *       | r2 |
 *       +----+
 *  |    ...+--------------+               |
 *  |    :  |      r1      |               |
 *  +----+--+--------------+---------------+
 *       ^  ^
 *       |  |
 *       |  rgn.base
 *       |
 *       r1.base
 *
 * Expect that only the intersection of both regions is freed. The
 * regions counter and total size are updated.
 */
static int memblock_free_overlap_top_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.reserved.regions[0];

	struct region r1 = {
		.base = SZ_8M,
		.size = SZ_32M
	};
	struct region r2 = {
		.base = SZ_1M,
		.size = SZ_8M
	};

	PREFIX_PUSH();

	total_size = (r1.size + r1.base) - (r2.base + r2.size);

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_free((void *)r2.base, r2.size);

	ASSERT_EQ(rgn->base, r2.base + r2.size);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to free a region r2 that overlaps with the end of
 * the already existing entry r1 (that is r2.base < r1.base + r1.size):
 *
 *                   +----------------+
 *                   |       r2       |
 *                   +----------------+
 *  |    +-----------+.....                |
 *  |    |    r1     |    :                |
 *  +----+-----------+----+----------------+
 *
 * Expect that only the intersection of both regions is freed. The
 * regions counter and total size are updated.
 */
static int memblock_free_overlap_bottom_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.reserved.regions[0];

	struct region r1 = {
		.base = SZ_8M,
		.size = SZ_32M
	};
	struct region r2 = {
		.base = SZ_32M,
		.size = SZ_32M
	};

	PREFIX_PUSH();

	total_size = r2.base - r1.base;

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_free((void *)r2.base, r2.size);

	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to free a region r2 that is within the range of the
 * already existing entry r1 (that is
 * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)):
 *
 *                    +----+
 *                    | r2 |
 *                    +----+
 *  |    +------------+....+---------------+
 *  |    |    rgn1    | r1 |      rgn2     |
 *  +----+------------+----+---------------+
 *       ^
 *       |
 *       r1.base
 *
 * Expect that the region is split into two - one that ends at r2.base and
 * another that starts at r2.base + r2.size, with appropriate sizes. The
 * region counter and total size fields are updated.
 */
static int memblock_free_within_check(void)
{
	struct memblock_region *rgn1, *rgn2;
	phys_addr_t r1_size, r2_size, total_size;

	rgn1 = &memblock.reserved.regions[0];
	rgn2 = &memblock.reserved.regions[1];

	struct region r1 = {
		.base = SZ_1M,
		.size = SZ_8M
	};
	struct region r2 = {
		.base = SZ_4M,
		.size = SZ_1M
	};

	PREFIX_PUSH();

	r1_size = r2.base - r1.base;
	r2_size = (r1.base + r1.size) - (r2.base + r2.size);
	total_size = r1_size + r2_size;

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_free((void *)r2.base, r2.size);

	ASSERT_EQ(rgn1->base, r1.base);
	ASSERT_EQ(rgn1->size, r1_size);

	ASSERT_EQ(rgn2->base, r2.base + r2.size);
	ASSERT_EQ(rgn2->size, r2_size);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A simple test that tries to free a memory block r1 that was marked
 * earlier as reserved when r1 is the only available region.
 * Expect to reserve a memory block r1 and then free r1 so that r1 is
 * overwritten with a dummy region. The region counter stays the same,
 * and the total size is updated.
 */
static int memblock_free_only_region_check(void)
{
	struct memblock_region *rgn;

	rgn = &memblock.reserved.regions[0];

	struct region r1 = {
		.base = SZ_2K,
		.size = SZ_4K
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_free((void *)r1.base, r1.size);

	ASSERT_EQ(rgn->base, 0);
	ASSERT_EQ(rgn->size, 0);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, 0);

	test_pass_pop();

	return 0;
}

/*
 * A simple test that tries to free a region r2 when r2 extends past
 * PHYS_ADDR_MAX:
 *
 *                               +--------+
 *                               |   r2   |
 *                               +--------+
 *  |                        +---+....+
 *  |                        |rgn|    |
 *  +------------------------+---+----+
 *
 * Expect that only the portion between PHYS_ADDR_MAX and r2.base is freed.
 * Expect the total size of reserved memory to be updated and the counter to
 * not be updated.
 */
static int memblock_free_near_max_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.reserved.regions[0];

	struct region r1 = {
		.base = PHYS_ADDR_MAX - SZ_2M,
		.size = SZ_2M
	};

	struct region r2 = {
		.base = PHYS_ADDR_MAX - SZ_1M,
		.size = SZ_2M
	};

	PREFIX_PUSH();

	total_size = r1.size - (PHYS_ADDR_MAX - r2.base);

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_free((void *)r2.base, r2.size);

	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to free a reserved region r3 that overlaps with two
 * existing reserved regions r1 and r2:
 *
 *            +----------------+
 *            |       r3       |
 *            +----------------+
 *  |    +----+.....   ........+--------+     |
 *  |    |    |r1  :   :       |r2      |     |
 *  +----+----+----+---+-------+--------+-----+
 *
 * Expect that only the intersections of r1 with r3 and r2 with r3 are freed
 * from the collection of reserved memory. Expect the total size of reserved
 * memory to be updated and the counter to not be updated.
 */
static int memblock_free_overlap_two_check(void)
{
	struct memblock_region *rgn1, *rgn2;
	phys_addr_t new_r1_size, new_r2_size, r2_end, r3_end, total_size;

	rgn1 = &memblock.reserved.regions[0];
	rgn2 = &memblock.reserved.regions[1];

	struct region r1 = {
		.base = SZ_16M,
		.size = SZ_32M
	};
	struct region r2 = {
		.base = SZ_64M,
		.size = SZ_64M
	};
	struct region r3 = {
		.base = SZ_32M,
		.size = SZ_64M
	};

	PREFIX_PUSH();

	r2_end = r2.base + r2.size;
	r3_end = r3.base + r3.size;
	new_r1_size = r3.base - r1.base;
	new_r2_size = r2_end - r3_end;
	total_size = new_r1_size + new_r2_size;

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);
	memblock_free((void *)r3.base, r3.size);

	ASSERT_EQ(rgn1->base, r1.base);
	ASSERT_EQ(rgn1->size, new_r1_size);

	ASSERT_EQ(rgn2->base, r3_end);
	ASSERT_EQ(rgn2->size, new_r2_size);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

static int memblock_free_checks(void)
{
	prefix_reset();
	prefix_push(FUNC_FREE);
	test_print("Running %s tests...\n", FUNC_FREE);

	memblock_free_simple_check();
	memblock_free_absent_check();
	memblock_free_overlap_top_check();
	memblock_free_overlap_bottom_check();
	memblock_free_within_check();
	memblock_free_only_region_check();
	memblock_free_near_max_check();
	memblock_free_overlap_two_check();

	prefix_pop();

	return 0;
}

static int memblock_set_bottom_up_check(void)
{
	prefix_push("memblock_set_bottom_up");

	memblock_set_bottom_up(false);
	ASSERT_EQ(memblock.bottom_up, false);
	memblock_set_bottom_up(true);
	ASSERT_EQ(memblock.bottom_up, true);

	reset_memblock_attributes();
	test_pass_pop();

	return 0;
}

static int memblock_bottom_up_check(void)
{
	prefix_push("memblock_bottom_up");

	memblock_set_bottom_up(false);
	ASSERT_EQ(memblock_bottom_up(), memblock.bottom_up);
	ASSERT_EQ(memblock_bottom_up(), false);
	memblock_set_bottom_up(true);
	ASSERT_EQ(memblock_bottom_up(), memblock.bottom_up);
	ASSERT_EQ(memblock_bottom_up(), true);

	reset_memblock_attributes();
	test_pass_pop();

	return 0;
}

static int memblock_bottom_up_checks(void)
{
	test_print("Running memblock_*bottom_up tests...\n");

	prefix_reset();
	memblock_set_bottom_up_check();
	prefix_reset();
	memblock_bottom_up_check();

	return 0;
}

int memblock_basic_checks(void)
{
	memblock_initialization_check();
	memblock_add_checks();
	memblock_reserve_checks();
	memblock_remove_checks();
	memblock_free_checks();
	memblock_bottom_up_checks();

	return 0;
}