// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 * Copyright © 2022 Maíra Canal <mairacanal@riseup.net>
 */

#include <kunit/test.h>

#include <linux/prime_numbers.h>
#include <linux/sched/signal.h>
#include <linux/sizes.h>

#include <drm/drm_buddy.h>

#include "../lib/drm_random.h"

static unsigned int random_seed;

static inline u64 get_size(int order, u64 chunk_size)
{
	return (1 << order) * chunk_size;
}

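/*
 * Range-biased allocation: every allocation must land within the caller's
 * [start, end) window, and requests that cannot fit the window must fail.
 */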
static void drm_test_buddy_alloc_range_bias(struct kunit *test)
{
	u32 mm_size, ps, bias_size, bias_start, bias_end, bias_rem;
	DRM_RND_STATE(prng, random_seed);
	unsigned int i, count, *order;
	struct drm_buddy mm;
	LIST_HEAD(allocated);

	bias_size = SZ_1M;
	ps = roundup_pow_of_two(prandom_u32_state(&prng) % bias_size);
	ps = max(SZ_4K, ps);
	mm_size = (SZ_8M-1) & ~(ps-1); /* Multiple roots */

	kunit_info(test, "mm_size=%u, ps=%u\n", mm_size, ps);

	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
			       "buddy_init failed\n");

	count = mm_size / bias_size;
	order = drm_random_order(count, &prng);
	KUNIT_EXPECT_TRUE(test, order);

	/*
	 * Idea is to split the address space into uniform bias ranges, and then
	 * in some random order allocate within each bias, using various
	 * patterns within. This should detect if allocations leak out from a
	 * given bias, for example.
	 */

	for (i = 0; i < count; i++) {
		LIST_HEAD(tmp);
		u32 size;

		bias_start = order[i] * bias_size;
		bias_end = bias_start + bias_size;
		bias_rem = bias_size;

		/* internal round_up too big */
		KUNIT_ASSERT_TRUE_MSG(test,
				      drm_buddy_alloc_blocks(&mm, bias_start,
							     bias_end, bias_size + ps, bias_size,
							     &allocated,
							     DRM_BUDDY_RANGE_ALLOCATION),
				      "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
				      bias_start, bias_end, bias_size, bias_size);

		/* size too big */
		KUNIT_ASSERT_TRUE_MSG(test,
				      drm_buddy_alloc_blocks(&mm, bias_start,
							     bias_end, bias_size + ps, ps,
							     &allocated,
							     DRM_BUDDY_RANGE_ALLOCATION),
				      "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
				      bias_start, bias_end, bias_size + ps, ps);

		/* bias range too small for size */
		KUNIT_ASSERT_TRUE_MSG(test,
				      drm_buddy_alloc_blocks(&mm, bias_start + ps,
							     bias_end, bias_size, ps,
							     &allocated,
							     DRM_BUDDY_RANGE_ALLOCATION),
				      "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
				      bias_start + ps, bias_end, bias_size, ps);

		/* bias misaligned */
		KUNIT_ASSERT_TRUE_MSG(test,
				      drm_buddy_alloc_blocks(&mm, bias_start + ps,
							     bias_end - ps,
							     bias_size >> 1, bias_size >> 1,
							     &allocated,
							     DRM_BUDDY_RANGE_ALLOCATION),
				      "buddy_alloc h didn't fail with bias(%x-%x), size=%u, ps=%u\n",
				      bias_start + ps, bias_end - ps, bias_size >> 1, bias_size >> 1);

		/* single big page */
		KUNIT_ASSERT_FALSE_MSG(test,
				       drm_buddy_alloc_blocks(&mm, bias_start,
							      bias_end, bias_size, bias_size,
							      &tmp,
							      DRM_BUDDY_RANGE_ALLOCATION),
				       "buddy_alloc i failed with bias(%x-%x), size=%u, ps=%u\n",
				       bias_start, bias_end, bias_size, bias_size);
		drm_buddy_free_list(&mm, &tmp, 0);

		/* single page with internal round_up */
		KUNIT_ASSERT_FALSE_MSG(test,
				       drm_buddy_alloc_blocks(&mm, bias_start,
							      bias_end, ps, bias_size,
							      &tmp,
							      DRM_BUDDY_RANGE_ALLOCATION),
				       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
				       bias_start, bias_end, ps, bias_size);
		drm_buddy_free_list(&mm, &tmp, 0);

		/* random size within */
		size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
		if (size)
			KUNIT_ASSERT_FALSE_MSG(test,
					       drm_buddy_alloc_blocks(&mm, bias_start,
								      bias_end, size, ps,
								      &tmp,
								      DRM_BUDDY_RANGE_ALLOCATION),
					       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
					       bias_start, bias_end, size, ps);

		bias_rem -= size;
		/* too big for current avail */
		KUNIT_ASSERT_TRUE_MSG(test,
				      drm_buddy_alloc_blocks(&mm, bias_start,
							     bias_end, bias_rem + ps, ps,
							     &allocated,
							     DRM_BUDDY_RANGE_ALLOCATION),
				      "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
				      bias_start, bias_end, bias_rem + ps, ps);

		if (bias_rem) {
			/* random fill of the remainder */
			size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
			size = max(size, ps);

			KUNIT_ASSERT_FALSE_MSG(test,
					       drm_buddy_alloc_blocks(&mm, bias_start,
								      bias_end, size, ps,
								      &allocated,
								      DRM_BUDDY_RANGE_ALLOCATION),
					       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
					       bias_start, bias_end, size, ps);
			/*
			 * Intentionally allow some space to be left
			 * unallocated, and ideally not always on the bias
			 * boundaries.
			 */
			drm_buddy_free_list(&mm, &tmp, 0);
		} else {
			list_splice_tail(&tmp, &allocated);
		}
	}

	kfree(order);
	drm_buddy_free_list(&mm, &allocated, 0);
	drm_buddy_fini(&mm);

	/*
	 * Something more free-form. Idea is to pick a random starting bias
	 * range within the address space and then start filling it up. Also
	 * randomly grow the bias range in both directions as we go along. This
	 * should give us bias start/end which is not always uniform like above,
	 * and in some cases will require the allocator to jump over already
	 * allocated nodes in the middle of the address space.
	 */

	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
			       "buddy_init failed\n");

	bias_start = round_up(prandom_u32_state(&prng) % (mm_size - ps), ps);
	bias_end = round_up(bias_start + prandom_u32_state(&prng) % (mm_size - bias_start), ps);
	bias_end = max(bias_end, bias_start + ps);
	bias_rem = bias_end - bias_start;

	do {
		u32 size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);

		KUNIT_ASSERT_FALSE_MSG(test,
				       drm_buddy_alloc_blocks(&mm, bias_start,
							      bias_end, size, ps,
							      &allocated,
							      DRM_BUDDY_RANGE_ALLOCATION),
				       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
				       bias_start, bias_end, size, ps);
		bias_rem -= size;

		/*
		 * Try to randomly grow the bias range in both directions, or
		 * only one, or perhaps don't grow at all.
		 */
		do {
			u32 old_bias_start = bias_start;
			u32 old_bias_end = bias_end;

			if (bias_start)
				bias_start -= round_up(prandom_u32_state(&prng) % bias_start, ps);
			if (bias_end != mm_size)
				bias_end += round_up(prandom_u32_state(&prng) % (mm_size - bias_end), ps);

			bias_rem += old_bias_start - bias_start;
			bias_rem += bias_end - old_bias_end;
		} while (!bias_rem && (bias_start || bias_end != mm_size));
	} while (bias_rem);

	KUNIT_ASSERT_EQ(test, bias_start, 0);
	KUNIT_ASSERT_EQ(test, bias_end, mm_size);
	KUNIT_ASSERT_TRUE_MSG(test,
			      drm_buddy_alloc_blocks(&mm, bias_start, bias_end,
						     ps, ps,
						     &allocated,
						     DRM_BUDDY_RANGE_ALLOCATION),
			      "buddy_alloc passed with bias(%x-%x), size=%u\n",
			      bias_start, bias_end, ps);

	drm_buddy_free_list(&mm, &allocated, 0);
	drm_buddy_fini(&mm);
}

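/*
 * Dirty/clear tracking: blocks freed with DRM_BUDDY_CLEARED are remembered as
 * clear, DRM_BUDDY_CLEAR_ALLOCATION asks for such blocks, and plain frees and
 * allocations leave/return dirty blocks.
 */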
static void drm_test_buddy_alloc_clear(struct kunit *test)
{
	unsigned long n_pages, total, i = 0;
	DRM_RND_STATE(prng, random_seed);
	const unsigned long ps = SZ_4K;
	struct drm_buddy_block *block;
	const int max_order = 12;
	LIST_HEAD(allocated);
	struct drm_buddy mm;
	unsigned int order;
	u32 mm_size, size;
	LIST_HEAD(dirty);
	LIST_HEAD(clean);

	mm_size = SZ_4K << max_order;
	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));

	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

	/*
	 * Idea is to allocate and free some random portion of the address space,
	 * returning those pages as non-dirty and randomly alternate between
	 * requesting dirty and non-dirty pages (not going over the limit
	 * we freed as non-dirty), putting that into two separate lists.
	 * Loop over both lists at the end checking that the dirty list
	 * is indeed all dirty pages and vice versa. Free it all again,
	 * keeping the dirty/clear status.
	 */
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							    5 * ps, ps, &allocated,
							    DRM_BUDDY_TOPDOWN_ALLOCATION),
			       "buddy_alloc hit an error size=%lu\n", 5 * ps);
	drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);

	n_pages = 10;
	do {
		unsigned long flags;
		struct list_head *list;
		int slot = i % 2;

		if (slot == 0) {
			list = &dirty;
			flags = 0;
		} else {
			list = &clean;
			flags = DRM_BUDDY_CLEAR_ALLOCATION;
		}

		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
								    ps, ps, list,
								    flags),
				       "buddy_alloc hit an error size=%lu\n", ps);
	} while (++i < n_pages);

	list_for_each_entry(block, &clean, link)
		KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), true);

	list_for_each_entry(block, &dirty, link)
		KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false);

	drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);

	/*
	 * Trying to go over the clear limit for some allocation.
	 * The allocation should never fail with reasonable page-size.
	 */
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							    10 * ps, ps, &clean,
							    DRM_BUDDY_CLEAR_ALLOCATION),
			       "buddy_alloc hit an error size=%lu\n", 10 * ps);

	drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);
	drm_buddy_free_list(&mm, &dirty, 0);
	drm_buddy_fini(&mm);

	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));

	/*
	 * Create a new mm. Intentionally fragment the address space by creating
	 * two alternating lists. Free both lists, one as dirty the other as clean.
	 * Try to allocate double the previous size with matching min_page_size. The
	 * allocation should never fail as it calls the force_merge. Also check that
	 * the page is always dirty after force_merge. Free the page as dirty, then
	 * repeat the whole thing, increment the order until we hit the max_order.
	 */

	i = 0;
	n_pages = mm_size / ps;
	do {
		struct list_head *list;
		int slot = i % 2;

		if (slot == 0)
			list = &dirty;
		else
			list = &clean;

		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
								    ps, ps, list, 0),
				       "buddy_alloc hit an error size=%lu\n", ps);
	} while (++i < n_pages);

	drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);
	drm_buddy_free_list(&mm, &dirty, 0);

	order = 1;
	do {
		size = SZ_4K << order;

		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
								    size, size, &allocated,
								    DRM_BUDDY_CLEAR_ALLOCATION),
				       "buddy_alloc hit an error size=%u\n", size);
		total = 0;
		list_for_each_entry(block, &allocated, link) {
			if (size != mm_size)
				KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false);
			total += drm_buddy_block_size(&mm, block);
		}
		KUNIT_EXPECT_EQ(test, total, size);

		drm_buddy_free_list(&mm, &allocated, 0);
	} while (++order <= max_order);

	drm_buddy_fini(&mm);

	/*
	 * Create a new mm with a non power-of-two size. Allocate a random size, free as
	 * cleared and then call fini. This will ensure the multi-root force merge during
	 * fini.
	 */
	mm_size = 12 * SZ_4K;
	size = max(round_up(prandom_u32_state(&prng) % mm_size, ps), ps);
	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							    size, ps, &allocated,
							    DRM_BUDDY_TOPDOWN_ALLOCATION),
			       "buddy_alloc hit an error size=%u\n", size);
	drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);
	drm_buddy_fini(&mm);
}

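/*
 * Contiguous allocation across a deliberately fragmented address space,
 * including the try_harder fallback where the pieces are not buddies.
 */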
static void drm_test_buddy_alloc_contiguous(struct kunit *test)
{
	const unsigned long ps = SZ_4K, mm_size = 16 * 3 * SZ_4K;
	unsigned long i, n_pages, total;
	struct drm_buddy_block *block;
	struct drm_buddy mm;
	LIST_HEAD(left);
	LIST_HEAD(middle);
	LIST_HEAD(right);
	LIST_HEAD(allocated);

	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));

	/*
	 * Idea is to fragment the address space by alternating block
	 * allocations between three different lists; one for left, middle and
	 * right. We can then free a list to simulate fragmentation. In
	 * particular we want to exercise the DRM_BUDDY_CONTIGUOUS_ALLOCATION,
	 * including the try_harder path.
	 */

	i = 0;
	n_pages = mm_size / ps;
	do {
		struct list_head *list;
		int slot = i % 3;

		if (slot == 0)
			list = &left;
		else if (slot == 1)
			list = &middle;
		else
			list = &right;
		KUNIT_ASSERT_FALSE_MSG(test,
				       drm_buddy_alloc_blocks(&mm, 0, mm_size,
							      ps, ps, list, 0),
				       "buddy_alloc hit an error size=%lu\n",
				       ps);
	} while (++i < n_pages);

	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							   3 * ps, ps, &allocated,
							   DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			      "buddy_alloc didn't error size=%lu\n", 3 * ps);

	drm_buddy_free_list(&mm, &middle, 0);
	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							   3 * ps, ps, &allocated,
							   DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			      "buddy_alloc didn't error size=%lu\n", 3 * ps);
	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							   2 * ps, ps, &allocated,
							   DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			      "buddy_alloc didn't error size=%lu\n", 2 * ps);

	drm_buddy_free_list(&mm, &right, 0);
	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							   3 * ps, ps, &allocated,
							   DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			      "buddy_alloc didn't error size=%lu\n", 3 * ps);
	/*
	 * At this point we should have enough contiguous space for 2 blocks,
	 * however they are never buddies (since we freed middle and right) so
	 * will require the try_harder logic to find them.
	 */
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							    2 * ps, ps, &allocated,
							    DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			       "buddy_alloc hit an error size=%lu\n", 2 * ps);

	drm_buddy_free_list(&mm, &left, 0);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							    3 * ps, ps, &allocated,
							    DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			       "buddy_alloc hit an error size=%lu\n", 3 * ps);

	total = 0;
	list_for_each_entry(block, &allocated, link)
		total += drm_buddy_block_size(&mm, block);

	KUNIT_ASSERT_EQ(test, total, ps * 2 + ps * 3);

	drm_buddy_free_list(&mm, &allocated, 0);
	drm_buddy_fini(&mm);
}

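/*
 * Pathological fragmentation: repeatedly split the largest free block down to
 * single pages, keeping the leftover page each round, until the mm is 50%
 * fragmented and nothing larger than chunk_size can be allocated.
 */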
static void drm_test_buddy_alloc_pathological(struct kunit *test)
{
	u64 mm_size, size, start = 0;
	struct drm_buddy_block *block;
	const int max_order = 3;
	unsigned long flags = 0;
	int order, top;
	struct drm_buddy mm;
	LIST_HEAD(blocks);
	LIST_HEAD(holes);
	LIST_HEAD(tmp);

	/*
	 * Create a pot-sized mm, then allocate one of each possible
	 * order within. This should leave the mm with exactly one
	 * page left. Free the largest block, then whittle down again.
	 * Eventually we will have a fully 50% fragmented mm.
	 */

	mm_size = PAGE_SIZE << max_order;
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
			       "buddy_init failed\n");

	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

	for (top = max_order; top; top--) {
		/* Make room by freeing the largest allocated block */
		block = list_first_entry_or_null(&blocks, typeof(*block), link);
		if (block) {
			list_del(&block->link);
			drm_buddy_free_block(&mm, block);
		}

		for (order = top; order--;) {
			size = get_size(order, PAGE_SIZE);
			KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start,
									    mm_size, size, size,
									    &tmp, flags),
					       "buddy_alloc hit -ENOMEM with order=%d, top=%d\n",
					       order, top);

			block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
			KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

			list_move_tail(&block->link, &blocks);
		}

		/* There should be one final page for this sub-allocation */
		size = get_size(0, PAGE_SIZE);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM for hole\n");

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		list_move_tail(&block->link, &holes);

		size = get_size(top, PAGE_SIZE);
		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								   size, size, &tmp, flags),
				      "buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
				      top, max_order);
	}

	drm_buddy_free_list(&mm, &holes, 0);

	/* Nothing larger than blocks of chunk_size now available */
	for (order = 1; order <= max_order; order++) {
		size = get_size(order, PAGE_SIZE);
		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								   size, size, &tmp, flags),
				      "buddy_alloc unexpectedly succeeded at order %d, it should be full!",
				      order);
	}

	list_splice_tail(&holes, &blocks);
	drm_buddy_free_list(&mm, &blocks, 0);
	drm_buddy_fini(&mm);
}

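/*
 * Pessimistic path: fill the mm with one block of every order, confirm it is
 * full, then free in increasing size and check larger blocks become available
 * again as buddies merge.
 */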
static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
{
	u64 mm_size, size, start = 0;
	struct drm_buddy_block *block, *bn;
	const unsigned int max_order = 16;
	unsigned long flags = 0;
	struct drm_buddy mm;
	unsigned int order;
	LIST_HEAD(blocks);
	LIST_HEAD(tmp);

	/*
	 * Create a pot-sized mm, then allocate one of each possible
	 * order within. This should leave the mm with exactly one
	 * page left.
	 */

	mm_size = PAGE_SIZE << max_order;
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
			       "buddy_init failed\n");

	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

	for (order = 0; order < max_order; order++) {
		size = get_size(order, PAGE_SIZE);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM with order=%d\n",
				       order);

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		list_move_tail(&block->link, &blocks);
	}

	/* And now the last remaining block available */
	size = get_size(0, PAGE_SIZE);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
							    size, size, &tmp, flags),
			       "buddy_alloc hit -ENOMEM on final alloc\n");

	block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
	KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

	list_move_tail(&block->link, &blocks);

	/* Should be completely full! */
	for (order = max_order; order--;) {
		size = get_size(order, PAGE_SIZE);
		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								   size, size, &tmp, flags),
				      "buddy_alloc unexpectedly succeeded, it should be full!");
	}

	block = list_last_entry(&blocks, typeof(*block), link);
	list_del(&block->link);
	drm_buddy_free_block(&mm, block);

	/* As we free in increasing size, we make available larger blocks */
	order = 1;
	list_for_each_entry_safe(block, bn, &blocks, link) {
		list_del(&block->link);
		drm_buddy_free_block(&mm, block);

		size = get_size(order, PAGE_SIZE);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM with order=%d\n",
				       order);

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		list_del(&block->link);
		drm_buddy_free_block(&mm, block);
		order++;
	}

	/* To confirm, now the whole mm should be available */
	size = get_size(max_order, PAGE_SIZE);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
							    size, size, &tmp, flags),
			       "buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
			       max_order);

	block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
	KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

	list_del(&block->link);
	drm_buddy_free_block(&mm, block);
	drm_buddy_free_list(&mm, &blocks, 0);
	drm_buddy_fini(&mm);
}

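/*
 * Optimistic path: an mm sized to hold exactly one block of each order should
 * satisfy one allocation per order and then be completely full.
 */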
static void drm_test_buddy_alloc_optimistic(struct kunit *test)
{
	u64 mm_size, size, start = 0;
	struct drm_buddy_block *block;
	unsigned long flags = 0;
	const int max_order = 16;
	struct drm_buddy mm;
	LIST_HEAD(blocks);
	LIST_HEAD(tmp);
	int order;

	/*
	 * Create a mm with one block of each order available, and
	 * try to allocate them all.
	 */

	mm_size = PAGE_SIZE * ((1 << (max_order + 1)) - 1);

	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
			       "buddy_init failed\n");

	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

	for (order = 0; order <= max_order; order++) {
		size = get_size(order, PAGE_SIZE);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM with order=%d\n",
				       order);

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		list_move_tail(&block->link, &blocks);
	}

	/* Should be completely full! */
	size = get_size(0, PAGE_SIZE);
	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
							   size, size, &tmp, flags),
			      "buddy_alloc unexpectedly succeeded, it should be full!");

	drm_buddy_free_list(&mm, &blocks, 0);
	drm_buddy_fini(&mm);
}

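/*
 * Initializing with U64_MAX must clamp max_order to DRM_BUDDY_MAX_ORDER, and a
 * single block of that maximum order should still be allocatable.
 */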
static void drm_test_buddy_alloc_limit(struct kunit *test)
{
	u64 size = U64_MAX, start = 0;
	struct drm_buddy_block *block;
	unsigned long flags = 0;
	LIST_HEAD(allocated);
	struct drm_buddy mm;

	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, size, PAGE_SIZE));

	KUNIT_EXPECT_EQ_MSG(test, mm.max_order, DRM_BUDDY_MAX_ORDER,
			    "mm.max_order(%d) != %d\n", mm.max_order,
			    DRM_BUDDY_MAX_ORDER);

	size = mm.chunk_size << mm.max_order;
	KUNIT_EXPECT_FALSE(test, drm_buddy_alloc_blocks(&mm, start, size, size,
							PAGE_SIZE, &allocated, flags));

	block = list_first_entry_or_null(&allocated, struct drm_buddy_block, link);
	KUNIT_EXPECT_TRUE(test, block);

	KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_order(block), mm.max_order,
			    "block order(%d) != %d\n",
			    drm_buddy_block_order(block), mm.max_order);

	KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_size(&mm, block),
			    BIT_ULL(mm.max_order) * PAGE_SIZE,
			    "block size(%llu) != %llu\n",
			    drm_buddy_block_size(&mm, block),
			    BIT_ULL(mm.max_order) * PAGE_SIZE);

	drm_buddy_free_list(&mm, &allocated, 0);
	drm_buddy_fini(&mm);
}

static int drm_buddy_suite_init(struct kunit_suite *suite)
{
	while (!random_seed)
		random_seed = get_random_u32();

	kunit_info(suite, "Testing DRM buddy manager, with random_seed=0x%x\n",
		   random_seed);

	return 0;
}

static struct kunit_case drm_buddy_tests[] = {
	KUNIT_CASE(drm_test_buddy_alloc_limit),
	KUNIT_CASE(drm_test_buddy_alloc_optimistic),
	KUNIT_CASE(drm_test_buddy_alloc_pessimistic),
	KUNIT_CASE(drm_test_buddy_alloc_pathological),
	KUNIT_CASE(drm_test_buddy_alloc_contiguous),
	KUNIT_CASE(drm_test_buddy_alloc_clear),
	KUNIT_CASE(drm_test_buddy_alloc_range_bias),
	{}
};

static struct kunit_suite drm_buddy_test_suite = {
	.name = "drm_buddy",
	.suite_init = drm_buddy_suite_init,
	.test_cases = drm_buddy_tests,
};

kunit_test_suite(drm_buddy_test_suite);

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");