// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/sizes.h>

#include <drm/drm_buddy.h>

static struct kmem_cache *slab_blocks;

static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm,
					       struct drm_buddy_block *parent,
					       unsigned int order,
					       u64 offset)
{
	struct drm_buddy_block *block;

	BUG_ON(order > DRM_BUDDY_MAX_ORDER);

	block = kmem_cache_zalloc(slab_blocks, GFP_KERNEL);
	if (!block)
		return NULL;

	block->header = offset;
	block->header |= order;
	block->parent = parent;

	BUG_ON(block->header & DRM_BUDDY_HEADER_UNUSED);
	return block;
}

static void drm_block_free(struct drm_buddy *mm,
			   struct drm_buddy_block *block)
{
	kmem_cache_free(slab_blocks, block);
}

static void list_insert_sorted(struct drm_buddy *mm,
			       struct drm_buddy_block *block)
{
	struct drm_buddy_block *node;
	struct list_head *head;

	head = &mm->free_list[drm_buddy_block_order(block)];
	if (list_empty(head)) {
		list_add(&block->link, head);
		return;
	}

	list_for_each_entry(node, head, link)
		if (drm_buddy_block_offset(block) < drm_buddy_block_offset(node))
			break;

	__list_add(&block->link, node->link.prev, &node->link);
}

static void clear_reset(struct drm_buddy_block *block)
{
	block->header &= ~DRM_BUDDY_HEADER_CLEAR;
}

static void mark_cleared(struct drm_buddy_block *block)
{
	block->header |= DRM_BUDDY_HEADER_CLEAR;
}

static void mark_allocated(struct drm_buddy_block *block)
{
	block->header &= ~DRM_BUDDY_HEADER_STATE;
	block->header |= DRM_BUDDY_ALLOCATED;

	list_del(&block->link);
}

static void mark_free(struct drm_buddy *mm,
		      struct drm_buddy_block *block)
{
	block->header &= ~DRM_BUDDY_HEADER_STATE;
	block->header |= DRM_BUDDY_FREE;

	list_insert_sorted(mm, block);
}

static void mark_split(struct drm_buddy_block *block)
{
	block->header &= ~DRM_BUDDY_HEADER_STATE;
	block->header |= DRM_BUDDY_SPLIT;

	list_del(&block->link);
}

static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
{
	return s1 <= e2 && e1 >= s2;
}

static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
{
	return s1 <= s2 && e1 >= e2;
}

static struct drm_buddy_block *
__get_buddy(struct drm_buddy_block *block)
{
	struct drm_buddy_block *parent;

	parent = block->parent;
	if (!parent)
		return NULL;

	if (parent->left == block)
		return parent->right;

	return parent->left;
}

static unsigned int __drm_buddy_free(struct drm_buddy *mm,
				     struct drm_buddy_block *block,
				     bool force_merge)
{
	struct drm_buddy_block *parent;
	unsigned int order;

	while ((parent = block->parent)) {
		struct drm_buddy_block *buddy;

		buddy = __get_buddy(block);

		if (!drm_buddy_block_is_free(buddy))
			break;

		if (!force_merge) {
			/*
			 * Check the clear state of the block and its buddy,
			 * and exit the loop if their clear states differ.
			 */
			if (drm_buddy_block_is_clear(block) !=
			    drm_buddy_block_is_clear(buddy))
				break;

			if (drm_buddy_block_is_clear(block))
				mark_cleared(parent);
		}

		list_del(&buddy->link);
		if (force_merge && drm_buddy_block_is_clear(buddy))
			mm->clear_avail -= drm_buddy_block_size(mm, buddy);

		drm_block_free(mm, block);
		drm_block_free(mm, buddy);

		block = parent;
	}

	order = drm_buddy_block_order(block);
	mark_free(mm, block);

	return order;
}
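
/*
 * Illustrative note (added for exposition, not part of the API): since every
 * split halves a size-aligned block, the buddy of a block can also be derived
 * arithmetically rather than via the parent pointer:
 *
 *	u64 buddy_offset = offset ^ (mm->chunk_size << order);
 *
 * e.g. with 4 KiB chunks, the order-1 (8 KiB) block at offset 0x2000 has its
 * buddy at 0x2000 ^ 0x2000 = 0x0. __get_buddy() walks the tree instead, which
 * hands back the buddy's metadata rather than just its offset.
 */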

static int __force_merge(struct drm_buddy *mm,
			 u64 start,
			 u64 end,
			 unsigned int min_order)
{
	unsigned int order;
	int i;

	if (!min_order)
		return -ENOMEM;

	if (min_order > mm->max_order)
		return -EINVAL;

	for (i = min_order - 1; i >= 0; i--) {
		struct drm_buddy_block *block, *prev;

		list_for_each_entry_safe_reverse(block, prev, &mm->free_list[i], link) {
			struct drm_buddy_block *buddy;
			u64 block_start, block_end;

			if (!block->parent)
				continue;

			block_start = drm_buddy_block_offset(block);
			block_end = block_start + drm_buddy_block_size(mm, block) - 1;

			if (!contains(start, end, block_start, block_end))
				continue;

			buddy = __get_buddy(block);
			if (!drm_buddy_block_is_free(buddy))
				continue;

			WARN_ON(drm_buddy_block_is_clear(block) ==
				drm_buddy_block_is_clear(buddy));

			/*
			 * If prev is the buddy, don't access it in the next
			 * iteration, since the buddy block is freed as part of
			 * __drm_buddy_free().
			 */
			if (prev == buddy)
				prev = list_prev_entry(prev, link);

			list_del(&block->link);
			if (drm_buddy_block_is_clear(block))
				mm->clear_avail -= drm_buddy_block_size(mm, block);

			order = __drm_buddy_free(mm, block, true);
			if (order >= min_order)
				return 0;
		}
	}

	return -ENOMEM;
}

/**
 * drm_buddy_init - init memory manager
 *
 * @mm: DRM buddy manager to initialize
 * @size: size in bytes to manage
 * @chunk_size: minimum page size in bytes for our allocations
 *
 * Initializes the memory manager and its resources.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
{
	unsigned int i;
	u64 offset;

	if (size < chunk_size)
		return -EINVAL;

	if (chunk_size < PAGE_SIZE)
		return -EINVAL;

	if (!is_power_of_2(chunk_size))
		return -EINVAL;

	size = round_down(size, chunk_size);

	mm->size = size;
	mm->avail = size;
	mm->clear_avail = 0;
	mm->chunk_size = chunk_size;
	mm->max_order = ilog2(size) - ilog2(chunk_size);

	BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER);

	mm->free_list = kmalloc_array(mm->max_order + 1,
				      sizeof(struct list_head),
				      GFP_KERNEL);
	if (!mm->free_list)
		return -ENOMEM;

	for (i = 0; i <= mm->max_order; ++i)
		INIT_LIST_HEAD(&mm->free_list[i]);

	mm->n_roots = hweight64(size);

	mm->roots = kmalloc_array(mm->n_roots,
				  sizeof(struct drm_buddy_block *),
				  GFP_KERNEL);
	if (!mm->roots)
		goto out_free_list;

	offset = 0;
	i = 0;

	/*
	 * Split into power-of-two blocks, in case we are given a size that is
	 * not itself a power-of-two.
	 */
	do {
		struct drm_buddy_block *root;
		unsigned int order;
		u64 root_size;

		order = ilog2(size) - ilog2(chunk_size);
		root_size = chunk_size << order;

		root = drm_block_alloc(mm, NULL, order, offset);
		if (!root)
			goto out_free_roots;

		mark_free(mm, root);

		BUG_ON(i > mm->max_order);
		BUG_ON(drm_buddy_block_size(mm, root) < chunk_size);

		mm->roots[i] = root;

		offset += root_size;
		size -= root_size;
		i++;
	} while (size);

	return 0;

out_free_roots:
	while (i--)
		drm_block_free(mm, mm->roots[i]);
	kfree(mm->roots);
out_free_list:
	kfree(mm->free_list);
	return -ENOMEM;
}
EXPORT_SYMBOL(drm_buddy_init);
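
/*
 * Minimal usage sketch (illustrative, not part of this file): a driver
 * managing a 96 MiB region with 4 KiB chunks would do roughly the following.
 * The region decomposes into hweight64(96M) = 2 roots, a 64 MiB root
 * (order 14) at offset 0 and a 32 MiB root (order 13) at 64 MiB:
 *
 *	struct drm_buddy mm;
 *	int err;
 *
 *	err = drm_buddy_init(&mm, SZ_64M + SZ_32M, SZ_4K);
 *	if (err)
 *		return err;
 *	...
 *	drm_buddy_fini(&mm);
 */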

/**
 * drm_buddy_fini - tear down the memory manager
 *
 * @mm: DRM buddy manager to free
 *
 * Clean up memory manager resources and the freelist.
 */
void drm_buddy_fini(struct drm_buddy *mm)
{
	u64 root_size, size;
	unsigned int order;
	int i;

	size = mm->size;

	for (i = 0; i < mm->n_roots; ++i) {
		order = ilog2(size) - ilog2(mm->chunk_size);
		__force_merge(mm, 0, size, order);

		WARN_ON(!drm_buddy_block_is_free(mm->roots[i]));
		drm_block_free(mm, mm->roots[i]);

		root_size = mm->chunk_size << order;
		size -= root_size;
	}

	WARN_ON(mm->avail != mm->size);

	kfree(mm->roots);
	kfree(mm->free_list);
}
EXPORT_SYMBOL(drm_buddy_fini);

static int split_block(struct drm_buddy *mm,
		       struct drm_buddy_block *block)
{
	unsigned int block_order = drm_buddy_block_order(block) - 1;
	u64 offset = drm_buddy_block_offset(block);

	BUG_ON(!drm_buddy_block_is_free(block));
	BUG_ON(!drm_buddy_block_order(block));

	block->left = drm_block_alloc(mm, block, block_order, offset);
	if (!block->left)
		return -ENOMEM;

	block->right = drm_block_alloc(mm, block, block_order,
				       offset + (mm->chunk_size << block_order));
	if (!block->right) {
		drm_block_free(mm, block->left);
		return -ENOMEM;
	}

	mark_free(mm, block->left);
	mark_free(mm, block->right);

	if (drm_buddy_block_is_clear(block)) {
		mark_cleared(block->left);
		mark_cleared(block->right);
		clear_reset(block);
	}

	mark_split(block);

	return 0;
}

/**
 * drm_get_buddy - get buddy address
 *
 * @block: DRM buddy block
 *
 * Returns the corresponding buddy block for @block, or NULL
 * if this is a root block and can't be merged further.
 * Requires some kind of locking to protect against
 * any concurrent allocate and free operations.
 */
struct drm_buddy_block *
drm_get_buddy(struct drm_buddy_block *block)
{
	return __get_buddy(block);
}
EXPORT_SYMBOL(drm_get_buddy);
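
/*
 * Worked example for split_block() (illustrative): with 4 KiB chunks,
 * splitting a free order-3 (32 KiB) block at offset 0x0 produces two free
 * order-2 children, left at 0x0 and right at 0x0 + (4K << 2) = 0x4000. If the
 * parent carried the CLEAR bit, both children inherit it and the parent's bit
 * is reset, so the cleared state is tracked exactly across splits.
 */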

/**
 * drm_buddy_free_block - free a block
 *
 * @mm: DRM buddy manager
 * @block: block to be freed
 */
void drm_buddy_free_block(struct drm_buddy *mm,
			  struct drm_buddy_block *block)
{
	BUG_ON(!drm_buddy_block_is_allocated(block));
	mm->avail += drm_buddy_block_size(mm, block);
	if (drm_buddy_block_is_clear(block))
		mm->clear_avail += drm_buddy_block_size(mm, block);

	__drm_buddy_free(mm, block, false);
}
EXPORT_SYMBOL(drm_buddy_free_block);

static void __drm_buddy_free_list(struct drm_buddy *mm,
				  struct list_head *objects,
				  bool mark_clear,
				  bool mark_dirty)
{
	struct drm_buddy_block *block, *on;

	WARN_ON(mark_dirty && mark_clear);

	list_for_each_entry_safe(block, on, objects, link) {
		if (mark_clear)
			mark_cleared(block);
		else if (mark_dirty)
			clear_reset(block);
		drm_buddy_free_block(mm, block);
		cond_resched();
	}
	INIT_LIST_HEAD(objects);
}

static void drm_buddy_free_list_internal(struct drm_buddy *mm,
					 struct list_head *objects)
{
	/*
	 * Don't touch the clear/dirty bit, since allocation is still internal
	 * at this point. For example we might have just failed part of the
	 * allocation.
	 */
	__drm_buddy_free_list(mm, objects, false, false);
}

/**
 * drm_buddy_free_list - free blocks
 *
 * @mm: DRM buddy manager
 * @objects: input list head to free blocks
 * @flags: optional flags like DRM_BUDDY_CLEARED
 */
void drm_buddy_free_list(struct drm_buddy *mm,
			 struct list_head *objects,
			 unsigned int flags)
{
	bool mark_clear = flags & DRM_BUDDY_CLEARED;

	__drm_buddy_free_list(mm, objects, mark_clear, !mark_clear);
}
EXPORT_SYMBOL(drm_buddy_free_list);

static bool block_incompatible(struct drm_buddy_block *block, unsigned int flags)
{
	bool needs_clear = flags & DRM_BUDDY_CLEAR_ALLOCATION;

	return needs_clear != drm_buddy_block_is_clear(block);
}

static struct drm_buddy_block *
__alloc_range_bias(struct drm_buddy *mm,
		   u64 start, u64 end,
		   unsigned int order,
		   unsigned long flags,
		   bool fallback)
{
	u64 req_size = mm->chunk_size << order;
	struct drm_buddy_block *block;
	struct drm_buddy_block *buddy;
	LIST_HEAD(dfs);
	int err;
	int i;

	end = end - 1;

	for (i = 0; i < mm->n_roots; ++i)
		list_add_tail(&mm->roots[i]->tmp_link, &dfs);

	do {
		u64 block_start;
		u64 block_end;

		block = list_first_entry_or_null(&dfs,
						 struct drm_buddy_block,
						 tmp_link);
		if (!block)
			break;

		list_del(&block->tmp_link);

		if (drm_buddy_block_order(block) < order)
			continue;

		block_start = drm_buddy_block_offset(block);
		block_end = block_start + drm_buddy_block_size(mm, block) - 1;

		if (!overlaps(start, end, block_start, block_end))
			continue;

		if (drm_buddy_block_is_allocated(block))
			continue;

		if (block_start < start || block_end > end) {
			u64 adjusted_start = max(block_start, start);
			u64 adjusted_end = min(block_end, end);

			if (round_down(adjusted_end + 1, req_size) <=
			    round_up(adjusted_start, req_size))
				continue;
		}

		if (!fallback && block_incompatible(block, flags))
			continue;

		if (contains(start, end, block_start, block_end) &&
		    order == drm_buddy_block_order(block)) {
			/*
			 * Find the free block within the range.
			 */
			if (drm_buddy_block_is_free(block))
				return block;

			continue;
		}

		if (!drm_buddy_block_is_split(block)) {
			err = split_block(mm, block);
			if (unlikely(err))
				goto err_undo;
		}

		list_add(&block->right->tmp_link, &dfs);
		list_add(&block->left->tmp_link, &dfs);
	} while (1);

	return ERR_PTR(-ENOSPC);

err_undo:
	/*
	 * We really don't want to leave around a bunch of split blocks, since
	 * bigger is better, so make sure we merge everything back before we
	 * free the allocated blocks.
	 */
	buddy = __get_buddy(block);
	if (buddy &&
	    (drm_buddy_block_is_free(block) &&
	     drm_buddy_block_is_free(buddy)))
		__drm_buddy_free(mm, block, false);
	return ERR_PTR(err);
}
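
/*
 * Worked example (illustrative) for the pruning check in
 * __alloc_range_bias(): with req_size = 64 KiB, a 256 KiB block spanning
 * [0x40000, 0x80000) and a bias range of [0x48000, 0x58000), we get
 * round_up(0x48000, 64K) = 0x50000 and round_down(0x58000, 64K) = 0x50000,
 * so no 64 KiB-aligned slot fits entirely inside the intersection and the
 * whole subtree is skipped without being split.
 */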

static struct drm_buddy_block *
__drm_buddy_alloc_range_bias(struct drm_buddy *mm,
			     u64 start, u64 end,
			     unsigned int order,
			     unsigned long flags)
{
	struct drm_buddy_block *block;
	bool fallback = false;

	block = __alloc_range_bias(mm, start, end, order,
				   flags, fallback);
	if (IS_ERR(block))
		return __alloc_range_bias(mm, start, end, order,
					  flags, !fallback);

	return block;
}

static struct drm_buddy_block *
get_maxblock(struct drm_buddy *mm, unsigned int order,
	     unsigned long flags)
{
	struct drm_buddy_block *max_block = NULL, *block = NULL;
	unsigned int i;

	for (i = order; i <= mm->max_order; ++i) {
		struct drm_buddy_block *tmp_block;

		list_for_each_entry_reverse(tmp_block, &mm->free_list[i], link) {
			if (block_incompatible(tmp_block, flags))
				continue;

			block = tmp_block;
			break;
		}

		if (!block)
			continue;

		if (!max_block) {
			max_block = block;
			continue;
		}

		if (drm_buddy_block_offset(block) >
		    drm_buddy_block_offset(max_block)) {
			max_block = block;
		}
	}

	return max_block;
}

static struct drm_buddy_block *
alloc_from_freelist(struct drm_buddy *mm,
		    unsigned int order,
		    unsigned long flags)
{
	struct drm_buddy_block *block = NULL;
	unsigned int tmp;
	int err;

	if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
		block = get_maxblock(mm, order, flags);
		if (block)
			/* Store the obtained block order */
			tmp = drm_buddy_block_order(block);
	} else {
		for (tmp = order; tmp <= mm->max_order; ++tmp) {
			struct drm_buddy_block *tmp_block;

			list_for_each_entry_reverse(tmp_block, &mm->free_list[tmp], link) {
				if (block_incompatible(tmp_block, flags))
					continue;

				block = tmp_block;
				break;
			}

			if (block)
				break;
		}
	}

	if (!block) {
		/* Fallback method */
		for (tmp = order; tmp <= mm->max_order; ++tmp) {
			if (!list_empty(&mm->free_list[tmp])) {
				block = list_last_entry(&mm->free_list[tmp],
							struct drm_buddy_block,
							link);
				if (block)
					break;
			}
		}

		if (!block)
			return ERR_PTR(-ENOSPC);
	}

	BUG_ON(!drm_buddy_block_is_free(block));

	while (tmp != order) {
		err = split_block(mm, block);
		if (unlikely(err))
			goto err_undo;

		block = block->right;
		tmp--;
	}
	return block;

err_undo:
	if (tmp != order)
		__drm_buddy_free(mm, block, false);
	return ERR_PTR(err);
}
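
/*
 * Illustrative walk-through of the split loop above: requesting order 2 when
 * the smallest compatible free block is order 5 splits 5 -> 4 -> 3 -> 2,
 * descending into the right child each time. The discarded left halves
 * (orders 4, 3 and 2) remain on their respective free lists, so a later
 * order <= 4 request can be served without touching other blocks.
 */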

static int __alloc_range(struct drm_buddy *mm,
			 struct list_head *dfs,
			 u64 start, u64 size,
			 struct list_head *blocks,
			 u64 *total_allocated_on_err)
{
	struct drm_buddy_block *block;
	struct drm_buddy_block *buddy;
	u64 total_allocated = 0;
	LIST_HEAD(allocated);
	u64 end;
	int err;

	end = start + size - 1;

	do {
		u64 block_start;
		u64 block_end;

		block = list_first_entry_or_null(dfs,
						 struct drm_buddy_block,
						 tmp_link);
		if (!block)
			break;

		list_del(&block->tmp_link);

		block_start = drm_buddy_block_offset(block);
		block_end = block_start + drm_buddy_block_size(mm, block) - 1;

		if (!overlaps(start, end, block_start, block_end))
			continue;

		if (drm_buddy_block_is_allocated(block)) {
			err = -ENOSPC;
			goto err_free;
		}

		if (contains(start, end, block_start, block_end)) {
			if (drm_buddy_block_is_free(block)) {
				mark_allocated(block);
				total_allocated += drm_buddy_block_size(mm, block);
				mm->avail -= drm_buddy_block_size(mm, block);
				if (drm_buddy_block_is_clear(block))
					mm->clear_avail -= drm_buddy_block_size(mm, block);
				list_add_tail(&block->link, &allocated);
				continue;
			} else if (!mm->clear_avail) {
				err = -ENOSPC;
				goto err_free;
			}
		}

		if (!drm_buddy_block_is_split(block)) {
			err = split_block(mm, block);
			if (unlikely(err))
				goto err_undo;
		}

		list_add(&block->right->tmp_link, dfs);
		list_add(&block->left->tmp_link, dfs);
	} while (1);

	if (total_allocated < size) {
		err = -ENOSPC;
		goto err_free;
	}

	list_splice_tail(&allocated, blocks);

	return 0;

err_undo:
	/*
	 * We really don't want to leave around a bunch of split blocks, since
	 * bigger is better, so make sure we merge everything back before we
	 * free the allocated blocks.
	 */
	buddy = __get_buddy(block);
	if (buddy &&
	    (drm_buddy_block_is_free(block) &&
	     drm_buddy_block_is_free(buddy)))
		__drm_buddy_free(mm, block, false);

err_free:
	if (err == -ENOSPC && total_allocated_on_err) {
		list_splice_tail(&allocated, blocks);
		*total_allocated_on_err = total_allocated;
	} else {
		drm_buddy_free_list_internal(mm, &allocated);
	}

	return err;
}
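
/*
 * Illustrative example: an exact range request of [0x1000, 0x4000) with
 * 4 KiB chunks cannot be a single buddy block (12 KiB is not a power of two,
 * and 0x1000 is not 8 KiB-aligned), so __alloc_range() returns it as multiple
 * blocks: an order-0 block at 0x1000 and an order-1 block at 0x2000, in
 * ascending offset order.
 */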

static int __drm_buddy_alloc_range(struct drm_buddy *mm,
				   u64 start,
				   u64 size,
				   u64 *total_allocated_on_err,
				   struct list_head *blocks)
{
	LIST_HEAD(dfs);
	int i;

	for (i = 0; i < mm->n_roots; ++i)
		list_add_tail(&mm->roots[i]->tmp_link, &dfs);

	return __alloc_range(mm, &dfs, start, size,
			     blocks, total_allocated_on_err);
}

static int __alloc_contig_try_harder(struct drm_buddy *mm,
				     u64 size,
				     u64 min_block_size,
				     struct list_head *blocks)
{
	u64 rhs_offset, lhs_offset, lhs_size, filled;
	struct drm_buddy_block *block;
	struct list_head *list;
	LIST_HEAD(blocks_lhs);
	unsigned long pages;
	unsigned int order;
	u64 modify_size;
	int err;

	modify_size = rounddown_pow_of_two(size);
	pages = modify_size >> ilog2(mm->chunk_size);
	order = fls(pages) - 1;
	if (order == 0)
		return -ENOSPC;

	list = &mm->free_list[order];
	if (list_empty(list))
		return -ENOSPC;

	list_for_each_entry_reverse(block, list, link) {
		/* Allocate blocks traversing RHS */
		rhs_offset = drm_buddy_block_offset(block);
		err = __drm_buddy_alloc_range(mm, rhs_offset, size,
					      &filled, blocks);
		if (!err || err != -ENOSPC)
			return err;

		lhs_size = max((size - filled), min_block_size);
		if (!IS_ALIGNED(lhs_size, min_block_size))
			lhs_size = round_up(lhs_size, min_block_size);

		/* Allocate blocks traversing LHS */
		lhs_offset = drm_buddy_block_offset(block) - lhs_size;
		err = __drm_buddy_alloc_range(mm, lhs_offset, lhs_size,
					      NULL, &blocks_lhs);
		if (!err) {
			list_splice(&blocks_lhs, blocks);
			return 0;
		} else if (err != -ENOSPC) {
			drm_buddy_free_list_internal(mm, blocks);
			return err;
		}
		/* Free blocks for the next iteration */
		drm_buddy_free_list_internal(mm, blocks);
	}

	return -ENOSPC;
}
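
/*
 * Worked example (illustrative) for __alloc_contig_try_harder(): asking for a
 * contiguous 768 KiB with 4 KiB chunks first anchors on a free 512 KiB
 * (order-7) block. If allocating 768 KiB rightwards from that block's offset
 * fills only 512 KiB before hitting allocated space, the remaining 256 KiB is
 * retried leftwards, immediately below the anchor, so the two ranges join
 * into one contiguous span.
 */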

/**
 * drm_buddy_block_trim - free unused pages
 *
 * @mm: DRM buddy manager
 * @new_size: original size requested
 * @blocks: Input and output list of allocated blocks.
 * MUST contain a single block as input to be trimmed.
 * On success will contain the newly allocated blocks
 * making up the @new_size. Blocks always appear in
 * ascending order
 *
 * For contiguous allocation, we round up the size to the nearest
 * power-of-two value; drivers consume the *actual* size, so the
 * remaining portions are unused and can optionally be freed with
 * this function.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_block_trim(struct drm_buddy *mm,
			 u64 new_size,
			 struct list_head *blocks)
{
	struct drm_buddy_block *parent;
	struct drm_buddy_block *block;
	LIST_HEAD(dfs);
	u64 new_start;
	int err;

	if (!list_is_singular(blocks))
		return -EINVAL;

	block = list_first_entry(blocks,
				 struct drm_buddy_block,
				 link);

	if (WARN_ON(!drm_buddy_block_is_allocated(block)))
		return -EINVAL;

	if (new_size > drm_buddy_block_size(mm, block))
		return -EINVAL;

	if (!new_size || !IS_ALIGNED(new_size, mm->chunk_size))
		return -EINVAL;

	if (new_size == drm_buddy_block_size(mm, block))
		return 0;

	list_del(&block->link);
	mark_free(mm, block);
	mm->avail += drm_buddy_block_size(mm, block);
	if (drm_buddy_block_is_clear(block))
		mm->clear_avail += drm_buddy_block_size(mm, block);

	/* Prevent recursively freeing this node */
	parent = block->parent;
	block->parent = NULL;

	new_start = drm_buddy_block_offset(block);
	list_add(&block->tmp_link, &dfs);
	err = __alloc_range(mm, &dfs, new_start, new_size, blocks, NULL);
	if (err) {
		mark_allocated(block);
		mm->avail -= drm_buddy_block_size(mm, block);
		if (drm_buddy_block_is_clear(block))
			mm->clear_avail -= drm_buddy_block_size(mm, block);
		list_add(&block->link, blocks);
	}

	block->parent = parent;
	return err;
}
EXPORT_SYMBOL(drm_buddy_block_trim);

static struct drm_buddy_block *
__drm_buddy_alloc_blocks(struct drm_buddy *mm,
			 u64 start, u64 end,
			 unsigned int order,
			 unsigned long flags)
{
	if (flags & DRM_BUDDY_RANGE_ALLOCATION)
		/* Allocate traversing within the range */
		return __drm_buddy_alloc_range_bias(mm, start, end,
						    order, flags);
	else
		/* Allocate from freelist */
		return alloc_from_freelist(mm, order, flags);
}
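
/*
 * Illustrative trim sketch (hypothetical driver-side use): assuming @blocks
 * currently holds exactly one allocated 1 MiB block of which only 768 KiB is
 * needed, the call below frees the trailing 256 KiB back to the allocator and
 * leaves blocks covering the leading 768 KiB, in ascending offset order:
 *
 *	err = drm_buddy_block_trim(&mm, SZ_1M - SZ_256K, &blocks);
 */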

/**
 * drm_buddy_alloc_blocks - allocate power-of-two blocks
 *
 * @mm: DRM buddy manager to allocate from
 * @start: start of the allowed range for this block
 * @end: end of the allowed range for this block
 * @size: size of the allocation in bytes
 * @min_block_size: alignment of the allocation
 * @blocks: output list head to add allocated blocks
 * @flags: DRM_BUDDY_*_ALLOCATION flags
 *
 * alloc_range_bias() is called when there are range restrictions;
 * it traverses the tree and returns the desired block.
 *
 * alloc_from_freelist() is called when *no* range restrictions
 * are enforced; it picks a block from the freelist.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_alloc_blocks(struct drm_buddy *mm,
			   u64 start, u64 end, u64 size,
			   u64 min_block_size,
			   struct list_head *blocks,
			   unsigned long flags)
{
	struct drm_buddy_block *block = NULL;
	u64 original_size, original_min_size;
	unsigned int min_order, order;
	LIST_HEAD(allocated);
	unsigned long pages;
	int err;

	if (size < mm->chunk_size)
		return -EINVAL;

	if (min_block_size < mm->chunk_size)
		return -EINVAL;

	if (!is_power_of_2(min_block_size))
		return -EINVAL;

	if (!IS_ALIGNED(start | end | size, mm->chunk_size))
		return -EINVAL;

	if (end > mm->size)
		return -EINVAL;

	if (range_overflows(start, size, mm->size))
		return -EINVAL;

	/* Actual range allocation */
	if (start + size == end) {
		if (!IS_ALIGNED(start | end, min_block_size))
			return -EINVAL;

		return __drm_buddy_alloc_range(mm, start, size, NULL, blocks);
	}

	original_size = size;
	original_min_size = min_block_size;

	/* Round up the size to a power of two */
	if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION) {
		size = roundup_pow_of_two(size);
		min_block_size = size;
	/* Align size value to min_block_size */
	} else if (!IS_ALIGNED(size, min_block_size)) {
		size = round_up(size, min_block_size);
	}

	pages = size >> ilog2(mm->chunk_size);
	order = fls(pages) - 1;
	min_order = ilog2(min_block_size) - ilog2(mm->chunk_size);

	do {
		order = min(order, (unsigned int)fls(pages) - 1);
		BUG_ON(order > mm->max_order);
		BUG_ON(order < min_order);

		do {
			block = __drm_buddy_alloc_blocks(mm, start,
							 end,
							 order,
							 flags);
			if (!IS_ERR(block))
				break;

			if (order-- == min_order) {
				/* Try allocation through force merge method */
				if (mm->clear_avail &&
				    !__force_merge(mm, start, end, min_order)) {
					block = __drm_buddy_alloc_blocks(mm, start,
									 end,
									 min_order,
									 flags);
					if (!IS_ERR(block)) {
						order = min_order;
						break;
					}
				}

				/*
				 * Try contiguous block allocation through
				 * the try harder method.
				 */
				if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION &&
				    !(flags & DRM_BUDDY_RANGE_ALLOCATION))
					return __alloc_contig_try_harder(mm,
									 original_size,
									 original_min_size,
									 blocks);
				err = -ENOSPC;
				goto err_free;
			}
		} while (1);

		mark_allocated(block);
		mm->avail -= drm_buddy_block_size(mm, block);
		if (drm_buddy_block_is_clear(block))
			mm->clear_avail -= drm_buddy_block_size(mm, block);
		kmemleak_update_trace(block);
		list_add_tail(&block->link, &allocated);

		pages -= BIT(order);

		if (!pages)
			break;
	} while (1);

	/* Trim the allocated block to the required size */
	if (original_size != size) {
		struct list_head *trim_list;
		LIST_HEAD(temp);
		u64 trim_size;

		trim_list = &allocated;
		trim_size = original_size;

		if (!list_is_singular(&allocated)) {
			block = list_last_entry(&allocated, typeof(*block), link);
			list_move(&block->link, &temp);
			trim_list = &temp;
			trim_size = drm_buddy_block_size(mm, block) -
				    (size - original_size);
		}

		drm_buddy_block_trim(mm,
				     trim_size,
				     trim_list);

		if (!list_empty(&temp))
			list_splice_tail(trim_list, &allocated);
	}

	list_splice_tail(&allocated, blocks);
	return 0;

err_free:
	drm_buddy_free_list_internal(mm, &allocated);
	return err;
}
EXPORT_SYMBOL(drm_buddy_alloc_blocks);
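
/*
 * Minimal usage sketch (illustrative; assumes the managed region is larger
 * than the request): allocating 768 KiB of contiguous, pre-cleared space
 * anywhere in the managed region, then releasing it back as cleared:
 *
 *	LIST_HEAD(blocks);
 *	int err;
 *
 *	err = drm_buddy_alloc_blocks(&mm, 0, mm.size, SZ_1M - SZ_256K,
 *				     mm.chunk_size, &blocks,
 *				     DRM_BUDDY_CONTIGUOUS_ALLOCATION |
 *				     DRM_BUDDY_CLEAR_ALLOCATION);
 *	...
 *	drm_buddy_free_list(&mm, &blocks, DRM_BUDDY_CLEARED);
 */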

/**
 * drm_buddy_block_print - print block information
 *
 * @mm: DRM buddy manager
 * @block: DRM buddy block
 * @p: DRM printer to use
 */
void drm_buddy_block_print(struct drm_buddy *mm,
			   struct drm_buddy_block *block,
			   struct drm_printer *p)
{
	u64 start = drm_buddy_block_offset(block);
	u64 size = drm_buddy_block_size(mm, block);

	drm_printf(p, "%#018llx-%#018llx: %llu\n", start, start + size, size);
}
EXPORT_SYMBOL(drm_buddy_block_print);

/**
 * drm_buddy_print - print allocator state
 *
 * @mm: DRM buddy manager
 * @p: DRM printer to use
 */
void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p)
{
	int order;

	drm_printf(p, "chunk_size: %lluKiB, total: %lluMiB, free: %lluMiB, clear_free: %lluMiB\n",
		   mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20, mm->clear_avail >> 20);

	for (order = mm->max_order; order >= 0; order--) {
		struct drm_buddy_block *block;
		u64 count = 0, free;

		list_for_each_entry(block, &mm->free_list[order], link) {
			BUG_ON(!drm_buddy_block_is_free(block));
			count++;
		}

		drm_printf(p, "order-%2d ", order);

		free = count * (mm->chunk_size << order);
		if (free < SZ_1M)
			drm_printf(p, "free: %8llu KiB", free >> 10);
		else
			drm_printf(p, "free: %8llu MiB", free >> 20);

		drm_printf(p, ", blocks: %llu\n", count);
	}
}
EXPORT_SYMBOL(drm_buddy_print);

static void drm_buddy_module_exit(void)
{
	kmem_cache_destroy(slab_blocks);
}

static int __init drm_buddy_module_init(void)
{
	slab_blocks = KMEM_CACHE(drm_buddy_block, 0);
	if (!slab_blocks)
		return -ENOMEM;

	return 0;
}

module_init(drm_buddy_module_init);
module_exit(drm_buddy_module_exit);

MODULE_DESCRIPTION("DRM Buddy Allocator");
MODULE_LICENSE("Dual MIT/GPL");