/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>
#include <linux/interval_tree_generic.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. Drivers are free to use the
 * resource allocator from the Linux core if it suits them; the upside of
 * drm_mm is that it lives in the DRM core, which means it is easier to extend
 * for some of the crazier special-purpose needs of GPUs.
 *
 * The main data struct is &drm_mm; allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * data structures. drm_mm itself will not do any memory allocations of its
 * own, so if drivers choose not to embed nodes they still need to allocate
 * them themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This
 * is useful for taking over initial mode setting configurations from the
 * firmware, where an object needs to be created which exactly matches the
 * firmware's scanout target. As long as the range is still free it can be
 * inserted anytime after the allocator is initialized, which helps with
 * avoiding looped dependencies in the driver load sequence.
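 *
 * A minimal reservation sketch (illustrative only; fb_base and fb_size are
 * placeholders standing in for the firmware's scanout range, they are not
 * values defined in this file):
 *
 *	struct drm_mm_node fb_node = {};
 *
 *	fb_node.start = fb_base;
 *	fb_node.size = fb_size;
 *	err = drm_mm_reserve_node(&mm, &fb_node);
 *	if (err)
 *		return err;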
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic datastructures seems to be a fairly decent approach to clustering
 * allocations and avoiding too much fragmentation. This means free space
 * searches are O(num_holes). Given all the fancy features drm_mm supports,
 * something better would be fairly complex, and since gfx thrashing is a
 * fairly steep cliff this is not a real concern. Removing a node again is
 * O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just an
 * opaque unsigned long) which in conjunction with a driver callback can be
 * used to implement sophisticated placement restrictions. The i915 DRM driver
 * uses this to implement guard pages between incompatible caching domains in
 * the graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally, iteration helpers to walk all nodes and all holes are provided, as
 * are some basic allocator dumpers for debugging.
 *
 * Note that this range allocator is not thread-safe; drivers need to protect
 * modifications with their own locking. The idea behind this is that for a
 * full memory manager additional data needs to be protected anyway, hence
 * internal locking would be fully redundant.
 */

#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>

#define STACKDEPTH 32
#define BUFSZ 4096

static noinline void save_stack(struct drm_mm_node *node)
{
	unsigned long entries[STACKDEPTH];
	struct stack_trace trace = {
		.entries = entries,
		.max_entries = STACKDEPTH,
		.skip = 1
	};

	save_stack_trace(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	/* May be called under spinlock, so avoid sleeping */
	node->stack = depot_save_stack(&trace, GFP_NOWAIT);
}

static void show_leaks(struct drm_mm *mm)
{
	struct drm_mm_node *node;
	unsigned long entries[STACKDEPTH];
	char *buf;

	buf = kmalloc(BUFSZ, GFP_KERNEL);
	if (!buf)
		return;

	list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
		struct stack_trace trace = {
			.entries = entries,
			.max_entries = STACKDEPTH
		};

		if (!node->stack) {
			DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
				  node->start, node->size);
			continue;
		}

		depot_fetch_stack(node->stack, &trace);
		snprint_stack_trace(buf, BUFSZ, &trace, 0);
		DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
			  node->start, node->size, buf);
	}

	kfree(buf);
}

#undef STACKDEPTH
#undef BUFSZ
#else
static void save_stack(struct drm_mm_node *node) { }
static void show_leaks(struct drm_mm *mm) { }
#endif

#define START(node) ((node)->start)
#define LAST(node)  ((node)->start + (node)->size - 1)

INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
		     u64, __subtree_last,
		     START, LAST, static inline, drm_mm_interval_tree)

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
{
	return drm_mm_interval_tree_iter_first((struct rb_root *)&mm->interval_tree,
					       start, last) ?: (struct drm_mm_node *)&mm->head_node;
}
EXPORT_SYMBOL(__drm_mm_interval_first);

static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
					  struct drm_mm_node *node)
{
	struct drm_mm *mm = hole_node->mm;
	struct rb_node **link, *rb;
	struct drm_mm_node *parent;

	node->__subtree_last = LAST(node);

	if (hole_node->allocated) {
		rb = &hole_node->rb;
		while (rb) {
			parent = rb_entry(rb, struct drm_mm_node, rb);
			if (parent->__subtree_last >= node->__subtree_last)
				break;

			parent->__subtree_last = node->__subtree_last;
			rb = rb_parent(rb);
		}

		rb = &hole_node->rb;
		link = &hole_node->rb.rb_right;
	} else {
		rb = NULL;
		link = &mm->interval_tree.rb_node;
	}

	while (*link) {
		rb = *link;
		parent = rb_entry(rb, struct drm_mm_node, rb);
		if (parent->__subtree_last < node->__subtree_last)
			parent->__subtree_last = node->__subtree_last;
		if (node->start < parent->start)
			link = &parent->rb.rb_left;
		else
			link = &parent->rb.rb_right;
	}

	rb_link_node(&node->rb, rb, link);
	rb_insert_augmented(&node->rb,
			    &mm->interval_tree,
			    &drm_mm_interval_tree_augment);
}

#define RB_INSERT(root, member, expr) do { \
	struct rb_node **link = &root.rb_node, *rb = NULL; \
	u64 x = expr(node); \
	while (*link) { \
		rb = *link; \
		if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \
			link = &rb->rb_left; \
		else \
			link = &rb->rb_right; \
	} \
	rb_link_node(&node->member, rb, link); \
	rb_insert_color(&node->member, &root); \
} while (0)

#define HOLE_SIZE(NODE) ((NODE)->hole_size)
#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))

static void add_hole(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	node->hole_size =
		__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	RB_INSERT(mm->holes_size, rb_hole_size, HOLE_SIZE);
	RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);

	list_add(&node->hole_stack, &mm->hole_stack);
}

static void rm_hole(struct drm_mm_node *node)
{
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	list_del(&node->hole_stack);
	rb_erase(&node->rb_hole_size, &node->mm->holes_size);
	rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
	node->hole_size = 0;

	DRM_MM_BUG_ON(drm_mm_hole_follows(node));
}

static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
}

static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
}

static inline u64 rb_hole_size(struct rb_node *rb)
{
	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
}

static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
{
	struct rb_node *best = NULL;
	struct rb_node **link = &mm->holes_size.rb_node;

	while (*link) {
		struct rb_node *rb = *link;

		if (size <= rb_hole_size(rb)) {
			link = &rb->rb_left;
			best = rb;
		} else {
			link = &rb->rb_right;
		}
	}

	return rb_hole_size_to_node(best);
}

static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
{
	struct drm_mm_node *node = NULL;
	struct rb_node **link = &mm->holes_addr.rb_node;

	while (*link) {
		u64 hole_start;

		node = rb_hole_addr_to_node(*link);
		hole_start = __drm_mm_hole_node_start(node);

		if (addr < hole_start)
			link = &node->rb_hole_addr.rb_left;
		else if (addr > hole_start + node->hole_size)
			link = &node->rb_hole_addr.rb_right;
		else
			break;
	}

	return node;
}

static struct drm_mm_node *
first_hole(struct drm_mm *mm,
	   u64 start, u64 end, u64 size,
	   enum drm_mm_insert_mode mode)
{
	if (RB_EMPTY_ROOT(&mm->holes_size))
		return NULL;

	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return best_hole(mm, size);

	case DRM_MM_INSERT_LOW:
		return find_hole(mm, start);

	case DRM_MM_INSERT_HIGH:
		return find_hole(mm, end);

	case DRM_MM_INSERT_EVICT:
		return list_first_entry_or_null(&mm->hole_stack,
						struct drm_mm_node,
						hole_stack);
	}
}

static struct drm_mm_node *
next_hole(struct drm_mm *mm,
	  struct drm_mm_node *node,
	  enum drm_mm_insert_mode mode)
{
	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return rb_hole_size_to_node(rb_next(&node->rb_hole_size));

	case DRM_MM_INSERT_LOW:
		return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));

	case DRM_MM_INSERT_HIGH:
		return rb_hole_addr_to_node(rb_prev(&node->rb_hole_addr));

	case DRM_MM_INSERT_EVICT:
		node = list_next_entry(node, hole_stack);
		return &node->hole_stack == &mm->hole_stack ? NULL : node;
	}
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up &drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. All other
 * fields must be cleared to 0. This is useful to initialize the allocator with
 * preallocated objects which must be set up before the range allocator can be
 * set up, e.g. when taking over a firmware framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
381 */ 382 int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node) 383 { 384 u64 end = node->start + node->size; 385 struct drm_mm_node *hole; 386 u64 hole_start, hole_end; 387 u64 adj_start, adj_end; 388 389 end = node->start + node->size; 390 if (unlikely(end <= node->start)) 391 return -ENOSPC; 392 393 /* Find the relevant hole to add our node to */ 394 hole = find_hole(mm, node->start); 395 if (!hole) 396 return -ENOSPC; 397 398 adj_start = hole_start = __drm_mm_hole_node_start(hole); 399 adj_end = hole_end = hole_start + hole->hole_size; 400 401 if (mm->color_adjust) 402 mm->color_adjust(hole, node->color, &adj_start, &adj_end); 403 404 if (adj_start > node->start || adj_end < end) 405 return -ENOSPC; 406 407 node->mm = mm; 408 409 list_add(&node->node_list, &hole->node_list); 410 drm_mm_interval_tree_add_node(hole, node); 411 node->allocated = true; 412 node->hole_size = 0; 413 414 rm_hole(hole); 415 if (node->start > hole_start) 416 add_hole(hole); 417 if (end < hole_end) 418 add_hole(node); 419 420 save_stack(node); 421 return 0; 422 } 423 EXPORT_SYMBOL(drm_mm_reserve_node); 424 425 /** 426 * drm_mm_insert_node_in_range - ranged search for space and insert @node 427 * @mm: drm_mm to allocate from 428 * @node: preallocate node to insert 429 * @size: size of the allocation 430 * @alignment: alignment of the allocation 431 * @color: opaque tag value to use for this node 432 * @range_start: start of the allowed range for this node 433 * @range_end: end of the allowed range for this node 434 * @mode: fine-tune the allocation search and placement 435 * 436 * The preallocated @node must be cleared to 0. 437 * 438 * Returns: 439 * 0 on success, -ENOSPC if there's no suitable hole. 440 */ 441 int drm_mm_insert_node_in_range(struct drm_mm * const mm, 442 struct drm_mm_node * const node, 443 u64 size, u64 alignment, 444 unsigned long color, 445 u64 range_start, u64 range_end, 446 enum drm_mm_insert_mode mode) 447 { 448 struct drm_mm_node *hole; 449 u64 remainder_mask; 450 451 DRM_MM_BUG_ON(range_start >= range_end); 452 453 if (unlikely(size == 0 || range_end - range_start < size)) 454 return -ENOSPC; 455 456 if (alignment <= 1) 457 alignment = 0; 458 459 remainder_mask = is_power_of_2(alignment) ? 
	for (hole = first_hole(mm, range_start, range_end, size, mode); hole;
	     hole = next_hole(mm, hole, mode)) {
		u64 hole_start = __drm_mm_hole_node_start(hole);
		u64 hole_end = hole_start + hole->hole_size;
		u64 adj_start, adj_end;
		u64 col_start, col_end;

		if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end)
			break;

		if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start)
			break;

		col_start = hole_start;
		col_end = hole_end;
		if (mm->color_adjust)
			mm->color_adjust(hole, color, &col_start, &col_end);

		adj_start = max(col_start, range_start);
		adj_end = min(col_end, range_end);

		if (adj_end <= adj_start || adj_end - adj_start < size)
			continue;

		if (mode == DRM_MM_INSERT_HIGH)
			adj_start = adj_end - size;

		if (alignment) {
			u64 rem;

			if (likely(remainder_mask))
				rem = adj_start & remainder_mask;
			else
				div64_u64_rem(adj_start, alignment, &rem);
			if (rem) {
				adj_start -= rem;
				if (mode != DRM_MM_INSERT_HIGH)
					adj_start += alignment;

				if (adj_start < max(col_start, range_start) ||
				    min(col_end, range_end) - adj_start < size)
					continue;

				if (adj_end <= adj_start ||
				    adj_end - adj_start < size)
					continue;
			}
		}

		node->mm = mm;
		node->size = size;
		node->start = adj_start;
		node->color = color;
		node->hole_size = 0;

		list_add(&node->node_list, &hole->node_list);
		drm_mm_interval_tree_add_node(hole, node);
		node->allocated = true;

		rm_hole(hole);
		if (adj_start > hole_start)
			add_hole(hole);
		if (adj_start + size < hole_end)
			add_hole(node);

		save_stack(node);
		return 0;
	}

	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);

/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need
 * to be cleared again before it can be re-inserted into this or any other
 * drm_mm allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(!node->allocated);
	DRM_MM_BUG_ON(node->scanned_block);

	prev_node = list_prev_entry(node, node_list);

	if (drm_mm_hole_follows(node))
		rm_hole(node);

	drm_mm_interval_tree_remove(node, &mm->interval_tree);
	list_del(&node->node_list);
	node->allocated = false;

	if (drm_mm_hole_follows(prev_node))
		rm_hole(prev_node);
	add_hole(prev_node);
}
EXPORT_SYMBOL(drm_mm_remove_node);

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful for when drivers embed the drm_mm_node structure and hence
 * can't move allocations by reassigning pointers. It's a combination of remove
 * and insert with the guarantee that the allocation start will match.
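 *
 * A rough usage sketch (illustrative only; old_obj and new_obj are
 * hypothetical driver structures embedding a &drm_mm_node member named
 * "node", they are not defined in this file):
 *
 *	drm_mm_replace_node(&old_obj->node, &new_obj->node);
 *
 * After the call new_obj->node owns the range previously tracked by
 * old_obj->node, and old_obj->node is marked as unallocated.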
572 */ 573 void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new) 574 { 575 DRM_MM_BUG_ON(!old->allocated); 576 577 *new = *old; 578 579 list_replace(&old->node_list, &new->node_list); 580 rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree); 581 582 if (drm_mm_hole_follows(old)) { 583 list_replace(&old->hole_stack, &new->hole_stack); 584 rb_replace_node(&old->rb_hole_size, 585 &new->rb_hole_size, 586 &old->mm->holes_size); 587 rb_replace_node(&old->rb_hole_addr, 588 &new->rb_hole_addr, 589 &old->mm->holes_addr); 590 } 591 592 old->allocated = false; 593 new->allocated = true; 594 } 595 EXPORT_SYMBOL(drm_mm_replace_node); 596 597 /** 598 * DOC: lru scan roster 599 * 600 * Very often GPUs need to have continuous allocations for a given object. When 601 * evicting objects to make space for a new one it is therefore not most 602 * efficient when we simply start to select all objects from the tail of an LRU 603 * until there's a suitable hole: Especially for big objects or nodes that 604 * otherwise have special allocation constraints there's a good chance we evict 605 * lots of (smaller) objects unnecessarily. 606 * 607 * The DRM range allocator supports this use-case through the scanning 608 * interfaces. First a scan operation needs to be initialized with 609 * drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds 610 * objects to the roster, probably by walking an LRU list, but this can be 611 * freely implemented. Eviction candiates are added using 612 * drm_mm_scan_add_block() until a suitable hole is found or there are no 613 * further evictable objects. Eviction roster metadata is tracked in &struct 614 * drm_mm_scan. 615 * 616 * The driver must walk through all objects again in exactly the reverse 617 * order to restore the allocator state. Note that while the allocator is used 618 * in the scan mode no other operation is allowed. 619 * 620 * Finally the driver evicts all objects selected (drm_mm_scan_remove_block() 621 * reported true) in the scan, and any overlapping nodes after color adjustment 622 * (drm_mm_scan_color_evict()). Adding and removing an object is O(1), and 623 * since freeing a node is also O(1) the overall complexity is 624 * O(scanned_objects). So like the free stack which needs to be walked before a 625 * scan operation even begins this is linear in the number of objects. It 626 * doesn't seem to hurt too badly. 627 */ 628 629 /** 630 * drm_mm_scan_init_with_range - initialize range-restricted lru scanning 631 * @scan: scan state 632 * @mm: drm_mm to scan 633 * @size: size of the allocation 634 * @alignment: alignment of the allocation 635 * @color: opaque tag value to use for the allocation 636 * @start: start of the allowed range for the allocation 637 * @end: end of the allowed range for the allocation 638 * @mode: fine-tune the allocation search and placement 639 * 640 * This simply sets up the scanning routines with the parameters for the desired 641 * hole. 642 * 643 * Warning: 644 * As long as the scan list is non-empty, no other operations than 645 * adding/removing nodes to/from the scan list are allowed. 
646 */ 647 void drm_mm_scan_init_with_range(struct drm_mm_scan *scan, 648 struct drm_mm *mm, 649 u64 size, 650 u64 alignment, 651 unsigned long color, 652 u64 start, 653 u64 end, 654 enum drm_mm_insert_mode mode) 655 { 656 DRM_MM_BUG_ON(start >= end); 657 DRM_MM_BUG_ON(!size || size > end - start); 658 DRM_MM_BUG_ON(mm->scan_active); 659 660 scan->mm = mm; 661 662 if (alignment <= 1) 663 alignment = 0; 664 665 scan->color = color; 666 scan->alignment = alignment; 667 scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0; 668 scan->size = size; 669 scan->mode = mode; 670 671 DRM_MM_BUG_ON(end <= start); 672 scan->range_start = start; 673 scan->range_end = end; 674 675 scan->hit_start = U64_MAX; 676 scan->hit_end = 0; 677 } 678 EXPORT_SYMBOL(drm_mm_scan_init_with_range); 679 680 /** 681 * drm_mm_scan_add_block - add a node to the scan list 682 * @scan: the active drm_mm scanner 683 * @node: drm_mm_node to add 684 * 685 * Add a node to the scan list that might be freed to make space for the desired 686 * hole. 687 * 688 * Returns: 689 * True if a hole has been found, false otherwise. 690 */ 691 bool drm_mm_scan_add_block(struct drm_mm_scan *scan, 692 struct drm_mm_node *node) 693 { 694 struct drm_mm *mm = scan->mm; 695 struct drm_mm_node *hole; 696 u64 hole_start, hole_end; 697 u64 col_start, col_end; 698 u64 adj_start, adj_end; 699 700 DRM_MM_BUG_ON(node->mm != mm); 701 DRM_MM_BUG_ON(!node->allocated); 702 DRM_MM_BUG_ON(node->scanned_block); 703 node->scanned_block = true; 704 mm->scan_active++; 705 706 /* Remove this block from the node_list so that we enlarge the hole 707 * (distance between the end of our previous node and the start of 708 * or next), without poisoning the link so that we can restore it 709 * later in drm_mm_scan_remove_block(). 
710 */ 711 hole = list_prev_entry(node, node_list); 712 DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node); 713 __list_del_entry(&node->node_list); 714 715 hole_start = __drm_mm_hole_node_start(hole); 716 hole_end = __drm_mm_hole_node_end(hole); 717 718 col_start = hole_start; 719 col_end = hole_end; 720 if (mm->color_adjust) 721 mm->color_adjust(hole, scan->color, &col_start, &col_end); 722 723 adj_start = max(col_start, scan->range_start); 724 adj_end = min(col_end, scan->range_end); 725 if (adj_end <= adj_start || adj_end - adj_start < scan->size) 726 return false; 727 728 if (scan->mode == DRM_MM_INSERT_HIGH) 729 adj_start = adj_end - scan->size; 730 731 if (scan->alignment) { 732 u64 rem; 733 734 if (likely(scan->remainder_mask)) 735 rem = adj_start & scan->remainder_mask; 736 else 737 div64_u64_rem(adj_start, scan->alignment, &rem); 738 if (rem) { 739 adj_start -= rem; 740 if (scan->mode != DRM_MM_INSERT_HIGH) 741 adj_start += scan->alignment; 742 if (adj_start < max(col_start, scan->range_start) || 743 min(col_end, scan->range_end) - adj_start < scan->size) 744 return false; 745 746 if (adj_end <= adj_start || 747 adj_end - adj_start < scan->size) 748 return false; 749 } 750 } 751 752 scan->hit_start = adj_start; 753 scan->hit_end = adj_start + scan->size; 754 755 DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end); 756 DRM_MM_BUG_ON(scan->hit_start < hole_start); 757 DRM_MM_BUG_ON(scan->hit_end > hole_end); 758 759 return true; 760 } 761 EXPORT_SYMBOL(drm_mm_scan_add_block); 762 763 /** 764 * drm_mm_scan_remove_block - remove a node from the scan list 765 * @scan: the active drm_mm scanner 766 * @node: drm_mm_node to remove 767 * 768 * Nodes **must** be removed in exactly the reverse order from the scan list as 769 * they have been added (e.g. using list_add() as they are added and then 770 * list_for_each() over that eviction list to remove), otherwise the internal 771 * state of the memory manager will be corrupted. 772 * 773 * When the scan list is empty, the selected memory nodes can be freed. An 774 * immediately following drm_mm_insert_node_in_range_generic() or one of the 775 * simpler versions of that function with !DRM_MM_SEARCH_BEST will then return 776 * the just freed block (because its at the top of the free_stack list). 777 * 778 * Returns: 779 * True if this block should be evicted, false otherwise. Will always 780 * return false when no hole has been found. 781 */ 782 bool drm_mm_scan_remove_block(struct drm_mm_scan *scan, 783 struct drm_mm_node *node) 784 { 785 struct drm_mm_node *prev_node; 786 787 DRM_MM_BUG_ON(node->mm != scan->mm); 788 DRM_MM_BUG_ON(!node->scanned_block); 789 node->scanned_block = false; 790 791 DRM_MM_BUG_ON(!node->mm->scan_active); 792 node->mm->scan_active--; 793 794 /* During drm_mm_scan_add_block() we decoupled this node leaving 795 * its pointers intact. Now that the caller is walking back along 796 * the eviction list we can restore this block into its rightful 797 * place on the full node_list. To confirm that the caller is walking 798 * backwards correctly we check that prev_node->next == node->next, 799 * i.e. both believe the same node should be on the other side of the 800 * hole. 
801 */ 802 prev_node = list_prev_entry(node, node_list); 803 DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) != 804 list_next_entry(node, node_list)); 805 list_add(&node->node_list, &prev_node->node_list); 806 807 return (node->start + node->size > scan->hit_start && 808 node->start < scan->hit_end); 809 } 810 EXPORT_SYMBOL(drm_mm_scan_remove_block); 811 812 /** 813 * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole 814 * @scan: drm_mm scan with target hole 815 * 816 * After completing an eviction scan and removing the selected nodes, we may 817 * need to remove a few more nodes from either side of the target hole if 818 * mm.color_adjust is being used. 819 * 820 * Returns: 821 * A node to evict, or NULL if there are no overlapping nodes. 822 */ 823 struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan) 824 { 825 struct drm_mm *mm = scan->mm; 826 struct drm_mm_node *hole; 827 u64 hole_start, hole_end; 828 829 DRM_MM_BUG_ON(list_empty(&mm->hole_stack)); 830 831 if (!mm->color_adjust) 832 return NULL; 833 834 hole = list_first_entry(&mm->hole_stack, typeof(*hole), hole_stack); 835 hole_start = __drm_mm_hole_node_start(hole); 836 hole_end = hole_start + hole->hole_size; 837 838 DRM_MM_BUG_ON(hole_start > scan->hit_start); 839 DRM_MM_BUG_ON(hole_end < scan->hit_end); 840 841 mm->color_adjust(hole, scan->color, &hole_start, &hole_end); 842 if (hole_start > scan->hit_start) 843 return hole; 844 if (hole_end < scan->hit_end) 845 return list_next_entry(hole, node_list); 846 847 return NULL; 848 } 849 EXPORT_SYMBOL(drm_mm_scan_color_evict); 850 851 /** 852 * drm_mm_init - initialize a drm-mm allocator 853 * @mm: the drm_mm structure to initialize 854 * @start: start of the range managed by @mm 855 * @size: end of the range managed by @mm 856 * 857 * Note that @mm must be cleared to 0 before calling this function. 858 */ 859 void drm_mm_init(struct drm_mm *mm, u64 start, u64 size) 860 { 861 DRM_MM_BUG_ON(start + size <= start); 862 863 mm->color_adjust = NULL; 864 865 INIT_LIST_HEAD(&mm->hole_stack); 866 mm->interval_tree = RB_ROOT; 867 mm->holes_size = RB_ROOT; 868 mm->holes_addr = RB_ROOT; 869 870 /* Clever trick to avoid a special case in the free hole tracking. */ 871 INIT_LIST_HEAD(&mm->head_node.node_list); 872 mm->head_node.allocated = false; 873 mm->head_node.mm = mm; 874 mm->head_node.start = start + size; 875 mm->head_node.size = -size; 876 add_hole(&mm->head_node); 877 878 mm->scan_active = 0; 879 } 880 EXPORT_SYMBOL(drm_mm_init); 881 882 /** 883 * drm_mm_takedown - clean up a drm_mm allocator 884 * @mm: drm_mm allocator to clean up 885 * 886 * Note that it is a bug to call this function on an allocator which is not 887 * clean. 
888 */ 889 void drm_mm_takedown(struct drm_mm *mm) 890 { 891 if (WARN(!drm_mm_clean(mm), 892 "Memory manager not clean during takedown.\n")) 893 show_leaks(mm); 894 } 895 EXPORT_SYMBOL(drm_mm_takedown); 896 897 static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry) 898 { 899 u64 start, size; 900 901 size = entry->hole_size; 902 if (size) { 903 start = drm_mm_hole_node_start(entry); 904 drm_printf(p, "%#018llx-%#018llx: %llu: free\n", 905 start, start + size, size); 906 } 907 908 return size; 909 } 910 /** 911 * drm_mm_print - print allocator state 912 * @mm: drm_mm allocator to print 913 * @p: DRM printer to use 914 */ 915 void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p) 916 { 917 const struct drm_mm_node *entry; 918 u64 total_used = 0, total_free = 0, total = 0; 919 920 total_free += drm_mm_dump_hole(p, &mm->head_node); 921 922 drm_mm_for_each_node(entry, mm) { 923 drm_printf(p, "%#018llx-%#018llx: %llu: used\n", entry->start, 924 entry->start + entry->size, entry->size); 925 total_used += entry->size; 926 total_free += drm_mm_dump_hole(p, entry); 927 } 928 total = total_free + total_used; 929 930 drm_printf(p, "total: %llu, used %llu free %llu\n", total, 931 total_used, total_free); 932 } 933 EXPORT_SYMBOL(drm_mm_print); 934