/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if heavy fragmentation is expected.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>
#include <linux/interval_tree_generic.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. Drivers are free to use the
 * resource allocator from the Linux core if it suits them; the upside of
 * drm_mm is that it lives in the DRM core, which makes it easier to extend
 * for some of the crazier special purpose needs of GPUs.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * datastructures. drm_mm itself will not do any allocations of its own, so if
 * drivers choose not to embed nodes they still need to allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This
 * is useful for taking over initial mode setting configurations from the
 * firmware, where an object needs to be created which exactly matches the
 * firmware's scanout target. As long as the range is still free it can be
 * inserted anytime after the allocator is initialized, which helps with
 * avoiding looped dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic datastructures seems to be a fairly decent approach to
 * clustering allocations and avoiding too much fragmentation. This means free
 * space searches are O(num_holes). Given all the fancy features drm_mm
 * supports, something better would be fairly complex, and since gfx thrashing
 * is a fairly steep performance cliff this is not a real concern. Removing a
 * node again is O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just
 * an opaque unsigned long) which in conjunction with a driver callback can be
 * used to implement sophisticated placement restrictions. The i915 DRM driver
 * uses this to implement guard pages between incompatible caching domains in
 * the graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally iteration helpers to walk all nodes and all holes are provided, as
 * are some basic allocator dumpers for debugging.
 */

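/*
 * A minimal usage sketch (illustrative only, not part of the original file;
 * all "my_*" names are hypothetical): a driver embeds a drm_mm_node in its
 * own object, carves a range out of the allocator and releases it again.
 */
struct my_gtt_object_example {
	struct drm_mm_node node;	/* embedded, no separate allocation */
};

static int __maybe_unused my_gtt_alloc_example(struct drm_mm *mm,
					       struct my_gtt_object_example *obj,
					       u64 size)
{
	/* the node must be cleared to 0 before its first insertion */
	memset(&obj->node, 0, sizeof(obj->node));

	/* bottom-up search for the first hole that fits @size */
	return drm_mm_insert_node_generic(mm, &obj->node, size,
					  0 /* alignment */, 0 /* color */,
					  DRM_MM_SEARCH_DEFAULT,
					  DRM_MM_CREATE_DEFAULT);
}

static void __maybe_unused my_gtt_free_example(struct my_gtt_object_example *obj)
{
	drm_mm_remove_node(&obj->node);
}
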
static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						u64 size,
						unsigned alignment,
						unsigned long color,
						enum drm_mm_search_flags flags);
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
						u64 size,
						unsigned alignment,
						unsigned long color,
						u64 start,
						u64 end,
						enum drm_mm_search_flags flags);

#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>

#define STACKDEPTH 32
#define BUFSZ 4096

static noinline void save_stack(struct drm_mm_node *node)
{
	unsigned long entries[STACKDEPTH];
	struct stack_trace trace = {
		.entries = entries,
		.max_entries = STACKDEPTH,
		.skip = 1
	};

	save_stack_trace(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	/* May be called under spinlock, so avoid sleeping */
	node->stack = depot_save_stack(&trace, GFP_NOWAIT);
}

static void show_leaks(struct drm_mm *mm)
{
	struct drm_mm_node *node;
	unsigned long entries[STACKDEPTH];
	char *buf;

	buf = kmalloc(BUFSZ, GFP_KERNEL);
	if (!buf)
		return;

	list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
		struct stack_trace trace = {
			.entries = entries,
			.max_entries = STACKDEPTH
		};

		if (!node->stack) {
			DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
				  node->start, node->size);
			continue;
		}

		depot_fetch_stack(node->stack, &trace);
		snprint_stack_trace(buf, BUFSZ, &trace, 0);
		DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
			  node->start, node->size, buf);
	}

	kfree(buf);
}

#undef STACKDEPTH
#undef BUFSZ
#else
static void save_stack(struct drm_mm_node *node) { }
static void show_leaks(struct drm_mm *mm) { }
#endif

#define START(node) ((node)->start)
#define LAST(node)  ((node)->start + (node)->size - 1)

INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
		     u64, __subtree_last,
		     START, LAST, static inline, drm_mm_interval_tree)

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
{
	return drm_mm_interval_tree_iter_first((struct rb_root *)&mm->interval_tree,
					       start, last);
}
EXPORT_SYMBOL(__drm_mm_interval_first);

static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
					  struct drm_mm_node *node)
{
	struct drm_mm *mm = hole_node->mm;
	struct rb_node **link, *rb;
	struct drm_mm_node *parent;

	node->__subtree_last = LAST(node);

	if (hole_node->allocated) {
		rb = &hole_node->rb;
		while (rb) {
			parent = rb_entry(rb, struct drm_mm_node, rb);
			if (parent->__subtree_last >= node->__subtree_last)
				break;

			parent->__subtree_last = node->__subtree_last;
			rb = rb_parent(rb);
		}

		rb = &hole_node->rb;
		link = &hole_node->rb.rb_right;
	} else {
		rb = NULL;
		link = &mm->interval_tree.rb_node;
	}

	while (*link) {
		rb = *link;
		parent = rb_entry(rb, struct drm_mm_node, rb);
		if (parent->__subtree_last < node->__subtree_last)
			parent->__subtree_last = node->__subtree_last;
		if (node->start < parent->start)
			link = &parent->rb.rb_left;
		else
			link = &parent->rb.rb_right;
	}

	rb_link_node(&node->rb, rb, link);
	rb_insert_augmented(&node->rb,
			    &mm->interval_tree,
			    &drm_mm_interval_tree_augment);
}

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 u64 size, unsigned alignment,
				 unsigned long color,
				 enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	DRM_MM_BUG_ON(node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 tmp = adj_start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	DRM_MM_BUG_ON(adj_start < hole_start);
	DRM_MM_BUG_ON(adj_end > hole_end);

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	list_add(&node->node_list, &hole_node->node_list);

	drm_mm_interval_tree_add_node(hole_node, node);

	DRM_MM_BUG_ON(node->start + node->size > adj_end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}

	save_stack(node);
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. This is
 * useful to initialize the allocator with preallocated objects which must be
 * set up before the range allocator can be set up, e.g. when taking over a
 * firmware framebuffer.
 *
 * Returns:
 * 0 on success, -EINVAL if @node has zero size, -ENOSPC if there's no hole
 * where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	u64 end = node->start + node->size;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	if (WARN_ON(node->size == 0))
		return -EINVAL;

	/* Find the relevant hole to add our node to */
	hole = drm_mm_interval_tree_iter_first(&mm->interval_tree,
					       node->start, ~(u64)0);
	if (hole) {
		if (hole->start < end)
			return -ENOSPC;
	} else {
		hole = list_entry(drm_mm_nodes(mm), typeof(*hole), node_list);
	}

	hole = list_last_entry(&hole->node_list, typeof(*hole), node_list);
	if (!hole->hole_follows)
		return -ENOSPC;

	adj_start = hole_start = __drm_mm_hole_node_start(hole);
	adj_end = hole_end = __drm_mm_hole_node_end(hole);

	if (mm->color_adjust)
		mm->color_adjust(hole, node->color, &adj_start, &adj_end);

	if (adj_start > node->start || adj_end < end)
		return -ENOSPC;

	node->mm = mm;
	node->allocated = 1;

	list_add(&node->node_list, &hole->node_list);

	drm_mm_interval_tree_add_node(hole, node);

	if (node->start == hole_start) {
		hole->hole_follows = 0;
		list_del(&hole->hole_stack);
	}

	node->hole_follows = 0;
	if (end != hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}

	save_stack(node);

	return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);

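/*
 * An illustrative sketch of block reservation (hypothetical names, not part
 * of the original file): taking over a firmware-initialized scanout buffer
 * by reserving the exact range the firmware picked, before handing the rest
 * of the range over to regular allocations.
 */
static int __maybe_unused my_reserve_fw_fb_example(struct drm_mm *mm,
						   struct drm_mm_node *node,
						   u64 fw_start, u64 fw_size)
{
	/* start, size and color must be set up by the caller */
	memset(node, 0, sizeof(*node));
	node->start = fw_start;
	node->size = fw_size;
	node->color = 0;

	/* fails with -ENOSPC if the range is no longer free */
	return drm_mm_reserve_node(mm, node);
}
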
/**
 * drm_mm_insert_node_generic - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			       u64 size, unsigned alignment,
			       unsigned long color,
			       enum drm_mm_search_flags sflags,
			       enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	if (WARN_ON(size == 0))
		return -EINVAL;

	hole_node = drm_mm_search_free_generic(mm, size, alignment,
					       color, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);

static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       u64 size, unsigned alignment,
				       unsigned long color,
				       u64 start, u64 end,
				       enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	DRM_MM_BUG_ON(!hole_node->hole_follows || node->allocated);

	if (adj_start < start)
		adj_start = start;
	if (adj_end > end)
		adj_end = end;

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 tmp = adj_start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	list_add(&node->node_list, &hole_node->node_list);

	drm_mm_interval_tree_add_node(hole_node, node);

	DRM_MM_BUG_ON(node->start < start);
	DRM_MM_BUG_ON(node->start < adj_start);
	DRM_MM_BUG_ON(node->start + node->size > adj_end);
	DRM_MM_BUG_ON(node->start + node->size > end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}

	save_stack(node);
}

/**
 * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					u64 size, unsigned alignment,
					unsigned long color,
					u64 start, u64 end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	if (WARN_ON(size == 0))
		return -EINVAL;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node,
				   size, alignment, color,
				   start, end, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);

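/*
 * A sketch of a range- and alignment-restricted allocation (illustrative
 * only, values made up): place @node inside the low 4 GiB of the managed
 * range, aligned to 64 KiB, searching and allocating top-down.
 */
static int __maybe_unused my_insert_low_4g_example(struct drm_mm *mm,
						   struct drm_mm_node *node,
						   u64 size)
{
	memset(node, 0, sizeof(*node));

	return drm_mm_insert_node_in_range_generic(mm, node, size,
						   1 << 16 /* 64 KiB */,
						   0 /* color */,
						   0, 1ULL << 32,
						   DRM_MM_SEARCH_BELOW,
						   DRM_MM_CREATE_TOP);
}
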
/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need
 * to be cleared again before it can be re-inserted into this or any other
 * drm_mm allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(!node->allocated);
	DRM_MM_BUG_ON(node->scanned_block ||
		      node->scanned_prev_free ||
		      node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		DRM_MM_BUG_ON(__drm_mm_hole_node_start(node) ==
			      __drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		DRM_MM_BUG_ON(__drm_mm_hole_node_start(node) !=
			      __drm_mm_hole_node_end(node));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	drm_mm_interval_tree_remove(node, &mm->interval_tree);
	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		u64 tmp = start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem)
			start += alignment - rem;
	}

	return end >= start + size;
}

static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						      u64 size,
						      unsigned alignment,
						      unsigned long color,
						      enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	DRM_MM_BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							u64 size,
							unsigned alignment,
							unsigned long color,
							u64 start,
							u64 end,
							enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	DRM_MM_BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (adj_start < start)
			adj_start = start;
		if (adj_end > end)
			adj_end = end;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful when drivers embed the drm_mm_node structure and hence can't
 * move allocations by reassigning pointers. It's a combination of remove and
 * insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	DRM_MM_BUG_ON(!old->allocated);

	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;
	new->__subtree_last = old->__subtree_last;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);

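/*
 * An illustrative sketch (hypothetical names): handing the allocation of a
 * dying object over to its replacement while keeping the same range, instead
 * of a remove/insert cycle during which the hole could be claimed by someone
 * else.
 */
static void __maybe_unused my_handover_example(struct drm_mm_node *old_node,
					       struct drm_mm_node *new_node)
{
	/* @new_node inherits start, size, color and the hole tracking state */
	drm_mm_replace_node(old_node, new_node);
	/* @old_node is now unallocated and may be freed or reused */
}
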
/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object.
 * When evicting objects to make space for a new one it is therefore not the
 * most efficient strategy to simply select objects from the tail of an LRU
 * until there's a suitable hole: Especially for big objects or nodes that
 * otherwise have special allocation constraints there's a good chance we
 * evict lots of (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_init_scan() or drm_mm_init_scan_with_range(). Then the driver adds
 * objects to the roster (probably by walking an LRU list, but this can be
 * freely implemented) until a suitable hole is found or there's no further
 * evictable object.
 *
 * Then the driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected in the scan. Adding and
 * removing an object is O(1), and since freeing a node is also O(1) the
 * overall complexity is O(scanned_objects). So, like the free stack which
 * needs to be walked before a scan operation even begins, this is linear in
 * the number of objects. It doesn't seem to hurt badly. A sketch of this
 * flow follows below.
 */

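/*
 * A sketch of the scanning flow described above, with hypothetical driver
 * structures and list handling (illustrative only, not part of the original
 * file): walk an LRU, build up the scan roster until a hole is found, unwind
 * the roster in exactly the reverse order, then evict the selected objects.
 */
struct my_evict_obj_example {
	struct drm_mm_node node;
	struct list_head lru_link;	/* link in the driver's LRU */
	struct list_head evict_link;	/* temporary roster linkage */
};

static int __maybe_unused my_evict_something_example(struct drm_mm *mm,
						     struct list_head *lru,
						     u64 size,
						     unsigned alignment)
{
	struct my_evict_obj_example *obj, *next;
	LIST_HEAD(evict_list);
	bool found = false;

	drm_mm_init_scan(mm, size, alignment, 0 /* color */);

	/* add objects, least recently used first, until a hole appears */
	list_for_each_entry(obj, lru, lru_link) {
		/* list_add() prepends, so evict_list ends up reversed */
		list_add(&obj->evict_link, &evict_list);
		if (drm_mm_scan_add_block(&obj->node)) {
			found = true;
			break;
		}
	}

	/* unwind in exactly the reverse order of addition */
	list_for_each_entry_safe(obj, next, &evict_list, evict_link) {
		if (!drm_mm_scan_remove_block(&obj->node)) {
			/* not part of the hole: keep this object */
			list_del(&obj->evict_link);
		}
	}

	/* the roster is empty again, so other operations are allowed */
	list_for_each_entry_safe(obj, next, &evict_list, evict_link) {
		list_del(&obj->evict_link);
		drm_mm_remove_node(&obj->node);
		/* a real driver would also unbind/release the object here */
	}

	return found ? 0 : -ENOSPC;
}
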
/**
 * drm_mm_init_scan - initialize lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. Note that there's no need to specify allocation flags, since
 * they only change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
		      u64 size,
		      unsigned alignment,
		      unsigned long color)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * drm_mm_init_scan_with_range - initialize range-restricted lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. Note that there's no need to specify allocation flags, since
 * they only change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 u64 size,
				 unsigned alignment,
				 unsigned long color,
				 u64 start,
				 u64 end)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	mm->scanned_blocks++;

	DRM_MM_BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
	adj_end = hole_end = drm_mm_hole_node_end(prev_node);

	if (mm->scan_check_range) {
		if (adj_start < mm->scan_start)
			adj_start = mm->scan_start;
		if (adj_end > mm->scan_end)
			adj_end = mm->scan_end;
	}

	if (mm->color_adjust)
		mm->color_adjust(prev_node, mm->scan_color,
				 &adj_start, &adj_end);

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_end = hole_end;
		return true;
	}

	return false;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @node: drm_mm_node to remove
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
 * return the just freed block (because it's at the top of the hole_stack
 * list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	DRM_MM_BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	list_add(&node->node_list, &prev_node->node_list);

	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
		node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_clean - checks whether an allocator is clean
 * @mm: drm_mm allocator to check
 *
 * Returns:
 * True if the allocator is completely free, false if there's still a node
 * allocated in it.
 */
bool drm_mm_clean(const struct drm_mm *mm)
{
	const struct list_head *head = drm_mm_nodes(mm);

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	mm->scanned_blocks = 0;

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	mm->head_node.allocated = 0;
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->interval_tree = RB_ROOT;

	mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);

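/*
 * For illustration (values made up): after
 *
 *	drm_mm_init(&mm, 0x100000, 0x200000);
 *
 * the allocator hands out ranges within [0x100000, 0x300000), i.e. @size is
 * the length of the managed range, not its end. The head_node set up above
 * sits outside that range, so the initial free space is simply the hole
 * following head_node, and no special case is needed.
 */
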
/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	if (WARN(!list_empty(drm_mm_nodes(mm)),
		 "Memory manager not clean during takedown.\n"))
		show_leaks(mm);
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_debug_hole(const struct drm_mm_node *entry,
			     const char *prefix)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start,
			 hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_debug_table - dump allocator state to dmesg
 * @mm: drm_mm allocator to dump
 * @prefix: prefix to use for dumping to dmesg
 */
void drm_mm_debug_table(const struct drm_mm *mm, const char *prefix)
{
	const struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_debug_hole(&mm->head_node, prefix);

	drm_mm_for_each_node(entry, mm) {
		pr_debug("%s %#llx-%#llx: %llu: used\n", prefix, entry->start,
			 entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_debug_hole(entry, prefix);
	}
	total = total_free + total_used;

	pr_debug("%s total: %llu, used %llu free %llu\n", prefix, total,
		 total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
static u64 drm_mm_dump_hole(struct seq_file *m, const struct drm_mm_node *entry)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		seq_printf(m, "%#018llx-%#018llx: %llu: free\n", hole_start,
			   hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_dump_table - dump allocator state to a seq_file
 * @m: seq_file to dump to
 * @mm: drm_mm allocator to dump
 */
int drm_mm_dump_table(struct seq_file *m, const struct drm_mm *mm)
{
	const struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(m, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(m, entry);
	}
	total = total_free + total_used;

	seq_printf(m, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif