/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if heavy fragmentation is expected.
 *
 * Aligned allocations could also be handled more efficiently.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

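/*
 * Minimal usage sketch (illustrative only, not part of the original file).
 * A driver typically embeds a struct drm_mm, initializes it over the managed
 * range and then inserts and removes nodes; the name "mgr" and the 1 MiB
 * range below are placeholders:
 *
 *	struct drm_mm mgr;
 *	struct drm_mm_node node = { 0 };
 *
 *	drm_mm_init(&mgr, 0, 1024 * 1024);
 *	if (drm_mm_insert_node(&mgr, &node, 4096, 4096) == 0) {
 *		(node.start now holds a 4 KiB-aligned offset)
 *		drm_mm_remove_node(&node);
 *	}
 *	drm_mm_takedown(&mgr);
 */
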
#include <dev/drm2/drmP.h>
#include <dev/drm2/drm_mm.h>

#define MM_UNUSED_TARGET 4

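/*
 * Allocate a drm_mm_node.  If the allocation fails (possible in the atomic,
 * M_NOWAIT case), fall back to the manager's cache of preallocated unused
 * nodes filled by drm_mm_pre_get().  Returns NULL if both sources are empty.
 */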
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	child = malloc(sizeof(*child), DRM_MEM_MM, M_ZERO |
	    (atomic ? M_NOWAIT : M_WAITOK));

	if (unlikely(child == NULL)) {
		mtx_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child =
			    list_entry(mm->unused_nodes.next,
				       struct drm_mm_node, node_list);
			list_del(&child->node_list);
			--mm->num_unused;
		}
		mtx_unlock(&mm->unused_lock);
	}
	return child;
}

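/*
 * Top up the cache of unused nodes to MM_UNUSED_TARGET entries using sleeping
 * allocations, so that later atomic allocations can be satisfied from it.
 */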
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	mtx_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		mtx_unlock(&mm->unused_lock);
		node = malloc(sizeof(*node), DRM_MEM_MM, M_WAITOK);
		mtx_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			mtx_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->node_list, &mm->unused_nodes);
	}
	mtx_unlock(&mm->unused_lock);
	return 0;
}

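/*
 * Allocated nodes are kept on an address-ordered list.  When a node has
 * hole_follows set, the gap between the end of that node and the start of the
 * next node is free; these helpers return the boundaries of that gap.
 */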
static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
	return hole_node->start + hole_node->size;
}

static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
	struct drm_mm_node *next_node =
		list_entry(hole_node->node_list.next, struct drm_mm_node,
			   node_list);

	return next_node->start;
}

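/*
 * Carve an allocation of the given size and alignment out of the hole that
 * follows hole_node: initialize *node, link it into the address-ordered list
 * and update the hole-stack bookkeeping for any space that remains.
 */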
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 unsigned long size, unsigned alignment)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long tmp = 0, wasted = 0;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);

	KASSERT(hole_node->hole_follows && !node->allocated, ("hole_node"));

	if (alignment)
		tmp = hole_start % alignment;

	if (!tmp) {
		hole_node->hole_follows = 0;
		list_del_init(&hole_node->hole_stack);
	} else
		wasted = alignment - tmp;

	node->start = hole_start + wasted;
	node->size = size;
	node->mm = mm;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	KASSERT(node->start + node->size <= hole_end, ("hole pos"));

	if (node->start + node->size < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	} else {
		node->hole_follows = 0;
	}
}

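/*
 * Allocate a node (from malloc or, when atomic, possibly from the unused-node
 * cache) and place it in the hole that follows hole_node.
 */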
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
					     unsigned long size,
					     unsigned alignment,
					     int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper(hole_node, node, size, alignment);

	return node;
}

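/*
 * Search for a suitable hole and fill in the caller-provided node.  Returns
 * -ENOSPC when no hole large enough is available.
 */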
int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
		       unsigned long size, unsigned alignment)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free(mm, size, alignment, 0);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment);

	return 0;
}

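/*
 * Range-restricted variant of drm_mm_insert_helper(): the allocation is
 * confined to [start, end), skipping any part of the hole below start.
 */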
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       unsigned long size, unsigned alignment,
				       unsigned long start, unsigned long end)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long tmp = 0, wasted = 0;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);

	KASSERT(hole_node->hole_follows && !node->allocated, ("hole_node"));

	if (hole_start < start)
		wasted += start - hole_start;
	if (alignment)
		tmp = (hole_start + wasted) % alignment;

	if (tmp)
		wasted += alignment - tmp;

	if (!wasted) {
		hole_node->hole_follows = 0;
		list_del_init(&hole_node->hole_stack);
	}

	node->start = hole_start + wasted;
	node->size = size;
	node->mm = mm;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	KASSERT(node->start + node->size <= hole_end, ("hole_end"));
	KASSERT(node->start + node->size <= end, ("end"));

	if (node->start + node->size < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	} else {
		node->hole_follows = 0;
	}
}

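/*
 * Range-restricted variant of drm_mm_get_block_generic().
 */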
struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper_range(hole_node, node, size, alignment,
				   start, end);

	return node;
}

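/*
 * Range-restricted variant of drm_mm_insert_node().  Returns -ENOSPC when no
 * suitable hole exists inside [start, end).
 */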
int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
				unsigned long size, unsigned alignment,
				unsigned long start, unsigned long end)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_in_range(mm, size, alignment,
						start, end, 0);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node, size, alignment,
				   start, end);

	return 0;
}

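/*
 * Unlink an allocated node from the manager and merge the space it occupied
 * into the hole following the previous node.  The node itself is not freed.
 */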
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	KASSERT(!node->scanned_block && !node->scanned_prev_free
	    && !node->scanned_next_free, ("node"));

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		KASSERT(drm_mm_hole_node_start(node)
			!= drm_mm_hole_node_end(node), ("hole_follows"));
		list_del(&node->hole_stack);
	} else
		KASSERT(drm_mm_hole_node_start(node)
		       == drm_mm_hole_node_end(node), ("!hole_follows"));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}

/*
 * Put a block back.  The space it occupied is merged into the neighbouring
 * hole by drm_mm_remove_node(); the drm_mm_node itself is returned to the
 * unused-node cache if there is room, otherwise it is freed.
 */

void drm_mm_put_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	drm_mm_remove_node(node);

	mtx_lock(&mm->unused_lock);
	if (mm->num_unused < MM_UNUSED_TARGET) {
		list_add(&node->node_list, &mm->unused_nodes);
		++mm->num_unused;
	} else
		free(node, DRM_MEM_MM);
	mtx_unlock(&mm->unused_lock);
}

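/*
 * Return 1 if a hole spanning [start, end) can fit an allocation of the given
 * size and alignment (accounting for the bytes wasted to reach alignment).
 */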
static int check_free_hole(unsigned long start, unsigned long end,
			   unsigned long size, unsigned alignment)
{
	unsigned wasted = 0;

	if (end - start < size)
		return 0;

	if (alignment) {
		unsigned tmp = start % alignment;
		if (tmp)
			wasted = alignment - tmp;
	}

	if (end >= start + size + wasted) {
		return 1;
	}

	return 0;
}

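/*
 * Walk the hole stack for a hole that can fit size/alignment.  Without
 * best_match the first fit is returned; with best_match the whole stack is
 * scanned and the entry with the smallest size is preferred.
 */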
struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
				       unsigned long size,
				       unsigned alignment, int best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
		KASSERT(entry->hole_follows, ("hole_follows"));
		if (!check_free_hole(drm_mm_hole_node_start(entry),
				     drm_mm_hole_node_end(entry),
				     size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}

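/*
 * As drm_mm_search_free(), but each hole is clipped to [start, end) before it
 * is checked.
 */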
struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						int best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	KASSERT(!mm->scanned_blocks, ("scanned"));

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
		unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
			start : drm_mm_hole_node_start(entry);
		unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
			end : drm_mm_hole_node_end(entry);

		KASSERT(entry->hole_follows, ("hole_follows"));
		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}

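/*
 * Let new take over the mapping described by old: new inherits old's place in
 * the node list and hole stack as well as its extent; old is marked free.
 */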
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;

	old->allocated = 0;
	new->allocated = 1;
}

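/*
 * Begin an eviction scan for a hole of the given size and alignment.  Blocks
 * are then tentatively added with drm_mm_scan_add_block() until it reports a
 * hit, and must all be handed back through drm_mm_scan_remove_block().
 */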
void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
		      unsigned alignment)
{
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_size = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}

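/*
 * As drm_mm_init_scan(), but the resulting hole must lie inside [start, end).
 */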
void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
				 unsigned alignment,
				 unsigned long start,
				 unsigned long end)
{
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_size = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}

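/*
 * Tentatively remove a node from the address list (its neighbours are
 * remembered so drm_mm_scan_remove_block() can restore it) and check whether
 * the hole opened up so far satisfies the scan request.  Returns 1 on a hit.
 */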
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	unsigned long hole_start, hole_end;
	unsigned long adj_start;
	unsigned long adj_end;

	mm->scanned_blocks++;

	KASSERT(!node->scanned_block, ("node->scanned_block"));
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	hole_start = drm_mm_hole_node_start(prev_node);
	hole_end = drm_mm_hole_node_end(prev_node);
	if (mm->scan_check_range) {
		adj_start = hole_start < mm->scan_start ?
			mm->scan_start : hole_start;
		adj_end = hole_end > mm->scan_end ?
			mm->scan_end : hole_end;
	} else {
		adj_start = hole_start;
		adj_end = hole_end;
	}

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		/* Record start and size of the complete free hole. */
		mm->scan_hit_start = hole_start;
		mm->scan_hit_size = hole_end - hole_start;

		return 1;
	}

	return 0;
}

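/*
 * Restore a node that was added to the current eviction scan.  Returns 1 if
 * the node lies inside the hole that produced the scan hit and therefore has
 * to be evicted to make room.
 */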
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	KASSERT(node->scanned_block, ("scanned_block"));
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	INIT_LIST_HEAD(&node->node_list);
	list_add(&node->node_list, &prev_node->node_list);

	/* Only need to check for containment because start & size of the
	 * complete resulting free block (not just the desired part) are
	 * stored. */
	if (node->start >= mm->scan_hit_start &&
	    node->start + node->size
			<= mm->scan_hit_start + mm->scan_hit_size) {
		return 1;
	}

	return 0;
}

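/*
 * Return non-zero when the manager contains no allocated nodes, mirroring the
 * check performed by drm_mm_takedown().
 */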
int drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return list_empty(head);
}

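/*
 * Initialize a manager for the range [start, start + size).  head_node is a
 * sentinel whose wrapping extent makes the hole helpers report exactly that
 * range as the initial hole.
 */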
int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	mm->scanned_blocks = 0;
	mtx_init(&mm->unused_lock, "drm_unused", NULL, MTX_DEF);

	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	return 0;
}

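/*
 * Tear down a manager.  Refuses to proceed while allocated nodes remain;
 * otherwise drains and frees the unused-node cache and destroys the lock.
 */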
void drm_mm_takedown(struct drm_mm *mm)
{
	struct drm_mm_node *entry, *next;

	if (!list_empty(&mm->head_node.node_list)) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	mtx_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
		list_del(&entry->node_list);
		free(entry, DRM_MEM_MM);
		--mm->num_unused;
	}
	mtx_unlock(&mm->unused_lock);

	mtx_destroy(&mm->unused_lock);

	KASSERT(mm->num_unused == 0, ("num_unused != 0"));
}
564