/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions; this could easily be improved by
 * using an RB-tree instead, at least if heavy fragmentation is expected.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
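
/*
 * Typical use (a hedged sketch, not code from this file; the 1 MiB range
 * and 4 KiB size are illustrative): embed a struct drm_mm, initialize it
 * over the managed range, then search for a free hole and claim it:
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node *node;
 *
 *	drm_mm_init(&mm, 0, 1024 * 1024);
 *	node = drm_mm_search_free(&mm, 4096, 0, 0);
 *	if (node)
 *		node = drm_mm_get_block_generic(node, 4096, 0, 0);
 *
 * node->start is then the allocated offset. drm_mm_put_block() returns the
 * block and merges it with free neighbours; drm_mm_takedown() tears the
 * manager down once every block has been returned.
 */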

#include "drmP.h"
#include "drm_mm.h"
#include <linux/slab.h>
#include <linux/seq_file.h>

#define MM_UNUSED_TARGET 4

unsigned long drm_mm_tail_space(struct drm_mm *mm)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free)
		return 0;

	return entry->size;
}

int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free)
		return -ENOMEM;

	if (entry->size <= size)
		return -ENOMEM;

	entry->size -= size;
	return 0;
}

static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kmalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kmalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		/* Fall back to the node cache prefilled by drm_mm_pre_get(). */
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child = list_entry(mm->unused_nodes.next,
					   struct drm_mm_node, fl_entry);
			list_del(&child->fl_entry);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}

int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		/* Drop the lock around the sleeping allocation. */
		spin_unlock(&mm->unused_lock);
		node = kmalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->fl_entry, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
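
/*
 * A sketch of the intended calling pattern (hypothetical caller, with
 * dev_priv->lock standing in for whatever lock the driver holds): top up
 * the node cache while sleeping is still allowed, then allocate atomically:
 *
 *	if (drm_mm_pre_get(mm))
 *		return -ENOMEM;
 *	spin_lock(&dev_priv->lock);
 *	node = drm_mm_search_free(mm, size, alignment, 0);
 *	if (node)
 *		node = drm_mm_get_block_generic(node, size, alignment, 1);
 *	spin_unlock(&dev_priv->lock);
 *
 * With atomic == 1, drm_mm_kmalloc() uses GFP_ATOMIC and falls back to the
 * nodes stashed by drm_mm_pre_get(), so the allocation cannot sleep.
 */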

static int drm_mm_create_tail_node(struct drm_mm *mm,
				   unsigned long start,
				   unsigned long size, int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(mm, atomic);
	if (unlikely(child == NULL))
		return -ENOMEM;

	child->free = 1;
	child->size = size;
	child->start = start;
	child->mm = mm;

	list_add_tail(&child->ml_entry, &mm->ml_entry);
	list_add_tail(&child->fl_entry, &mm->fl_entry);

	return 0;
}

int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free) {
		return drm_mm_create_tail_node(mm, entry->start + entry->size,
					       size, atomic);
	}
	entry->size += size;
	return 0;
}
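
/*
 * For example (a hypothetical resize path; error handling elided), a driver
 * reclaiming memory from the end of its managed range could do:
 *
 *	if (drm_mm_tail_space(mm) > size)
 *		drm_mm_remove_space_from_tail(mm, size);
 *
 * and later give it back with drm_mm_add_space_to_tail(mm, size, 0). Note
 * that drm_mm_remove_space_from_tail() returns -ENOMEM unless the final
 * node is free and strictly larger than the amount being removed.
 */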

static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
						 unsigned long size,
						 int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(parent->mm, atomic);
	if (unlikely(child == NULL))
		return NULL;

	INIT_LIST_HEAD(&child->fl_entry);

	child->free = 0;
	child->size = size;
	child->start = parent->start;
	child->mm = parent->mm;

	list_add_tail(&child->ml_entry, &parent->ml_entry);

	parent->size -= size;
	parent->start += size;
	return child;
}

struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
					     unsigned long size,
					     unsigned alignment,
					     int atomic)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;

	if (alignment)
		tmp = node->start % alignment;

	if (tmp) {
		align_splitoff =
		    drm_mm_split_at_start(node, alignment - tmp, atomic);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		list_del_init(&node->fl_entry);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, atomic);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);
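
/*
 * Alignment is handled by splitting: when a hole starts at a misaligned
 * offset, the bytes up to the next boundary are split off and immediately
 * returned to the free list. An illustrative example (values hypothetical):
 * given a free node starting at 0x1000 and alignment 0x10000, the code
 * splits off 0xf000 bytes, hands out a block starting at 0x10000, and
 * frees the 0xf000 remnant:
 *
 *	node = drm_mm_search_free(mm, 4096, 0x10000, 0);
 *	if (node)
 *		node = drm_mm_get_block_generic(node, 4096, 0x10000, 0);
 */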

/*
 * Put a block. Merge with the previous and/or next block if they are free.
 * Otherwise add to the free stack.
 */

void drm_mm_put_block(struct drm_mm_node *cur)
{
	struct drm_mm *mm = cur->mm;
	struct list_head *cur_head = &cur->ml_entry;
	struct list_head *root_head = &mm->ml_entry;
	struct drm_mm_node *prev_node = NULL;
	struct drm_mm_node *next_node;

	int merged = 0;

	if (cur_head->prev != root_head) {
		prev_node =
		    list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
		if (prev_node->free) {
			prev_node->size += cur->size;
			merged = 1;
		}
	}
	if (cur_head->next != root_head) {
		next_node =
		    list_entry(cur_head->next, struct drm_mm_node, ml_entry);
		if (next_node->free) {
			if (merged) {
				prev_node->size += next_node->size;
				list_del(&next_node->ml_entry);
				list_del(&next_node->fl_entry);
				if (mm->num_unused < MM_UNUSED_TARGET) {
					list_add(&next_node->fl_entry,
						 &mm->unused_nodes);
					++mm->num_unused;
				} else
					kfree(next_node);
			} else {
				next_node->size += cur->size;
				next_node->start = cur->start;
				merged = 1;
			}
		}
	}
	if (!merged) {
		cur->free = 1;
		list_add(&cur->fl_entry, &mm->fl_entry);
	} else {
		list_del(&cur->ml_entry);
		if (mm->num_unused < MM_UNUSED_TARGET) {
			list_add(&cur->fl_entry, &mm->unused_nodes);
			++mm->num_unused;
		} else
			kfree(cur);
	}
}
EXPORT_SYMBOL(drm_mm_put_block);

struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
				       unsigned long size,
				       unsigned alignment, int best_match)
{
	struct list_head *list;
	const struct list_head *free_stack = &mm->fl_entry;
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;
	unsigned wasted;

	best = NULL;
	best_size = ~0UL;

	list_for_each(list, free_stack) {
		entry = list_entry(list, struct drm_mm_node, fl_entry);
		wasted = 0;

		if (entry->size < size)
			continue;

		if (alignment) {
			unsigned tmp = entry->start % alignment;
			if (tmp)
				wasted += alignment - tmp;
		}

		if (entry->size >= size + wasted) {
			if (!best_match)
				return entry;
			/* Track the smallest hole that still fits. */
			if (entry->size < best_size) {
				best = entry;
				best_size = entry->size;
			}
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free);
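
/*
 * With best_match == 0 the first hole that fits is returned; with
 * best_match == 1 the whole free stack is scanned for the smallest hole
 * that still fits, which trades a full walk for less fragmentation. A
 * hedged sketch of a best-fit allocation (mm, size, alignment assumed):
 *
 *	node = drm_mm_search_free(mm, size, alignment, 1);
 *	if (!node)
 *		return -ENOSPC;
 *	node = drm_mm_get_block_generic(node, size, alignment, 0);
 */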

int drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->ml_entry;

	/* Clean iff the list holds exactly one node: the initial free block. */
	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->ml_entry);
	INIT_LIST_HEAD(&mm->fl_entry);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	spin_lock_init(&mm->unused_lock);

	return drm_mm_create_tail_node(mm, start, size, 0);
}
EXPORT_SYMBOL(drm_mm_init);

void drm_mm_takedown(struct drm_mm *mm)
{
	struct list_head *bnode = mm->fl_entry.next;
	struct drm_mm_node *entry;
	struct drm_mm_node *next;

	entry = list_entry(bnode, struct drm_mm_node, fl_entry);

	if (entry->ml_entry.next != &mm->ml_entry ||
	    entry->fl_entry.next != &mm->fl_entry) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	list_del(&entry->fl_entry);
	list_del(&entry->ml_entry);
	kfree(entry);

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
		list_del(&entry->fl_entry);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);

#if defined(CONFIG_DEBUG_FS)
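/*
 * Dumps one line per block plus a summary, e.g. (illustrative values for a
 * 1 MiB range with one 4 KiB allocation):
 *
 *	0x00000000-0x00001000: 0x00001000: used
 *	0x00001000-0x00100000: 0x000ff000: free
 *	total: 1048576, used 4096 free 1044480
 */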
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n",
			   entry->start, entry->start + entry->size,
			   entry->size, entry->free ? "free" : "used");
		total += entry->size;
		if (entry->free)
			total_free += entry->size;
		else
			total_used += entry->size;
	}
	seq_printf(m, "total: %lu, used %lu free %lu\n",
		   total, total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif
394