xref: /linux/drivers/gpu/drm/drm_mm.c (revision e27ecdd94d81e5bc3d1f68591701db5adb342f0d)
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions; this could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
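
/*
 * Minimal usage sketch (hypothetical driver code; error handling is
 * trimmed and the names aperture_mm/node/obj are illustrative only):
 *
 *	struct drm_mm aperture_mm;
 *	struct drm_mm_node *node, *obj;
 *
 *	drm_mm_init(&aperture_mm, 0, 1024 * 1024);
 *	node = drm_mm_search_free(&aperture_mm, 4096, PAGE_SIZE, 0);
 *	if (node != NULL)
 *		obj = drm_mm_get_block(node, 4096, PAGE_SIZE);
 *	...
 *	drm_mm_put_block(obj);
 *	drm_mm_takedown(&aperture_mm);
 */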

#include "drmP.h"
#include "drm_mm.h"
#include <linux/slab.h>

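/* Target number of spare nodes cached for allocations in atomic context. */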
#define MM_UNUSED_TARGET 4

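/*
 * Return the size of the free block at the tail of the managed range,
 * or zero if the tail block is allocated.
 */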
unsigned long drm_mm_tail_space(struct drm_mm *mm)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free)
		return 0;

	return entry->size;
}

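/*
 * Shrink the managed range by @size bytes from the tail. Fails with
 * -ENOMEM unless the tail block is free and strictly larger than @size.
 */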
int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free)
		return -ENOMEM;

	if (entry->size <= size)
		return -ENOMEM;

	entry->size -= size;
	return 0;
}

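/*
 * Allocate a node, falling back to the cache of preallocated nodes when
 * kmalloc() fails (as it may under GFP_ATOMIC).
 */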
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kmalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kmalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child = list_entry(mm->unused_nodes.next,
					   struct drm_mm_node, fl_entry);
			list_del(&child->fl_entry);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}

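/*
 * Top up the cache of preallocated nodes to MM_UNUSED_TARGET entries so
 * that later atomic allocations can be served from it. Returns -ENOMEM
 * only if fewer than two nodes could be cached.
 */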
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kmalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->fl_entry, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);

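/*
 * Append a new free block of @size bytes starting at @start to the end
 * of the managed range.
 */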
static int drm_mm_create_tail_node(struct drm_mm *mm,
				   unsigned long start,
				   unsigned long size, int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(mm, atomic);
	if (unlikely(child == NULL))
		return -ENOMEM;

	child->free = 1;
	child->size = size;
	child->start = start;
	child->mm = mm;

	list_add_tail(&child->ml_entry, &mm->ml_entry);
	list_add_tail(&child->fl_entry, &mm->fl_entry);

	return 0;
}

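/*
 * Grow the managed range by @size bytes: extend the tail block if it is
 * free, otherwise append a new free block behind it.
 */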
int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free) {
		return drm_mm_create_tail_node(mm, entry->start + entry->size,
					       size, atomic);
	}
	entry->size += size;
	return 0;
}

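/*
 * Split @size bytes off the start of the free block @parent and return
 * them as a new, allocated node; @parent keeps the remainder.
 */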
static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
						 unsigned long size,
						 int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(parent->mm, atomic);
	if (unlikely(child == NULL))
		return NULL;

	INIT_LIST_HEAD(&child->fl_entry);

	child->free = 0;
	child->size = size;
	child->start = parent->start;
	child->mm = parent->mm;

	list_add_tail(&child->ml_entry, &parent->ml_entry);

	parent->size -= size;
	parent->start += size;
	return child;
}

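/*
 * Allocate @size bytes from the free block @parent. If @alignment
 * requires it, an alignment gap is first split off the front of @parent
 * and returned to the free list. Returns the allocated node, or NULL if
 * a helper node could not be allocated.
 */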
struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
				     unsigned long size, unsigned alignment)
{
	struct drm_mm_node *align_splitoff = NULL;
	struct drm_mm_node *child;
	unsigned tmp = 0;

	if (alignment)
		tmp = parent->start % alignment;

	if (tmp) {
		align_splitoff =
		    drm_mm_split_at_start(parent, alignment - tmp, 0);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (parent->size == size) {
		list_del_init(&parent->fl_entry);
		parent->free = 0;
		return parent;
	} else {
		child = drm_mm_split_at_start(parent, size, 0);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return child;
}
EXPORT_SYMBOL(drm_mm_get_block);

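/*
 * Same as drm_mm_get_block(), but safe for atomic context: helper nodes
 * come from the cache filled by drm_mm_pre_get().
 */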
struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
					    unsigned long size,
					    unsigned alignment)
{
	struct drm_mm_node *align_splitoff = NULL;
	struct drm_mm_node *child;
	unsigned tmp = 0;

	if (alignment)
		tmp = parent->start % alignment;

	if (tmp) {
		align_splitoff =
		    drm_mm_split_at_start(parent, alignment - tmp, 1);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (parent->size == size) {
		list_del_init(&parent->fl_entry);
		parent->free = 0;
		return parent;
	} else {
		child = drm_mm_split_at_start(parent, size, 1);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return child;
}
EXPORT_SYMBOL(drm_mm_get_block_atomic);

/*
 * Put a block. Merge with the previous and/or next block if they are
 * free; otherwise add it to the free stack.
 */
void drm_mm_put_block(struct drm_mm_node *cur)
{
	struct drm_mm *mm = cur->mm;
	struct list_head *cur_head = &cur->ml_entry;
	struct list_head *root_head = &mm->ml_entry;
	struct drm_mm_node *prev_node = NULL;
	struct drm_mm_node *next_node;

	int merged = 0;

	if (cur_head->prev != root_head) {
		prev_node =
		    list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
		if (prev_node->free) {
			prev_node->size += cur->size;
			merged = 1;
		}
	}
	if (cur_head->next != root_head) {
		next_node =
		    list_entry(cur_head->next, struct drm_mm_node, ml_entry);
		if (next_node->free) {
			if (merged) {
				prev_node->size += next_node->size;
				list_del(&next_node->ml_entry);
				list_del(&next_node->fl_entry);
				if (mm->num_unused < MM_UNUSED_TARGET) {
					list_add(&next_node->fl_entry,
						 &mm->unused_nodes);
					++mm->num_unused;
				} else
					kfree(next_node);
			} else {
				next_node->size += cur->size;
				next_node->start = cur->start;
				merged = 1;
			}
		}
	}
	if (!merged) {
		cur->free = 1;
		list_add(&cur->fl_entry, &mm->fl_entry);
	} else {
		list_del(&cur->ml_entry);
		if (mm->num_unused < MM_UNUSED_TARGET) {
			list_add(&cur->fl_entry, &mm->unused_nodes);
			++mm->num_unused;
		} else
			kfree(cur);
	}
}
EXPORT_SYMBOL(drm_mm_put_block);

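/*
 * Scan the free stack for a block that can hold @size bytes at
 * @alignment. If @best_match is set, return the smallest suitable block
 * instead of the first fit, at the cost of a full scan.
 */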
struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
				       unsigned long size,
				       unsigned alignment, int best_match)
{
	struct list_head *list;
	const struct list_head *free_stack = &mm->fl_entry;
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;
	unsigned wasted;

	best = NULL;
	best_size = ~0UL;

	list_for_each(list, free_stack) {
		entry = list_entry(list, struct drm_mm_node, fl_entry);
		wasted = 0;

		if (entry->size < size)
			continue;

		if (alignment) {
			unsigned tmp = entry->start % alignment;
			if (tmp)
				wasted += alignment - tmp;
		}

		if (entry->size >= size + wasted) {
			if (!best_match)
				return entry;
			if (entry->size < best_size) {
				best = entry;
				best_size = entry->size;
			}
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free);

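/*
 * Return true if nothing is allocated, i.e. the managed range has merged
 * back into a single block.
 */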
int drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->ml_entry;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

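/*
 * Initialize @mm to manage the range [@start, @start + @size) as a
 * single free block.
 */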
int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->ml_entry);
	INIT_LIST_HEAD(&mm->fl_entry);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	spin_lock_init(&mm->unused_lock);

	return drm_mm_create_tail_node(mm, start, size, 0);
}
EXPORT_SYMBOL(drm_mm_init);

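/*
 * Tear down @mm and free all helper nodes. All blocks must have been
 * freed beforehand; otherwise an error is logged and the takedown is
 * deferred.
 */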
void drm_mm_takedown(struct drm_mm *mm)
{
	struct list_head *bnode = mm->fl_entry.next;
	struct drm_mm_node *entry;
	struct drm_mm_node *next;

	entry = list_entry(bnode, struct drm_mm_node, fl_entry);

	if (entry->ml_entry.next != &mm->ml_entry ||
	    entry->fl_entry.next != &mm->fl_entry) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	list_del(&entry->fl_entry);
	list_del(&entry->ml_entry);
	kfree(entry);

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
		list_del(&entry->fl_entry);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);
409