xref: /titanic_50/usr/src/uts/common/io/drm/drm_mm.c (revision c9181931796b5d4505a5230d4a399b406b0aabc4)
1 /*
2  *
3  * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
4  * Copyright (c) 2009, Intel Corporation.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files(the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice(including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25  * USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  *
28  */
29 
30 /*
31  * Generic simple memory manager implementation. Intended to be used as a base
32  * class implementation for more advanced memory managers.
33  *
34  * Note that the algorithm used is quite simple and there might be substantial
35  * performance gains if a smarter free list is implemented.
36  * Currently it is just an
37  * unordered stack of free regions. This could easily be improved if an RB-tree
38  * is used instead. At least if we expect heavy fragmentation.
39  *
40  * Aligned allocations can also see improvement.
41  *
42  * Authors:
43  * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
44  */
45 
46 /*
47  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
48  * Use is subject to license terms.
49  */
50 
51 #include "drmP.h"
52 
53 unsigned long
54 drm_mm_tail_space(struct drm_mm *mm)
55 {
56 	struct list_head *tail_node;
57 	struct drm_mm_node *entry;
58 
59 	tail_node = mm->ml_entry.prev;
60 	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
61 	if (!entry->free)
62 		return (0);
63 
64 	return (entry->size);
65 }
66 
67 int
68 drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
69 {
70 	struct list_head *tail_node;
71 	struct drm_mm_node *entry;
72 
73 	tail_node = mm->ml_entry.prev;
74 	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
75 	if (!entry->free)
76 		return (ENOMEM);
77 
78 	if (entry->size <= size)
79 		return (ENOMEM);
80 
81 	entry->size -= size;
82 	return (0);
83 }
84 
85 
86 static int
87 drm_mm_create_tail_node(struct drm_mm *mm,
88 		    unsigned long start,
89 		    unsigned long size)
90 {
91 	struct drm_mm_node *child;
92 
93 	child = (struct drm_mm_node *)
94 	    drm_alloc(sizeof (*child), DRM_MEM_MM);
95 	if (!child)
96 		return (ENOMEM);
97 
98 	child->free = 1;
99 	child->size = size;
100 	child->start = start;
101 	child->mm = mm;
102 
103 	list_add_tail(&child->ml_entry, &mm->ml_entry, (caddr_t)child);
104 	list_add_tail(&child->fl_entry, &mm->fl_entry, (caddr_t)child);
105 
106 	return (0);
107 }
108 
109 
110 int
111 drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size)
112 {
113 	struct list_head *tail_node;
114 	struct drm_mm_node *entry;
115 
116 	tail_node = mm->ml_entry.prev;
117 	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
118 	if (!entry->free) {
119 		return (drm_mm_create_tail_node(mm,
120 		    entry->start + entry->size, size));
121 	}
122 	entry->size += size;
123 	return (0);
124 }
125 
126 static struct drm_mm_node *
127 drm_mm_split_at_start(struct drm_mm_node *parent,
128 		    unsigned long size)
129 {
130 	struct drm_mm_node *child;
131 
132 	child = (struct drm_mm_node *)
133 	    drm_alloc(sizeof (*child), DRM_MEM_MM);
134 	if (!child)
135 		return (NULL);
136 
137 	INIT_LIST_HEAD(&child->fl_entry);
138 
139 	child->free = 0;
140 	child->size = size;
141 	child->start = parent->start;
142 	child->mm = parent->mm;
143 
144 	list_add_tail(&child->ml_entry, &parent->ml_entry, (caddr_t)child);
145 	INIT_LIST_HEAD(&child->fl_entry);
146 
147 	parent->size -= size;
148 	parent->start += size;
149 	return (child);
150 }
151 
152 /*
153  * Put a block. Merge with the previous and / or next block if they are free.
154  * Otherwise add to the free stack.
155  */
156 
/*
 * Free an allocated node back to the manager.  The node is coalesced
 * with its previous and/or next neighbor on the memory list when
 * those are free; otherwise it is marked free and pushed on the free
 * stack.  When any merge happens, 'cur' itself is removed and freed.
 */
void
drm_mm_put_block(struct drm_mm_node *cur)
{

	struct drm_mm *mm = cur->mm;
	struct list_head *cur_head = &cur->ml_entry;
	struct list_head *root_head = &mm->ml_entry;
	struct drm_mm_node *prev_node = NULL;
	struct drm_mm_node *next_node;

	int merged = 0;

	/*
	 * If the previous neighbor exists and is free, grow it forward
	 * over cur's range (prev's start is unchanged, so adding cur's
	 * size is enough to cover it).
	 */
	if (cur_head->prev != root_head) {
		prev_node = list_entry(cur_head->prev,
		    struct drm_mm_node, ml_entry);
		if (prev_node->free) {
			prev_node->size += cur->size;
			merged = 1;
		}
	}
	if (cur_head->next != root_head) {
		next_node = list_entry(cur_head->next,
		    struct drm_mm_node, ml_entry);
		if (next_node->free) {
			if (merged) {
				/*
				 * prev already absorbed cur; absorb next as
				 * well and drop next's node entirely.
				 */
				prev_node->size += next_node->size;
				list_del(&next_node->ml_entry);
				list_del(&next_node->fl_entry);
				drm_free(next_node,
				    sizeof (*next_node), DRM_MEM_MM);
			} else {
				/*
				 * Grow next backward over cur's range by
				 * moving its start down to cur's start.
				 */
				next_node->size += cur->size;
				next_node->start = cur->start;
				merged = 1;
			}
		}
	}
	if (!merged) {
		/* No free neighbor: cur itself becomes a free block. */
		cur->free = 1;
		list_add(&cur->fl_entry, &mm->fl_entry, (caddr_t)cur);
	} else {
		/* cur's range now lives in a neighbor; discard the node. */
		list_del(&cur->ml_entry);
		drm_free(cur, sizeof (*cur), DRM_MEM_MM);
	}
}
202 
203 struct drm_mm_node *
204 drm_mm_get_block(struct drm_mm_node *parent,
205 		    unsigned long size,
206 		    unsigned alignment)
207 {
208 
209 	struct drm_mm_node *align_splitoff = NULL;
210 	struct drm_mm_node *child;
211 	unsigned tmp = 0;
212 
213 	if (alignment)
214 		tmp = parent->start % alignment;
215 
216 	if (tmp) {
217 		align_splitoff = drm_mm_split_at_start(parent, alignment - tmp);
218 		if (!align_splitoff)
219 			return (NULL);
220 	}
221 
222 	if (parent->size == size) {
223 		list_del_init(&parent->fl_entry);
224 		parent->free = 0;
225 		return (parent);
226 	} else {
227 		child = drm_mm_split_at_start(parent, size);
228 	}
229 
230 	if (align_splitoff)
231 		drm_mm_put_block(align_splitoff);
232 
233 	return (child);
234 }
235 
236 struct drm_mm_node *
237 drm_mm_search_free(const struct drm_mm *mm,
238 		    unsigned long size,
239 		    unsigned alignment,
240 		    int best_match)
241 {
242 	struct list_head *list;
243 	const struct list_head *free_stack = &mm->fl_entry;
244 	struct drm_mm_node *entry;
245 	struct drm_mm_node *best;
246 	unsigned long best_size;
247 	unsigned wasted;
248 
249 	best = NULL;
250 	best_size = ~0UL;
251 
252 	list_for_each(list, free_stack) {
253 		entry = list_entry(list, struct drm_mm_node, fl_entry);
254 		wasted = 0;
255 
256 		if (entry->size < size)
257 			continue;
258 
259 		if (alignment) {
260 			register unsigned tmp = entry->start % alignment;
261 			if (tmp)
262 				wasted += alignment - tmp;
263 		}
264 
265 
266 		if (entry->size >= size + wasted) {
267 			if (!best_match)
268 				return (entry);
269 			if (size < best_size) {
270 				best = entry;
271 				best_size = entry->size;
272 			}
273 		}
274 	}
275 
276 	return (best);
277 }
278 
279 int
280 drm_mm_clean(struct drm_mm *mm)
281 {
282 	struct list_head *head = &mm->ml_entry;
283 
284 	return (head->next->next == head);
285 }
286 
287 int
288 drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
289 {
290 	INIT_LIST_HEAD(&mm->ml_entry);
291 	INIT_LIST_HEAD(&mm->fl_entry);
292 
293 	return (drm_mm_create_tail_node(mm, start, size));
294 }
295 
296 
297 void
298 drm_mm_takedown(struct drm_mm *mm)
299 {
300 	struct list_head *bnode = mm->fl_entry.next;
301 	struct drm_mm_node *entry;
302 
303 	entry = list_entry(bnode, struct drm_mm_node, fl_entry);
304 
305 	if (entry->ml_entry.next != &mm->ml_entry ||
306 	    entry->fl_entry.next != &mm->fl_entry) {
307 		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
308 		return;
309 	}
310 
311 	list_del(&entry->fl_entry);
312 	list_del(&entry->ml_entry);
313 
314 	drm_free(entry, sizeof (*entry), DRM_MEM_MM);
315 }
316 
317 void
318 drm_mm_clean_ml(const struct drm_mm *mm)
319 {
320 	const struct list_head *mlstack = &mm->ml_entry;
321 	struct list_head *list, *temp;
322 	struct drm_mm_node *entry;
323 
324 	if (mlstack->next == NULL)
325 		return;
326 
327 	list_for_each_safe(list, temp, mlstack) {
328 		entry = list_entry(list, struct drm_mm_node, ml_entry);
329 		DRM_DEBUG("ml_entry 0x%x, size 0x%x, start 0x%x",
330 		    entry, entry->size, entry->start);
331 
332 		list_del(&entry->fl_entry);
333 		list_del(&entry->ml_entry);
334 		drm_free(entry, sizeof (*entry), DRM_MEM_MM);
335 	}
336 }
337