xref: /titanic_50/usr/src/uts/intel/io/drm/i915_mem.c (revision ae115bc77f6fcde83175c75b4206dc2e50747966)
1 /*
2  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 /* BEGIN CSTYLED */
7 
8 /* i915_mem.c -- Simple agp/fb memory manager for i915 -*- linux-c -*-
9  */
10 /**************************************************************************
11  *
12  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
13  * All Rights Reserved.
14  *
15  * Permission is hereby granted, free of charge, to any person obtaining a
16  * copy of this software and associated documentation files (the
17  * "Software"), to deal in the Software without restriction, including
18  * without limitation the rights to use, copy, modify, merge, publish,
19  * distribute, sub license, and/or sell copies of the Software, and to
20  * permit persons to whom the Software is furnished to do so, subject to
21  * the following conditions:
22  *
23  * The above copyright notice and this permission notice (including the
24  * next paragraph) shall be included in all copies or substantial portions
25  * of the Software.
26  *
27  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
28  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
29  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
30  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
31  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
32  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
33  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
34  *
35  **************************************************************************/
36 
37 #pragma ident	"%Z%%M%	%I%	%E% SMI"
38 
39 /* This memory manager is integrated into the global/local lru
40  * mechanisms used by the clients.  Specifically, it operates by
41  * setting the 'in_use' fields of the global LRU to indicate whether
42  * this region is privately allocated to a client.
43  *
44  * This does require the client to actually respect that field.
45  *
46  * Currently no effort is made to allocate 'private' memory in any
47  * clever way - the LRU information isn't used to determine which
48  * block to allocate, and the ring is drained prior to allocations --
49  * in other words allocation is expensive.
50  */
51 
52 #include "drmP.h"
53 #include "drm.h"
54 #include "i915_drm.h"
55 #include "i915_drv.h"
56 
/*
 * Mark every texture-LRU region overlapped by block 'p' as in-use (or
 * free, per 'in_use') and stamp it with a fresh age, then move each
 * region to the head of the shared LRU list so clients treat it as
 * most recently used.
 *
 * texList/texAge live in the SAREA shared with userland clients; entry
 * 'nr' (I915_NR_TEX_REGIONS) serves as the list-head sentinel of the
 * index-linked LRU.  NOTE(review): correctness relies on clients
 * honoring the in_use flag -- see the comment at the top of this file.
 */
void mark_block(drm_device_t * dev, struct mem_block *p, int in_use)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_tex_region_t *list;
	unsigned shift, nr;
	unsigned start;
	unsigned end;
	unsigned i;
	int age;

	shift = dev_priv->tex_lru_log_granularity;
	nr = I915_NR_TEX_REGIONS;

	/* Convert the block's byte extent into inclusive region indices. */
	start = p->start >> shift;
	end = (p->start + p->size - 1) >> shift;

	age = ++sarea_priv->texAge;
	list = sarea_priv->texList;

	/* Mark the regions with the new flag and update their age.  Move
	 * them to head of list to preserve LRU semantics.
	 */
	for (i = start; i <= end; i++) {
		list[i].in_use = in_use;
		list[i].age = age;

		/* remove_from_list(i)
		 */
		list[(unsigned)list[i].next].prev = list[i].prev;
		list[(unsigned)list[i].prev].next = list[i].next;

		/* insert_at_head(list, i)
		 */
		list[i].prev = nr;
		list[i].next = list[nr].next;
		list[(unsigned)list[nr].next].prev = i;
		list[nr].next = i;
	}
}
97 
98 /* Very simple allocator for agp memory, working on a static range
99  * already mapped into each client's address space.
100  */
101 
102 static struct mem_block *split_block(struct mem_block *p, int start, int size,
103 				     DRMFILE filp)
104 {
105 	/* Maybe cut off the start of an existing block */
106 	if (start > p->start) {
107 		struct mem_block *newblock =
108 		    drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
109 		if (!newblock)
110 			goto out;
111 		newblock->start = start;
112 		newblock->size = p->size - (start - p->start);
113 		newblock->filp = NULL;
114 		newblock->next = p->next;
115 		newblock->prev = p;
116 		p->next->prev = newblock;
117 		p->next = newblock;
118 		p->size -= newblock->size;
119 		p = newblock;
120 	}
121 
122 	/* Maybe cut off the end of an existing block */
123 	if (size < p->size) {
124 		struct mem_block *newblock =
125 		    drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
126 		if (!newblock)
127 			goto out;
128 		newblock->start = start + size;
129 		newblock->size = p->size - size;
130 		newblock->filp = NULL;
131 		newblock->next = p->next;
132 		newblock->prev = p;
133 		p->next->prev = newblock;
134 		p->next = newblock;
135 		p->size = size;
136 	}
137 
138       out:
139 	/* Our block is in the middle */
140 	p->filp = filp;
141 	return (p);
142 }
143 
144 static struct mem_block *alloc_block(struct mem_block *heap, int size,
145 				     int align2, DRMFILE filp)
146 {
147 	struct mem_block *p;
148 	int mask = (1 << align2) - 1;
149 
150 	for (p = heap->next; p != heap; p = p->next) {
151 		int start = (p->start + mask) & ~mask;
152 		if (p->filp == NULL && start + size <= p->start + p->size)
153 			return split_block(p, start, size, filp);
154 	}
155 
156 	return NULL;
157 }
158 
159 static struct mem_block *find_block(struct mem_block *heap, int start)
160 {
161 	struct mem_block *p;
162 
163 	for (p = heap->next; p != heap; p = p->next)
164 		if (p->start == start)
165 			return (p);
166 
167 	return (NULL);
168 }
169 
170 struct mem_block *find_block_by_proc(struct mem_block *heap, DRMFILE filp)
171 {
172 	struct mem_block *p;
173 
174 	for (p = heap->next; p != heap; p = p->next)
175 		if (p->filp == filp)
176 			return (p);
177 
178 	return (NULL);
179 }
180 
181 void free_block(struct mem_block *p)
182 {
183 	p->filp = NULL;
184 
185 	/* Assumes a single contiguous range.  Needs a special filp in
186 	 * 'heap' to stop it being subsumed.
187 	 */
188 	if (p->next->filp == NULL) {
189 		struct mem_block *q = p->next;
190 		p->size += q->size;
191 		p->next = q->next;
192 		p->next->prev = p;
193 		drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
194 	}
195 
196 	if (p->prev->filp == NULL) {
197 		struct mem_block *q = p->prev;
198 		q->size += p->size;
199 		q->next = p->next;
200 		q->next->prev = q;
201 		drm_free(p, sizeof(*q), DRM_MEM_BUFLISTS);
202 	}
203 }
204 
205 /* Initialize.  How to check for an uninitialized heap?
206  */
207 static int init_heap(struct mem_block **heap, int start, int size)
208 {
209 	struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFLISTS);
210 
211 	if (!blocks)
212 		return DRM_ERR(ENOMEM);
213 
214 	*heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFLISTS);
215 	if (!*heap) {
216 		drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFLISTS);
217 		return DRM_ERR(ENOMEM);
218 	}
219 
220 	blocks->start = start;
221 	blocks->size = size;
222 	blocks->filp = NULL;
223 	blocks->next = blocks->prev = *heap;
224 
225 	(void) memset(*heap, 0, sizeof(**heap));
226 	(*heap)->filp = (DRMFILE) - 1;
227 	(*heap)->next = (*heap)->prev = blocks;
228 	return (0);
229 }
230 
231 /* Free all blocks associated with the releasing file.
232  */
233 void i915_mem_release(drm_device_t * dev, DRMFILE filp, struct mem_block *heap)
234 {
235 	struct mem_block *p;
236 
237 	if (!heap || !heap->next)
238 		return;
239 
240 	for (p = heap->next; p != heap; p = p->next) {
241 		if (p->filp == filp) {
242 			p->filp = NULL;
243 			mark_block(dev, p, 0);
244 		}
245 	}
246 
247 	/* Assumes a single contiguous range.  Needs a special filp in
248 	 * 'heap' to stop it being subsumed.
249 	 */
250 	for (p = heap->next; p != heap; p = p->next) {
251 		while (p->filp == NULL && p->next->filp == NULL) {
252 			struct mem_block *q = p->next;
253 			p->size += q->size;
254 			p->next = q->next;
255 			p->next->prev = p;
256 			drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
257 		}
258 	}
259 }
260 
261 /* Shutdown.
262  */
263 void i915_mem_takedown(struct mem_block **heap)
264 {
265 	struct mem_block *p;
266 
267 	if (!*heap)
268 		return;
269 
270 	for (p = (*heap)->next; p != *heap;) {
271 		struct mem_block *q = p;
272 		p = p->next;
273 		drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
274 	}
275 
276 	drm_free(*heap, sizeof(**heap), DRM_MEM_BUFLISTS);
277 	*heap = NULL;
278 }
279 
280 struct mem_block **get_heap(drm_i915_private_t * dev_priv, int region)
281 {
282 	switch (region) {
283 	case I915_MEM_REGION_AGP:
284 		return (&dev_priv->agp_heap);
285 	default:
286 		return (NULL);
287 	}
288 }
289 
290 /* IOCTL HANDLERS */
291 
292 /*ARGSUSED*/
293 int i915_mem_alloc(DRM_IOCTL_ARGS)
294 {
295 	DRM_DEVICE;
296 	drm_i915_private_t *dev_priv = dev->dev_private;
297 	drm_i915_mem_alloc_t alloc;
298 	struct mem_block *block, **heap;
299 
300 	if (!dev_priv) {
301 		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
302 		return DRM_ERR(EINVAL);
303 	}
304 
305 	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
306 		drm_i915_mem_alloc32_t	alloc32;
307 
308 		DRM_COPY_FROM_USER_IOCTL(alloc32,
309 			(drm_i915_mem_alloc32_t __user *) data,
310 			sizeof (alloc32));
311 		alloc.region = alloc32.region;
312 		alloc.alignment = alloc32.alignment;
313 		alloc.size = alloc32.size;
314 		alloc.region_offset = (int __user *)(uintptr_t)alloc32.region_offset;
315 	} else
316 		DRM_COPY_FROM_USER_IOCTL(alloc, (drm_i915_mem_alloc_t __user *) data,
317 			sizeof(alloc));
318 
319 	heap = get_heap(dev_priv, alloc.region);
320 	if (!heap || !*heap)
321 		return DRM_ERR(EFAULT);
322 
323 	/* Make things easier on ourselves: all allocations at least
324 	 * 4k aligned.
325 	 */
326 	if (alloc.alignment < 12)
327 		alloc.alignment = 12;
328 
329 	block = alloc_block(*heap, alloc.size, alloc.alignment, filp);
330 
331 	if (!block)
332 		return DRM_ERR(ENOMEM);
333 
334 	mark_block(dev, block, 1);
335 
336 	if (DRM_COPY_TO_USER(alloc.region_offset, &block->start, sizeof(int))) {
337 		DRM_ERROR("copy_to_user\n");
338 		return DRM_ERR(EFAULT);
339 	}
340 
341 	return (0);
342 }
343 
344 /*ARGSUSED*/
345 int i915_mem_free(DRM_IOCTL_ARGS)
346 {
347 	DRM_DEVICE;
348 	drm_i915_private_t *dev_priv = dev->dev_private;
349 	drm_i915_mem_free_t memfree;
350 	struct mem_block *block, **heap;
351 
352 	if (!dev_priv) {
353 		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
354 		return DRM_ERR(EINVAL);
355 	}
356 
357 	DRM_COPY_FROM_USER_IOCTL(memfree, (drm_i915_mem_free_t __user *) data,
358 				 sizeof(memfree));
359 
360 	heap = get_heap(dev_priv, memfree.region);
361 	if (!heap || !*heap)
362 		return DRM_ERR(EFAULT);
363 
364 	block = find_block(*heap, memfree.region_offset);
365 	if (!block)
366 		return DRM_ERR(EFAULT);
367 
368 	if (block->filp != filp)
369 		return DRM_ERR(EPERM);
370 
371 	mark_block(dev, block, 0);
372 	free_block(block);
373 	return (0);
374 }
375 
376 /*ARGSUSED*/
377 int i915_mem_init_heap(DRM_IOCTL_ARGS)
378 {
379 	DRM_DEVICE;
380 	drm_i915_private_t *dev_priv = dev->dev_private;
381 	drm_i915_mem_init_heap_t initheap;
382 	struct mem_block **heap;
383 
384 	if (!dev_priv) {
385 		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
386 		return DRM_ERR(EINVAL);
387 	}
388 
389 	DRM_COPY_FROM_USER_IOCTL(initheap,
390 				 (drm_i915_mem_init_heap_t __user *) data,
391 				 sizeof(initheap));
392 
393 	heap = get_heap(dev_priv, initheap.region);
394 	if (!heap)
395 		return DRM_ERR(EFAULT);
396 
397 	if (*heap) {
398 		DRM_ERROR("heap already initialized?");
399 		return DRM_ERR(EFAULT);
400 	}
401 
402 	return init_heap(heap, initheap.start, initheap.size);
403 }
404