xref: /titanic_41/usr/src/uts/intel/io/drm/i915_mem.c (revision bbfe764ef179c2f606b093b6ec6bb149cde0e31b)
1 /* BEGIN CSTYLED */
2 
3 /* i915_mem.c -- Simple agp/fb memory manager for i915 -*- linux-c -*-
4  */
5 /*
6  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
7  * All Rights Reserved.
8  *
9  * Permission is hereby granted, free of charge, to any person obtaining a
10  * copy of this software and associated documentation files (the
11  * "Software"), to deal in the Software without restriction, including
12  * without limitation the rights to use, copy, modify, merge, publish,
13  * distribute, sub license, and/or sell copies of the Software, and to
14  * permit persons to whom the Software is furnished to do so, subject to
15  * the following conditions:
16  *
17  * The above copyright notice and this permission notice (including the
18  * next paragraph) shall be included in all copies or substantial portions
19  * of the Software.
20  *
21  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
22  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
24  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
25  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
26  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
27  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28  *
29  */
30 
31 /*
32  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
33  * Use is subject to license terms.
34  */
35 
36 #pragma ident	"%Z%%M%	%I%	%E% SMI"
37 
38 /* This memory manager is integrated into the global/local lru
39  * mechanisms used by the clients.  Specifically, it operates by
40  * setting the 'in_use' fields of the global LRU to indicate whether
41  * this region is privately allocated to a client.
42  *
43  * This does require the client to actually respect that field.
44  *
45  * Currently no effort is made to allocate 'private' memory in any
46  * clever way - the LRU information isn't used to determine which
47  * block to allocate, and the ring is drained prior to allocations --
48  * in other words allocation is expensive.
49  */
50 
51 #include "drmP.h"
52 #include "drm.h"
53 #include "i915_drm.h"
54 #include "i915_drv.h"
55 
56 void mark_block(drm_device_t * dev, struct mem_block *p, int in_use)
57 {
58 	drm_i915_private_t *dev_priv = dev->dev_private;
59 	drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
60 	drm_tex_region_t *list;
61 	unsigned shift, nr;
62 	unsigned start;
63 	unsigned end;
64 	unsigned i;
65 	int age;
66 
67 	shift = dev_priv->tex_lru_log_granularity;
68 	nr = I915_NR_TEX_REGIONS;
69 
70 	start = p->start >> shift;
71 	end = (p->start + p->size - 1) >> shift;
72 
73 	age = ++sarea_priv->texAge;
74 	list = sarea_priv->texList;
75 
76 	/* Mark the regions with the new flag and update their age.  Move
77 	 * them to head of list to preserve LRU semantics.
78 	 */
79 	for (i = start; i <= end; i++) {
80 		list[i].in_use = in_use;
81 		list[i].age = age;
82 
83 		/* remove_from_list(i)
84 		 */
85 		list[(unsigned)list[i].next].prev = list[i].prev;
86 		list[(unsigned)list[i].prev].next = list[i].next;
87 
88 		/* insert_at_head(list, i)
89 		 */
90 		list[i].prev = nr;
91 		list[i].next = list[nr].next;
92 		list[(unsigned)list[nr].next].prev = i;
93 		list[nr].next = i;
94 	}
95 }
96 
97 /* Very simple allocator for agp memory, working on a static range
98  * already mapped into each client's address space.
99  */
100 
/*
 * Carve the region [start, start+size) out of free block 'p' and hand
 * it to 'filp'.  At most two new free blocks are created: a leading
 * remainder (when 'start' is past p->start, e.g. for alignment) and a
 * trailing remainder (when the block is larger than 'size').
 *
 * NOTE(review): on drm_alloc failure the split is abandoned via 'out'
 * and the caller receives the current (possibly over-sized) block,
 * still fully marked as owned by 'filp' — presumably an intentional
 * best-effort fallback inherited from the upstream driver.
 */
static struct mem_block *split_block(struct mem_block *p, int start, int size,
				     DRMFILE filp)
{
	/* Maybe cut off the start of an existing block */
	if (start > p->start) {
		struct mem_block *newblock =
		    drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
		if (!newblock)
			goto out;
		/* new free block covers [start, end-of-p) */
		newblock->start = start;
		newblock->size = p->size - (start - p->start);
		newblock->filp = NULL;
		/* splice newblock into the circular list right after p */
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		/* shrink p to the leading remainder, continue with newblock */
		p->size -= newblock->size;
		p = newblock;
	}

	/* Maybe cut off the end of an existing block */
	if (size < p->size) {
		struct mem_block *newblock =
		    drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
		if (!newblock)
			goto out;
		/* new free block covers the tail past the allocation */
		newblock->start = start + size;
		newblock->size = p->size - size;
		newblock->filp = NULL;
		/* splice newblock into the circular list right after p */
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size = size;
	}

      out:
	/* Our block is in the middle */
	p->filp = filp;
	return (p);
}
142 
143 static struct mem_block *alloc_block(struct mem_block *heap, int size,
144 				     int align2, DRMFILE filp)
145 {
146 	struct mem_block *p;
147 	int mask = (1 << align2) - 1;
148 
149 	for (p = heap->next; p != heap; p = p->next) {
150 		int start = (p->start + mask) & ~mask;
151 		if (p->filp == NULL && start + size <= p->start + p->size)
152 			return split_block(p, start, size, filp);
153 	}
154 
155 	return NULL;
156 }
157 
158 static struct mem_block *find_block(struct mem_block *heap, int start)
159 {
160 	struct mem_block *p;
161 
162 	for (p = heap->next; p != heap; p = p->next)
163 		if (p->start == start)
164 			return (p);
165 
166 	return (NULL);
167 }
168 
169 struct mem_block *find_block_by_proc(struct mem_block *heap, DRMFILE filp)
170 {
171 	struct mem_block *p;
172 
173 	for (p = heap->next; p != heap; p = p->next)
174 		if (p->filp == filp)
175 			return (p);
176 
177 	return (NULL);
178 }
179 
180 void free_block(struct mem_block *p)
181 {
182 	p->filp = NULL;
183 
184 	/* Assumes a single contiguous range.  Needs a special filp in
185 	 * 'heap' to stop it being subsumed.
186 	 */
187 	if (p->next->filp == NULL) {
188 		struct mem_block *q = p->next;
189 		p->size += q->size;
190 		p->next = q->next;
191 		p->next->prev = p;
192 		drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
193 	}
194 
195 	if (p->prev->filp == NULL) {
196 		struct mem_block *q = p->prev;
197 		q->size += p->size;
198 		q->next = p->next;
199 		q->next->prev = q;
200 		drm_free(p, sizeof(*q), DRM_MEM_BUFLISTS);
201 	}
202 }
203 
204 /* Initialize.  How to check for an uninitialized heap?
205  */
206 static int init_heap(struct mem_block **heap, int start, int size)
207 {
208 	struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFLISTS);
209 
210 	if (!blocks)
211 		return DRM_ERR(ENOMEM);
212 
213 	*heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFLISTS);
214 	if (!*heap) {
215 		drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFLISTS);
216 		return DRM_ERR(ENOMEM);
217 	}
218 
219 	blocks->start = start;
220 	blocks->size = size;
221 	blocks->filp = NULL;
222 	blocks->next = blocks->prev = *heap;
223 
224 	(void) memset(*heap, 0, sizeof(**heap));
225 	(*heap)->filp = (DRMFILE) - 1;
226 	(*heap)->next = (*heap)->prev = blocks;
227 	return (0);
228 }
229 
230 /* Free all blocks associated with the releasing file.
231  */
/*
 * Release every block in 'heap' owned by the closing file 'filp':
 * first disown the blocks and clear their LRU in_use flags, then make
 * a second pass coalescing runs of adjacent free blocks.
 */
void i915_mem_release(drm_device_t * dev, DRMFILE filp, struct mem_block *heap)
{
	struct mem_block *p;

	/* Heap never created, or already torn down — nothing to do. */
	if (!heap || !heap->next)
		return;

	/* Pass 1: disown this file's blocks and update the shared LRU. */
	for (p = heap->next; p != heap; p = p->next) {
		if (p->filp == filp) {
			p->filp = NULL;
			mark_block(dev, p, 0);
		}
	}

	/* Assumes a single contiguous range.  Needs a special filp in
	 * 'heap' to stop it being subsumed.
	 */
	/*
	 * Pass 2: merge each free block with any free successors.  The
	 * inner while unlinks and frees p->next repeatedly, so the
	 * outer for's p->next is re-read only after merging finishes.
	 */
	for (p = heap->next; p != heap; p = p->next) {
		while (p->filp == NULL && p->next->filp == NULL) {
			struct mem_block *q = p->next;
			p->size += q->size;
			p->next = q->next;
			p->next->prev = p;
			drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
		}
	}
}
259 
260 /* Shutdown.
261  */
262 void i915_mem_takedown(struct mem_block **heap)
263 {
264 	struct mem_block *p;
265 
266 	if (!*heap)
267 		return;
268 
269 	for (p = (*heap)->next; p != *heap;) {
270 		struct mem_block *q = p;
271 		p = p->next;
272 		drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
273 	}
274 
275 	drm_free(*heap, sizeof(**heap), DRM_MEM_BUFLISTS);
276 	*heap = NULL;
277 }
278 
279 struct mem_block **get_heap(drm_i915_private_t * dev_priv, int region)
280 {
281 	switch (region) {
282 	case I915_MEM_REGION_AGP:
283 		return (&dev_priv->agp_heap);
284 	default:
285 		return (NULL);
286 	}
287 }
288 
289 /* IOCTL HANDLERS */
290 
291 /*ARGSUSED*/
292 int i915_mem_alloc(DRM_IOCTL_ARGS)
293 {
294 	DRM_DEVICE;
295 	drm_i915_private_t *dev_priv = dev->dev_private;
296 	drm_i915_mem_alloc_t alloc;
297 	struct mem_block *block, **heap;
298 
299 	if (!dev_priv) {
300 		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
301 		return DRM_ERR(EINVAL);
302 	}
303 
304 	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
305 		drm_i915_mem_alloc32_t	alloc32;
306 
307 		DRM_COPY_FROM_USER_IOCTL(alloc32,
308 			(drm_i915_mem_alloc32_t __user *) data,
309 			sizeof (alloc32));
310 		alloc.region = alloc32.region;
311 		alloc.alignment = alloc32.alignment;
312 		alloc.size = alloc32.size;
313 		alloc.region_offset = (int __user *)(uintptr_t)alloc32.region_offset;
314 	} else
315 		DRM_COPY_FROM_USER_IOCTL(alloc, (drm_i915_mem_alloc_t __user *) data,
316 			sizeof(alloc));
317 
318 	heap = get_heap(dev_priv, alloc.region);
319 	if (!heap || !*heap)
320 		return DRM_ERR(EFAULT);
321 
322 	/* Make things easier on ourselves: all allocations at least
323 	 * 4k aligned.
324 	 */
325 	if (alloc.alignment < 12)
326 		alloc.alignment = 12;
327 
328 	block = alloc_block(*heap, alloc.size, alloc.alignment, filp);
329 
330 	if (!block)
331 		return DRM_ERR(ENOMEM);
332 
333 	mark_block(dev, block, 1);
334 
335 	if (DRM_COPY_TO_USER(alloc.region_offset, &block->start, sizeof(int))) {
336 		DRM_ERROR("copy_to_user\n");
337 		return DRM_ERR(EFAULT);
338 	}
339 
340 	return (0);
341 }
342 
343 /*ARGSUSED*/
344 int i915_mem_free(DRM_IOCTL_ARGS)
345 {
346 	DRM_DEVICE;
347 	drm_i915_private_t *dev_priv = dev->dev_private;
348 	drm_i915_mem_free_t memfree;
349 	struct mem_block *block, **heap;
350 
351 	if (!dev_priv) {
352 		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
353 		return DRM_ERR(EINVAL);
354 	}
355 
356 	DRM_COPY_FROM_USER_IOCTL(memfree, (drm_i915_mem_free_t __user *) data,
357 				 sizeof(memfree));
358 
359 	heap = get_heap(dev_priv, memfree.region);
360 	if (!heap || !*heap)
361 		return DRM_ERR(EFAULT);
362 
363 	block = find_block(*heap, memfree.region_offset);
364 	if (!block)
365 		return DRM_ERR(EFAULT);
366 
367 	if (block->filp != filp)
368 		return DRM_ERR(EPERM);
369 
370 	mark_block(dev, block, 0);
371 	free_block(block);
372 	return (0);
373 }
374 
375 /*ARGSUSED*/
376 int i915_mem_init_heap(DRM_IOCTL_ARGS)
377 {
378 	DRM_DEVICE;
379 	drm_i915_private_t *dev_priv = dev->dev_private;
380 	drm_i915_mem_init_heap_t initheap;
381 	struct mem_block **heap;
382 
383 	if (!dev_priv) {
384 		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
385 		return DRM_ERR(EINVAL);
386 	}
387 
388 	DRM_COPY_FROM_USER_IOCTL(initheap,
389 				 (drm_i915_mem_init_heap_t __user *) data,
390 				 sizeof(initheap));
391 
392 	heap = get_heap(dev_priv, initheap.region);
393 	if (!heap)
394 		return DRM_ERR(EFAULT);
395 
396 	if (*heap) {
397 		DRM_ERROR("heap already initialized?");
398 		return DRM_ERR(EFAULT);
399 	}
400 
401 	return init_heap(heap, initheap.start, initheap.size);
402 }
403 
404 /*ARGSUSED*/
405 int i915_mem_destroy_heap(DRM_IOCTL_ARGS)
406 {
407 	DRM_DEVICE;
408 	drm_i915_private_t *dev_priv = dev->dev_private;
409 	drm_i915_mem_destroy_heap_t destroyheap;
410 	struct mem_block **heap;
411 
412 	if (!dev_priv) {
413 		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
414 		return DRM_ERR(EINVAL);
415 	}
416 
417 	DRM_COPY_FROM_USER_IOCTL(destroyheap, (drm_i915_mem_destroy_heap_t *)data,
418 				  sizeof(destroyheap));
419 
420 	heap = get_heap(dev_priv, destroyheap.region);
421 	if (!heap) {
422 		DRM_ERROR("get_heap failed");
423 		return DRM_ERR(EFAULT);
424 	}
425 
426 	if (!*heap) {
427 		DRM_ERROR("heap not initialized?");
428 		return DRM_ERR(EFAULT);
429 	}
430 
431 	i915_mem_takedown(heap);
432 	return (0);
433 }
434