/* BEGIN CSTYLED */

/* i915_mem.c -- Simple agp/fb memory manager for i915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident "%Z%%M% %I% %E% SMI"

/* This memory manager is integrated into the global/local lru
 * mechanisms used by the clients.  Specifically, it operates by
 * setting the 'in_use' fields of the global LRU to indicate whether
 * this region is privately allocated to a client.
 *
 * This does require the client to actually respect that field.
 *
 * Currently no effort is made to allocate 'private' memory in any
 * clever way - the LRU information isn't used to determine which
 * block to allocate, and the ring is drained prior to allocations --
 * in other words allocation is expensive.
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

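/* mark_block() updates the texture-region LRU that the driver shares
 * with its clients through the SAREA.  Every region covered by the
 * block [p->start, p->start + p->size) has its 'in_use' flag set to
 * the given value and its age bumped, and is then moved to the head
 * of the shared LRU list.
 *
 * As an illustration (values not taken from the code below): with a
 * granularity shift of 16 (64k regions), a block starting at 0x20000
 * with size 0x30000 touches regions 2 through 4.
 */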
void mark_block(drm_device_t *dev, struct mem_block *p, int in_use)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_tex_region_t *list;
	unsigned shift, nr;
	unsigned start;
	unsigned end;
	unsigned i;
	int age;

	shift = dev_priv->tex_lru_log_granularity;
	nr = I915_NR_TEX_REGIONS;

	start = p->start >> shift;
	end = (p->start + p->size - 1) >> shift;

	age = ++sarea_priv->texAge;
	list = sarea_priv->texList;

	/* Mark the regions with the new flag and update their age.  Move
	 * them to head of list to preserve LRU semantics.
	 */
	for (i = start; i <= end; i++) {
		list[i].in_use = (unsigned char)in_use;
		list[i].age = age;

		/* remove_from_list(i)
		 */
		list[(unsigned)list[i].next].prev = list[i].prev;
		list[(unsigned)list[i].prev].next = list[i].next;

		/* insert_at_head(list, i)
		 */
		list[i].prev = (unsigned char)nr;
		list[i].next = list[nr].next;
		list[(unsigned)list[nr].next].prev = (unsigned char)i;
		list[nr].next = (unsigned char)i;
	}
}

/* Very simple allocator for agp memory, working on a static range
 * already mapped into each client's address space.
 */

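/* split_block() hands out the sub-range [start, start + size) of the
 * free block 'p', carving a new free block off the front and/or the
 * back as needed, and tags the resulting block with the owning file.
 *
 * Illustrative example (not from the code): splitting a free block
 * covering [0x0, 0x10000) at start 0x1000 with size 0x2000 leaves
 * [0x0, 0x1000) free, returns [0x1000, 0x3000) owned by fpriv, and
 * leaves [0x3000, 0x10000) free.
 */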
static struct mem_block *split_block(struct mem_block *p, int start, int size,
    drm_file_t *fpriv)
{
	/* Maybe cut off the start of an existing block */
	if (start > p->start) {
		struct mem_block *newblock =
		    drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
		if (!newblock)
			goto out;
		newblock->start = start;
		newblock->size = p->size - (start - p->start);
		newblock->filp = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size -= newblock->size;
		p = newblock;
	}

	/* Maybe cut off the end of an existing block */
	if (size < p->size) {
		struct mem_block *newblock =
		    drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
		if (!newblock)
			goto out;
		newblock->start = start + size;
		newblock->size = p->size - size;
		newblock->filp = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size = size;
	}

out:
	/* Our block is in the middle */
	p->filp = fpriv;
	return (p);
}

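/* alloc_block() does a first-fit walk of the circular block list.
 * 'align2' is the log2 of the required byte alignment, so the mask
 * below rounds each candidate start up to the next 2^align2 boundary
 * (e.g. align2 == 12 gives mask 0xfff and 4k-aligned starts).
 */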
static struct mem_block *alloc_block(struct mem_block *heap, int size,
    int align2, drm_file_t *fpriv)
{
	struct mem_block *p;
	int mask = (1 << align2) - 1;

	for (p = heap->next; p != heap; p = p->next) {
		int start = (p->start + mask) & ~mask;
		if (p->filp == NULL && start + size <= p->start + p->size)
			return (split_block(p, start, size, fpriv));
	}

	return (NULL);
}

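/* find_block() looks up the block that starts at exactly 'start',
 * which is the offset user space hands back when freeing.
 */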
static struct mem_block *find_block(struct mem_block *heap, int start)
{
	struct mem_block *p;

	for (p = heap->next; p != heap; p = p->next)
		if (p->start == start)
			return (p);

	return (NULL);
}

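/* find_block_by_proc() returns the first block still owned by the
 * given file, or NULL if that file owns nothing in this heap.
 */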
struct mem_block *find_block_by_proc(struct mem_block *heap, drm_file_t *fpriv)
{
	struct mem_block *p;

	for (p = heap->next; p != heap; p = p->next)
		if (p->filp == fpriv)
			return (p);

	return (NULL);
}

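/* free_block() releases a block and coalesces it with any free
 * neighbours.  The heap header carries a non-NULL sentinel filp, so
 * it is never merged away.
 */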
void free_block(struct mem_block *p)
{
	p->filp = NULL;

	/* Assumes a single contiguous range.  Needs a special filp in
	 * 'heap' to stop it being subsumed.
	 */
	if (p->next->filp == NULL) {
		struct mem_block *q = p->next;
		p->size += q->size;
		p->next = q->next;
		p->next->prev = p;
		drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
	}

	if (p->prev->filp == NULL) {
		struct mem_block *q = p->prev;
		q->size += p->size;
		q->next = p->next;
		q->next->prev = q;
		drm_free(p, sizeof(*q), DRM_MEM_BUFLISTS);
	}
}

/* Initialize. How to check for an uninitialized heap?
 */
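/* init_heap() builds a two-node circular list: a zeroed header node
 * whose filp is set to the sentinel value (drm_file_t *)-1, and a
 * single free block covering [start, start + size).
 */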
static int init_heap(struct mem_block **heap, int start, int size)
{
	struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFLISTS);

	if (!blocks)
		return (ENOMEM);

	*heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFLISTS);
	if (!*heap) {
		drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFLISTS);
		return (ENOMEM);
	}

	blocks->start = start;
	blocks->size = size;
	blocks->filp = NULL;
	blocks->next = blocks->prev = *heap;

	(void) memset(*heap, 0, sizeof(**heap));
	(*heap)->filp = (drm_file_t *)-1;
	(*heap)->next = (*heap)->prev = blocks;
	return (0);
}

/* Free all blocks associated with the releasing file.
 */
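/* Two passes: first drop ownership of the releasing file's blocks and
 * mark their LRU regions as no longer in use, then coalesce runs of
 * free blocks back together.
 */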
void i915_mem_release(drm_device_t *dev, drm_file_t *fpriv,
    struct mem_block *heap)
{
	struct mem_block *p;

	if (!heap || !heap->next)
		return;

	for (p = heap->next; p != heap; p = p->next) {
		if (p->filp == fpriv) {
			p->filp = NULL;
			mark_block(dev, p, 0);
		}
	}

	/* Assumes a single contiguous range.  Needs a special filp in
	 * 'heap' to stop it being subsumed.
	 */
	for (p = heap->next; p != heap; p = p->next) {
		while (p->filp == NULL && p->next->filp == NULL) {
			struct mem_block *q = p->next;
			p->size += q->size;
			p->next = q->next;
			p->next->prev = p;
			drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
		}
	}
}

/* Shutdown.
 */
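/* i915_mem_takedown() frees every block in the heap, including the
 * header node, and clears the caller's heap pointer.
 */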
void i915_mem_takedown(struct mem_block **heap)
{
	struct mem_block *p;

	if (!*heap)
		return;

	for (p = (*heap)->next; p != *heap;) {
		struct mem_block *q = p;
		p = p->next;
		drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
	}

	drm_free(*heap, sizeof(**heap), DRM_MEM_BUFLISTS);
	*heap = NULL;
}

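/* get_heap() maps a region id supplied by user space to the matching
 * heap pointer; only the AGP region is currently supported.
 */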
struct mem_block **get_heap(drm_i915_private_t *dev_priv, int region)
{
	switch (region) {
	case I915_MEM_REGION_AGP:
		return (&dev_priv->agp_heap);
	default:
		return (NULL);
	}
}

/* IOCTL HANDLERS */

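/* i915_mem_alloc: copy in the request (converting from an ILP32
 * client when necessary), look up the heap, force the alignment to at
 * least 4k (the 'alignment' field is the log2 of the byte alignment,
 * so 12 means 4096), allocate a block, mark its LRU regions in use,
 * and copy the resulting offset back to user space.
 */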
/*ARGSUSED*/
int i915_mem_alloc(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_mem_alloc_t alloc;
	struct mem_block *block, **heap;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return (EINVAL);
	}

	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_i915_mem_alloc32_t alloc32;

		DRM_COPYFROM_WITH_RETURN(&alloc32, (void *)data,
		    sizeof(alloc32));
		alloc.region = alloc32.region;
		alloc.alignment = alloc32.alignment;
		alloc.size = alloc32.size;
		alloc.region_offset = (int *)(uintptr_t)alloc32.region_offset;
	} else
		DRM_COPYFROM_WITH_RETURN(&alloc, (void *)data, sizeof(alloc));

	heap = get_heap(dev_priv, alloc.region);
	if (!heap || !*heap)
		return (EFAULT);

	/* Make things easier on ourselves: all allocations at least
	 * 4k aligned.
	 */
	if (alloc.alignment < 12)
		alloc.alignment = 12;

	block = alloc_block(*heap, alloc.size, alloc.alignment, fpriv);

	if (!block)
		return (ENOMEM);

	mark_block(dev, block, 1);

	if (DRM_COPY_TO_USER(alloc.region_offset, &block->start, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return (EFAULT);
	}

	return (0);
}

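/* i915_mem_free: look up the block by the offset user space passed
 * in, refuse to free a block owned by a different file (EPERM), then
 * clear the LRU in_use flags and return the block to the heap.
 */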
/*ARGSUSED*/
int i915_mem_free(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_mem_free_t memfree;
	struct mem_block *block, **heap;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return (EINVAL);
	}

	DRM_COPYFROM_WITH_RETURN(&memfree, (void *)data, sizeof(memfree));

	heap = get_heap(dev_priv, memfree.region);
	if (!heap || !*heap)
		return (EFAULT);

	block = find_block(*heap, memfree.region_offset);
	if (!block)
		return (EFAULT);

	if (block->filp != fpriv)
		return (EPERM);

	mark_block(dev, block, 0);
	free_block(block);
	return (0);
}

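/* i915_mem_init_heap: create the heap for a region.  Fails if the
 * region id is unknown or the heap has already been initialized.
 */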
/*ARGSUSED*/
int i915_mem_init_heap(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_mem_init_heap_t initheap;
	struct mem_block **heap;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return (EINVAL);
	}

	DRM_COPYFROM_WITH_RETURN(&initheap, (void *)data, sizeof(initheap));

	heap = get_heap(dev_priv, initheap.region);
	if (!heap)
		return (EFAULT);

	if (*heap) {
		DRM_ERROR("heap already initialized?");
		return (EFAULT);
	}

	return (init_heap(heap, initheap.start, initheap.size));
}

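/* i915_mem_destroy_heap: tear down a region's heap via
 * i915_mem_takedown(), after checking that it was initialized.
 */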
/*ARGSUSED*/
int i915_mem_destroy_heap(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_mem_destroy_heap_t destroyheap;
	struct mem_block **heap;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return (EINVAL);
	}

	DRM_COPYFROM_WITH_RETURN(&destroyheap, (void *)data,
	    sizeof(destroyheap));

	heap = get_heap(dev_priv, destroyheap.region);
	if (!heap) {
		DRM_ERROR("get_heap failed");
		return (EFAULT);
	}

	if (!*heap) {
		DRM_ERROR("heap not initialized?");
		return (EFAULT);
	}

	i915_mem_takedown(heap);
	return (0);
}