/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include "drmP.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
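
/*
 * For example, with 4 KiB pages (PAGE_SHIFT == 12) the range starts at page
 * 0x100000 (byte offset 4 GiB), above the 32-bit offsets used for legacy DRM
 * mappings, and spans roughly 16 million pages, i.e. about 64 GiB of fake
 * mmap offset space in which GEM objects can be placed.
 */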

/**
 * Initialize the GEM device fields
 */

int
drm_gem_init(struct drm_device *dev)
{
	struct drm_gem_mm *mm;

	spin_lock_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);
	atomic_set(&dev->object_count, 0);
	atomic_set(&dev->object_memory, 0);
	atomic_set(&dev->pin_count, 0);
	atomic_set(&dev->pin_memory, 0);
	atomic_set(&dev->gtt_count, 0);
	atomic_set(&dev->gtt_memory, 0);

	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
	if (!mm) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->mm_private = mm;

	if (drm_ht_create(&mm->offset_hash, 19)) {
		kfree(mm);
		return -ENOMEM;
	}

	if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
			DRM_FILE_PAGE_OFFSET_SIZE)) {
		drm_ht_remove(&mm->offset_hash);
		kfree(mm);
		return -ENOMEM;
	}

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
	struct drm_gem_mm *mm = dev->mm_private;

	drm_mm_takedown(&mm->offset_manager);
	drm_ht_remove(&mm->offset_hash);
	kfree(mm);
	dev->mm_private = NULL;
}
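
/*
 * drm_gem_init()/drm_gem_destroy() are invoked by the DRM core during device
 * setup and teardown for drivers that advertise GEM support.  A minimal
 * sketch of how a driver opts in (the foo_* callbacks are hypothetical):
 *
 *	static struct drm_driver foo_driver = {
 *		.driver_features = DRIVER_GEM,
 *		.gem_init_object = foo_gem_init_object,
 *		.gem_free_object = foo_gem_free_object,
 *		.gem_vm_ops = &foo_gem_vm_ops,
 *		...
 *	};
 */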

/**
 * Allocate a GEM object of the specified size with shmfs backing store
 */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
	struct drm_gem_object *obj;

	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		goto free;

	obj->dev = dev;
	obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(obj->filp))
		goto free;

	kref_init(&obj->refcount);
	kref_init(&obj->handlecount);
	obj->size = size;
	if (dev->driver->gem_init_object != NULL &&
	    dev->driver->gem_init_object(obj) != 0) {
		goto fput;
	}
	atomic_inc(&dev->object_count);
	atomic_add(obj->size, &dev->object_memory);
	return obj;
fput:
	fput(obj->filp);
free:
	kfree(obj);
	return NULL;
}
EXPORT_SYMBOL(drm_gem_object_alloc);

/**
 * Removes the mapping from handle to object for this file.
 */
static int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	mutex_lock(&dev->struct_mutex);
	drm_gem_object_handle_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to drop their own reference to the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		       struct drm_gem_object *obj,
		       u32 *handlep)
{
	int	ret;

	/*
	 * Get the user-visible handle using idr.
	 */
again:
	/* ensure there is space available to allocate a handle */
	if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
		return -ENOMEM;

	/* do the allocation under our spinlock */
	spin_lock(&file_priv->table_lock);
	ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
	spin_unlock(&file_priv->table_lock);
	if (ret == -EAGAIN)
		goto again;

	if (ret != 0)
		return ret;

	drm_gem_object_handle_reference(obj);
	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_create);
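
/*
 * A driver's "create" ioctl typically pairs drm_gem_object_alloc() with
 * drm_gem_handle_create(): allocate the object, publish a handle for it,
 * then drop the local references so the handle owns the object.  A sketch
 * of that pattern (args is assumed to be the driver's ioctl argument struct
 * with size/handle fields):
 *
 *	obj = drm_gem_object_alloc(dev, args->size);
 *	if (obj == NULL)
 *		return -ENOMEM;
 *
 *	ret = drm_gem_handle_create(file_priv, obj, &handle);
 *	mutex_lock(&dev->struct_mutex);
 *	drm_gem_object_handle_unreference(obj);
 *	mutex_unlock(&dev->struct_mutex);
 *	if (ret)
 *		return ret;
 *
 *	args->handle = handle;
 *	return 0;
 */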

/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
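
/*
 * Most driver ioctls start by translating the userspace handle back into an
 * object with the lookup above, and must drop the reference it takes once
 * they are done with the object.  A sketch of that pattern (error handling
 * elided):
 *
 *	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 *	if (obj == NULL)
 *		return -EBADF;
 *
 *	... operate on the object ...
 *
 *	mutex_lock(&dev->struct_mutex);
 *	drm_gem_object_unreference(obj);
 *	mutex_unlock(&dev->struct_mutex);
 */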

/**
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}
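
/*
 * Userspace reaches this through the generic DRM_IOCTL_GEM_CLOSE ioctl, for
 * example:
 *
 *	struct drm_gem_close args = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_GEM_CLOSE, &args);
 */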

/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

again:
	if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
		ret = -ENOMEM;
		goto err;
	}

	spin_lock(&dev->object_name_lock);
	if (!obj->name) {
		ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
					&obj->name);
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);

		if (ret == -EAGAIN)
			goto again;

		if (ret != 0)
			goto err;

		/* Allocate a reference for the name table.  */
		drm_gem_object_reference(obj);
	} else {
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);
		ret = 0;
	}

err:
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	spin_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj)
		drm_gem_object_reference(obj);
	spin_unlock(&dev->object_name_lock);
	if (!obj)
		return -ENOENT;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}
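
/*
 * Together, flink and open are how two processes share a buffer: the
 * exporter names one of its handles and passes the 32-bit name to the
 * importer (over a socket, the X protocol, etc.), which opens it to get a
 * handle of its own.  Roughly, from userspace (error handling elided):
 *
 *	struct drm_gem_flink flink = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
 *	... send flink.name to the importing process ...
 *
 *	struct drm_gem_open open_args = { .name = name };
 *	ioctl(fd, DRM_IOCTL_GEM_OPEN, &open_args);
 *	... open_args.handle and open_args.size are now valid in the importer ...
 */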

/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}

/**
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_gem_object *obj = ptr;

	drm_gem_object_handle_unreference(obj);

	return 0;
}

/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	mutex_lock(&dev->struct_mutex);
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, NULL);

	idr_destroy(&file_private->object_idr);
	mutex_unlock(&dev->struct_mutex);
}

/**
 * Called after the last reference to the object has been lost.
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
	struct drm_device *dev = obj->dev;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);

	fput(obj->filp);
	atomic_dec(&dev->object_count);
	atomic_sub(obj->size, &dev->object_memory);
	kfree(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
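
/*
 * The gem_free_object hook invoked above is where a driver tears down its
 * per-object state.  A minimal implementation might do no more than free
 * the driver-private data attached in gem_init_object (hypothetical driver,
 * sketch only):
 *
 *	static void foo_gem_free_object(struct drm_gem_object *obj)
 *	{
 *		kfree(obj->driver_private);
 *	}
 */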

/**
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
void
drm_gem_object_handle_free(struct kref *kref)
{
	struct drm_gem_object *obj = container_of(kref,
						  struct drm_gem_object,
						  handlecount);
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	spin_lock(&dev->object_name_lock);
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
		spin_unlock(&dev->object_name_lock);
		/*
		 * The object name held a reference to this object, drop
		 * that now.
		 */
		drm_gem_object_unreference(obj);
	} else
		spin_unlock(&dev->object_name_lock);

}
EXPORT_SYMBOL(drm_gem_object_handle_free);

void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);


/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * If we find the object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object), we set up the driver fault handler so that any accesses
 * to the object can be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_local_map *map = NULL;
	struct drm_gem_object *obj;
	struct drm_hash_item *hash;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map ||
	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
		ret = -EPERM;
		goto out_unlock;
	}

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start) {
		ret = -EINVAL;
		goto out_unlock;
	}

	obj = map->handle;
	if (!obj->dev->driver->gem_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
	vma->vm_ops = obj->dev->driver->gem_vm_ops;
	vma->vm_private_data = map->handle;
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open_locked(vma);

out_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
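
/*
 * The vm_operations_struct installed above comes from the driver; a typical
 * driver reuses drm_gem_vm_open()/drm_gem_vm_close() for the reference
 * management and only supplies a fault handler that binds the object and
 * inserts PFNs on demand (foo_gem_fault is hypothetical):
 *
 *	static struct vm_operations_struct foo_gem_vm_ops = {
 *		.fault = foo_gem_fault,
 *		.open = drm_gem_vm_open,
 *		.close = drm_gem_vm_close,
 *	};
 */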
561