/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include "drmP.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the CPU, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */
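
/*
 * Roughly, the fd-style operations map onto GEM as follows (a loose
 * analogy, not a one-to-one correspondence):
 *
 *	open/create  ->  driver-specific create ioctl + drm_gem_handle_create()
 *	use (fd)     ->  drm_gem_object_lookup() on the handle
 *	share        ->  DRM_IOCTL_GEM_FLINK / DRM_IOCTL_GEM_OPEN via a global name
 *	close        ->  DRM_IOCTL_GEM_CLOSE, dropping the handle reference
 */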

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
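
/*
 * For example, with 4 KiB pages (PAGE_SHIFT == 12) these work out to:
 *
 *	DRM_FILE_PAGE_OFFSET_START = (0xFFFFFFFF >> 12) + 1
 *	                           = 0x100000 pages (byte offset 4 GiB)
 *	DRM_FILE_PAGE_OFFSET_SIZE  = 0xFFFFF * 16 pages (just under 64 GiB)
 *
 * i.e. fake offsets begin just above the highest offset a 32-bit byte
 * offset can describe, which is how drm_gem_mmap() below can tell them
 * apart from ordinary map offsets.
 */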

/**
 * Initialize the GEM device fields
 */

int
drm_gem_init(struct drm_device *dev)
{
	struct drm_gem_mm *mm;

	spin_lock_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);
	atomic_set(&dev->object_count, 0);
	atomic_set(&dev->object_memory, 0);
	atomic_set(&dev->pin_count, 0);
	atomic_set(&dev->pin_memory, 0);
	atomic_set(&dev->gtt_count, 0);
	atomic_set(&dev->gtt_memory, 0);

	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
	if (!mm) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->mm_private = mm;

	if (drm_ht_create(&mm->offset_hash, 19)) {
		kfree(mm);
		return -ENOMEM;
	}

	if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
			DRM_FILE_PAGE_OFFSET_SIZE)) {
		drm_ht_remove(&mm->offset_hash);
		kfree(mm);
		return -ENOMEM;
	}

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
	struct drm_gem_mm *mm = dev->mm_private;

	drm_mm_takedown(&mm->offset_manager);
	drm_ht_remove(&mm->offset_hash);
	kfree(mm);
	dev->mm_private = NULL;
}

/**
 * Allocate a GEM object of the specified size with shmfs backing store
 */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
	struct drm_gem_object *obj;

	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		goto free;

	obj->dev = dev;
	obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(obj->filp))
		goto free;

	kref_init(&obj->refcount);
	kref_init(&obj->handlecount);
	obj->size = size;
	if (dev->driver->gem_init_object != NULL &&
	    dev->driver->gem_init_object(obj) != 0) {
		goto fput;
	}
	atomic_inc(&dev->object_count);
	atomic_add(obj->size, &dev->object_memory);
	return obj;
fput:
	fput(obj->filp);
free:
	kfree(obj);
	return NULL;
}
EXPORT_SYMBOL(drm_gem_object_alloc);
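
/*
 * The shmem file provides the object's swappable backing pages.  A
 * sketch of how a driver of this era might reach those pages through
 * the file's page cache mapping (page_index is a page-sized index into
 * the object):
 *
 *	struct address_space *mapping =
 *		obj->filp->f_path.dentry->d_inode->i_mapping;
 *	struct page *page = read_mapping_page(mapping, page_index, NULL);
 *
 *	if (!IS_ERR(page)) {
 *		... access the page contents ...
 *		page_cache_release(page);
 *	}
 */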

/**
 * Removes the mapping from handle to filp for this object.
 */
static int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}

/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to drop their reference to the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		       struct drm_gem_object *obj,
		       u32 *handlep)
{
	int	ret;

	/*
	 * Get the user-visible handle using idr.
	 */
again:
	/* ensure there is space available to allocate a handle */
	if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
		return -ENOMEM;

	/* do the allocation under our spinlock */
	spin_lock(&file_priv->table_lock);
	ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
	spin_unlock(&file_priv->table_lock);
	if (ret == -EAGAIN)
		goto again;

	if (ret != 0)
		return ret;

	drm_gem_object_handle_reference(obj);
	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_create);
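
/*
 * The usual create-time pattern, sketched for a hypothetical driver
 * create ioctl: once the handle exists it keeps the object alive, so
 * the caller drops its allocation reference whether or not handle
 * creation succeeded:
 *
 *	ret = drm_gem_handle_create(file_priv, obj, &args->handle);
 *	drm_gem_object_unreference_unlocked(obj);
 *	if (ret)
 *		return ret;
 */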

/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
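
/*
 * Typical use in a driver ioctl handler (foo_operate() stands in for
 * the driver's real work): take the reference, do the work, and drop
 * the reference on every return path:
 *
 *	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 *	if (obj == NULL)
 *		return -ENOENT;
 *	ret = foo_operate(obj);
 *	drm_gem_object_unreference_unlocked(obj);
 *	return ret;
 */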

/**
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

again:
	if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
		ret = -ENOMEM;
		goto err;
	}

	spin_lock(&dev->object_name_lock);
	if (!obj->name) {
		ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
					&obj->name);
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);

		if (ret == -EAGAIN)
			goto again;

		if (ret != 0)
			goto err;

		/* Allocate a reference for the name table.  */
		drm_gem_object_reference(obj);
	} else {
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);
		ret = 0;
	}

err:
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}

/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	spin_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj)
		drm_gem_object_reference(obj);
	spin_unlock(&dev->object_name_lock);
	if (!obj)
		return -ENOENT;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}
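
/*
 * From userspace, the name exchange between two processes sharing one
 * object looks roughly like this (error handling omitted):
 *
 *	struct drm_gem_flink flink = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
 *	... hand flink.name to the other process ...
 *
 *	struct drm_gem_open op = { .name = name };
 *	ioctl(fd, DRM_IOCTL_GEM_OPEN, &op);
 *	... op.handle and op.size now describe the shared object ...
 */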

/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}

/**
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_gem_object *obj = ptr;

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}

/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, NULL);

	idr_destroy(&file_private->object_idr);
}

static void
drm_gem_object_free_common(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	fput(obj->filp);
	atomic_dec(&dev->object_count);
	atomic_sub(obj->size, &dev->object_memory);
	kfree(obj);
}

/**
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);
	struct drm_device *dev = obj->dev;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);

	drm_gem_object_free_common(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * Called after the last reference to the object has been lost.
 * Must be called without holding struct_mutex
 *
 * Frees the object
 */
void
drm_gem_object_free_unlocked(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);
	struct drm_device *dev = obj->dev;

	if (dev->driver->gem_free_object_unlocked != NULL)
		dev->driver->gem_free_object_unlocked(obj);
	else if (dev->driver->gem_free_object != NULL) {
		mutex_lock(&dev->struct_mutex);
		dev->driver->gem_free_object(obj);
		mutex_unlock(&dev->struct_mutex);
	}

	drm_gem_object_free_common(obj);
}
EXPORT_SYMBOL(drm_gem_object_free_unlocked);
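
/*
 * A driver's gem_free_object hook only needs to tear down its private
 * state; the common cleanup (fput of the shmem file, accounting, and
 * kfree of the object) runs afterwards.  A sketch with hypothetical
 * foo_* names:
 *
 *	static void foo_gem_free_object(struct drm_gem_object *obj)
 *	{
 *		struct foo_obj *foo_priv = obj->driver_private;
 *
 *		foo_release_backing_storage(foo_priv);
 *		kfree(foo_priv);
 *	}
 */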

static void drm_gem_object_ref_bug(struct kref *list_kref)
{
	BUG();
}

/**
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
void
drm_gem_object_handle_free(struct kref *kref)
{
	struct drm_gem_object *obj = container_of(kref,
						  struct drm_gem_object,
						  handlecount);
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	spin_lock(&dev->object_name_lock);
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
		spin_unlock(&dev->object_name_lock);
		/*
		 * The object name held a reference to this object, drop
		 * that now.
		 *
		 * This cannot be the last reference, since the handle
		 * holds one too.
		 */
		kref_put(&obj->refcount, drm_gem_object_ref_bug);
	} else
		spin_unlock(&dev->object_name_lock);
}
EXPORT_SYMBOL(drm_gem_object_handle_free);

void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_unreference_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);
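
/*
 * Drivers normally plug these two helpers into their vm_operations_struct
 * alongside a device-specific fault handler, and point gem_vm_ops in
 * their drm_driver at it (foo_gem_fault is hypothetical):
 *
 *	static struct vm_operations_struct foo_gem_vm_ops = {
 *		.fault = foo_gem_fault,
 *		.open = drm_gem_vm_open,
 *		.close = drm_gem_vm_close,
 *	};
 */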

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * If we find the object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object), we set up the driver fault handler so that any accesses
 * to the object can be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_local_map *map = NULL;
	struct drm_gem_object *obj;
	struct drm_hash_item *hash;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map ||
	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
		ret = -EPERM;
		goto out_unlock;
	}

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start) {
		ret = -EINVAL;
		goto out_unlock;
	}

	obj = map->handle;
	if (!obj->dev->driver->gem_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
	vma->vm_ops = obj->dev->driver->gem_vm_ops;
	vma->vm_private_data = map->handle;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open_locked(vma);

out_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
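
/*
 * To route mmap() on the DRM device node through GEM, a driver sets
 * this routine as the mmap handler in the file_operations of its
 * drm_driver, e.g.:
 *
 *	.fops = {
 *		.owner = THIS_MODULE,
 *		.open = drm_open,
 *		.release = drm_release,
 *		.unlocked_ioctl = drm_ioctl,
 *		.mmap = drm_gem_mmap,
 *	},
 */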
589