xref: /linux/drivers/gpu/drm/drm_gem.c (revision b889fcf63cb62e7fdb7816565e28f44dbe4a76a5)
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */
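
/*
 * Illustrative sketch (not part of the original file): the typical handle
 * lifecycle a driver ioctl handler goes through with the helpers below.
 * "my_obj" is a hypothetical driver structure embedding a struct
 * drm_gem_object as its first member "base".
 *
 *	u32 handle;
 *	int ret;
 *
 *	ret = drm_gem_object_init(dev, &my_obj->base, size);
 *	if (ret == 0)
 *		ret = drm_gem_handle_create(file_priv, &my_obj->base, &handle);
 *
 * The handle is what userspace sees; later ioctls translate it back with
 * drm_gem_object_lookup(), and drm_gem_handle_delete() drops the mapping
 * when userspace closes the buffer.
 */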

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
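
/*
 * Worked example (assuming the common PAGE_SHIFT of 12, i.e. 4 KiB pages):
 * on a 64-bit kernel DRM_FILE_PAGE_OFFSET_START is (0xFFFFFFFF >> 12) + 1 =
 * 0x100000 pages, so the first fake offset sits at the 4 GiB boundary, and
 * DRM_FILE_PAGE_OFFSET_SIZE is 0xFFFFF * 16 pages, i.e. just under 64 GiB of
 * fake-offset space shared by all GEM objects on the device.
 */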

/**
 * Initialize the GEM device fields
 */

int
drm_gem_init(struct drm_device *dev)
{
	struct drm_gem_mm *mm;

	spin_lock_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);

	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
	if (!mm) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->mm_private = mm;

	if (drm_ht_create(&mm->offset_hash, 12)) {
		kfree(mm);
		return -ENOMEM;
	}

	if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
			DRM_FILE_PAGE_OFFSET_SIZE)) {
		drm_ht_remove(&mm->offset_hash);
		kfree(mm);
		return -ENOMEM;
	}

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
	struct drm_gem_mm *mm = dev->mm_private;

	drm_mm_takedown(&mm->offset_manager);
	drm_ht_remove(&mm->offset_hash);
	kfree(mm);
	dev->mm_private = NULL;
}

/**
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(obj->filp))
		return PTR_ERR(obj->filp);

	kref_init(&obj->refcount);
	atomic_set(&obj->handle_count, 0);
	obj->size = size;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
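
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * drivers normally embed struct drm_gem_object in their own buffer type and
 * use drm_gem_object_init() to attach the shmfs backing store.  "my_bo" and
 * "my_bo_create" are made-up names.
 *
 *	struct my_bo {
 *		struct drm_gem_object base;
 *	};
 *
 *	static struct my_bo *my_bo_create(struct drm_device *dev, size_t size)
 *	{
 *		struct my_bo *bo;
 *
 *		bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *		if (!bo)
 *			return NULL;
 *
 *		if (drm_gem_object_init(dev, &bo->base, PAGE_ALIGN(size))) {
 *			kfree(bo);
 *			return NULL;
 *		}
 *		return bo;
 *	}
 */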

/**
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
int drm_gem_private_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	atomic_set(&obj->handle_count, 0);
	obj->size = size;

	return 0;
}
EXPORT_SYMBOL(drm_gem_private_object_init);

/**
 * Allocate a GEM object of the specified size with shmfs backing store
 */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
	struct drm_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		goto free;

	if (drm_gem_object_init(dev, obj, size) != 0)
		goto free;

	if (dev->driver->gem_init_object != NULL &&
	    dev->driver->gem_init_object(obj) != 0) {
		goto fput;
	}
	return obj;
fput:
	/* Object_init mangles the global counters - readjust them. */
	fput(obj->filp);
free:
	kfree(obj);
	return NULL;
}
EXPORT_SYMBOL(drm_gem_object_alloc);

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	if (obj->import_attach) {
		drm_prime_remove_imported_buf_handle(&filp->prime,
				obj->import_attach->dmabuf);
	}
	if (obj->export_dma_buf) {
		drm_prime_remove_imported_buf_handle(&filp->prime,
				obj->export_dma_buf);
	}
}

/**
 * Removes the mapping from handle to filp for this object.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	drm_gem_remove_prime_handles(obj, filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		       struct drm_gem_object *obj,
		       u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	/*
	 * Get the user-visible handle using idr.
	 */
again:
	/* ensure there is space available to allocate a handle */
	if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
		return -ENOMEM;

	/* do the allocation under our spinlock */
	spin_lock(&file_priv->table_lock);
	ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
	spin_unlock(&file_priv->table_lock);
	if (ret == -EAGAIN)
		goto again;
	else if (ret)
		return ret;

	drm_gem_object_handle_reference(obj);

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_create);
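
/*
 * Illustrative sketch (hypothetical driver code): publishing a freshly
 * created object to userspace, e.g. from a dumb_create style ioctl.  The
 * handle keeps the object alive, so the caller drops its own reference
 * afterwards, as the comment above suggests.
 *
 *	ret = drm_gem_handle_create(file_priv, &bo->base, &handle);
 *	drm_gem_object_unreference_unlocked(&bo->base);
 *	if (ret)
 *		return ret;
 *
 *	args->handle = handle;
 *	return 0;
 */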


/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list = &obj->map_list;

	drm_ht_remove_item(&mm->offset_hash, &list->hash);
	drm_mm_put_block(list->file_offset_node);
	kfree(list->map);
	list->map = NULL;
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
int
drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list;
	struct drm_local_map *map;
	int ret;

	/* Set the object up for mmap'ing */
	list = &obj->map_list;
	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->size;
	map->handle = obj;

	/* Get a DRM GEM mmap offset allocated... */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
			obj->size / PAGE_SIZE, 0, false);

	if (!list->file_offset_node) {
		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
		ret = -ENOSPC;
		goto out_free_list;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
			obj->size / PAGE_SIZE, 0);
	if (!list->file_offset_node) {
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->hash.key = list->file_offset_node->start;
	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
	if (ret) {
		DRM_ERROR("failed to add to map hash\n");
		goto out_free_mm;
	}

	return 0;

out_free_mm:
	drm_mm_put_block(list->file_offset_node);
out_free_list:
	kfree(list->map);
	list->map = NULL;

	return ret;
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
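
/*
 * Illustrative sketch (hypothetical driver code): once the fake offset
 * exists, the byte offset reported to userspace (e.g. from a dumb_map_offset
 * hook) is derived from the hash key, which is kept in units of pages.
 *
 *	ret = drm_gem_create_mmap_offset(obj);
 *	if (ret)
 *		return ret;
 *
 *	*offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
 */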

/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
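
/*
 * Illustrative sketch (hypothetical ioctl handler): the lookup takes an extra
 * reference on success, which the caller must drop once it is done.
 *
 *	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 *	if (obj == NULL)
 *		return -ENOENT;
 *
 *	... operate on obj ...
 *
 *	drm_gem_object_unreference_unlocked(obj);
 *	return 0;
 */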

/**
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

again:
	if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
		ret = -ENOMEM;
		goto err;
	}

	spin_lock(&dev->object_name_lock);
	if (!obj->name) {
		ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
					&obj->name);
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);

		if (ret == -EAGAIN)
			goto again;
		else if (ret)
			goto err;

		/* Allocate a reference for the name table.  */
		drm_gem_object_reference(obj);
	} else {
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);
		ret = 0;
	}

err:
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}

/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	spin_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj)
		drm_gem_object_reference(obj);
	spin_unlock(&dev->object_name_lock);
	if (!obj)
		return -ENOENT;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}
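
/*
 * Illustrative sketch (hypothetical userspace code, using the GEM uapi
 * structures from drm.h): one process names a buffer with
 * DRM_IOCTL_GEM_FLINK and another turns the name back into a local handle
 * with DRM_IOCTL_GEM_OPEN.
 *
 *	struct drm_gem_flink flink = { .handle = handle };
 *
 *	ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
 *
 *	struct drm_gem_open open_arg = { .name = flink.name };
 *
 *	ioctl(other_fd, DRM_IOCTL_GEM_OPEN, &open_arg);
 *
 * After the second call, open_arg.handle and open_arg.size describe the
 * shared object in the second file's handle space.
 */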

/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}

/**
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	drm_gem_remove_prime_handles(obj, file_priv);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}

/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);

	idr_remove_all(&file_private->object_idr);
	idr_destroy(&file_private->object_idr);
}

void
drm_gem_object_release(struct drm_gem_object *obj)
{
	if (obj->filp)
		fput(obj->filp);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
	struct drm_device *dev = obj->dev;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

static void drm_gem_object_ref_bug(struct kref *list_kref)
{
	BUG();
}

/**
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	spin_lock(&dev->object_name_lock);
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
		spin_unlock(&dev->object_name_lock);
		/*
		 * The object name held a reference to this object, drop
		 * that now.
		 *
		 * This cannot be the last reference, since the handle holds
		 * one too.
		 */
		kref_put(&obj->refcount, drm_gem_object_ref_bug);
	} else
		spin_unlock(&dev->object_name_lock);

}
EXPORT_SYMBOL(drm_gem_object_handle_free);

void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);

	mutex_lock(&obj->dev->struct_mutex);
	drm_vm_open_locked(obj->dev, vma);
	mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(obj->dev, vma);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);


/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * If we find the object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object), we set up the driver fault handler so that any accesses
 * to the object can be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_local_map *map = NULL;
	struct drm_gem_object *obj;
	struct drm_hash_item *hash;
	int ret = 0;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);

	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map ||
	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
		ret = -EPERM;
		goto out_unlock;
	}

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start) {
		ret = -EINVAL;
		goto out_unlock;
	}

	obj = map->handle;
	if (!obj->dev->driver->gem_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = obj->dev->driver->gem_vm_ops;
	vma->vm_private_data = map->handle;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	drm_vm_open_locked(dev, vma);

out_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);

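/*
 * Illustrative sketch (hypothetical driver code): a driver opts into this
 * mmap path by pointing its file_operations .mmap at drm_gem_mmap() and
 * supplying gem_vm_ops in its struct drm_driver; "my_gem_fault" is a made-up
 * fault handler name.
 *
 *	static const struct vm_operations_struct my_gem_vm_ops = {
 *		.fault = my_gem_fault,
 *		.open = drm_gem_vm_open,
 *		.close = drm_gem_vm_close,
 *	};
 *
 *	static const struct file_operations my_driver_fops = {
 *		.owner = THIS_MODULE,
 *		.open = drm_open,
 *		.release = drm_release,
 *		.unlocked_ioctl = drm_ioctl,
 *		.mmap = drm_gem_mmap,
 *	};
 *
 * The driver's struct drm_driver then sets .gem_vm_ops = &my_gem_vm_ops so
 * the code above can install it on the VMA at mmap time.
 */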