/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>

#include <linux/shmem_fs.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"

static unsigned int convert_to_vm_err_msg(int msg)
{
	unsigned int out_msg;

	switch (msg) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		out_msg = VM_FAULT_NOPAGE;
		break;

	case -ENOMEM:
		out_msg = VM_FAULT_OOM;
		break;

	default:
		out_msg = VM_FAULT_SIGBUS;
		break;
	}

	return out_msg;
}

static int check_gem_flags(unsigned int flags)
{
	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_ERROR("invalid flags.\n");
		return -EINVAL;
	}

	return 0;
}

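/*
 * Map the buffer flags to a page protection for a userspace mapping:
 * EXYNOS_BO_CACHABLE keeps the default (cacheable) protection,
 * EXYNOS_BO_WC selects a write-combined mapping, and anything else
 * falls back to a non-cached mapping.
 */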
static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
					struct vm_area_struct *vma)
{
	DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);

	/* non-cacheable by default. */
	if (obj->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (obj->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
}

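/*
 * Rounding examples for the contiguous case handled below (illustrative,
 * assuming 4 KiB pages; SECTION_SIZE is the ARM section size, 1 MiB):
 *
 *	size = 5000	-> rounded up to PAGE_SIZE	(8192)
 *	size = 200K	-> rounded up to SZ_64K		(256K)
 *	size = 1M + 1	-> rounded up to SECTION_SIZE	(2M)
 *
 * Non-contiguous buffers are always rounded to whole pages.
 */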
static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
	if (!IS_NONCONTIG_BUFFER(flags)) {
		if (size >= SZ_1M)
			return roundup(size, SECTION_SIZE);
		else if (size >= SZ_64K)
			return roundup(size, SZ_64K);
		else
			goto out;
	}
out:
	return roundup(size, PAGE_SIZE);
}

struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
						gfp_t gfpmask)
{
	struct page *p, **pages;
	int i, npages;

	npages = obj->size >> PAGE_SHIFT;

	pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < npages; i++) {
		/* alloc_page() returns NULL on failure, not an ERR_PTR. */
		p = alloc_page(gfpmask);
		if (!p)
			goto fail;
		pages[i] = p;
	}

	return pages;

fail:
	/* unwind every page allocated so far, including pages[0]. */
	while (i--)
		__free_page(pages[i]);

	drm_free_large(pages);
	return ERR_PTR(-ENOMEM);
}

static void exynos_gem_put_pages(struct drm_gem_object *obj,
					struct page **pages)
{
	int npages;

	npages = obj->size >> PAGE_SHIFT;

	while (--npages >= 0)
		__free_page(pages[npages]);

	drm_free_large(pages);
}

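/*
 * Insert one page of @obj into a userspace vma at fault time. The vma is
 * marked VM_MIXEDMAP in exynos_drm_gem_mmap(), so vm_insert_mixed() works
 * for both the page-backed NONCONTIG case and the raw-pfn contiguous case.
 */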
static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
					struct vm_area_struct *vma,
					unsigned long f_vaddr,
					pgoff_t page_offset)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
	unsigned long pfn;

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		/* a missing page array is a hard error, not a retry case. */
		if (!buf->pages)
			return -EFAULT;

		pfn = page_to_pfn(buf->pages[page_offset]);
	} else {
		pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
	}

	return vm_insert_mixed(vma, f_vaddr, pfn);
}

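/*
 * Back a NONCONTIG object with individually allocated pages and describe
 * them with a scatter/gather table, so the rest of the driver can treat
 * contiguous and non-contiguous buffers uniformly.
 */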
static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
	struct scatterlist *sgl;
	struct page **pages;
	unsigned int npages, i = 0;
	int ret;

	if (buf->pages) {
		DRM_DEBUG_KMS("already allocated.\n");
		return -EINVAL;
	}

	pages = exynos_gem_get_pages(obj, GFP_HIGHUSER_MOVABLE);
	if (IS_ERR(pages)) {
		DRM_ERROR("failed to get pages.\n");
		return PTR_ERR(pages);
	}

	npages = obj->size >> PAGE_SHIFT;
	buf->page_size = PAGE_SIZE;

	buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!buf->sgt) {
		DRM_ERROR("failed to allocate sg table.\n");
		ret = -ENOMEM;
		goto err;
	}

	ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
	if (ret < 0) {
		DRM_ERROR("failed to initialize sg table.\n");
		goto err1;
	}

	sgl = buf->sgt->sgl;

	/* add all pages to the sg list. */
	while (i < npages) {
		sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
		sg_dma_address(sgl) = page_to_phys(pages[i]);
		i++;
		sgl = sg_next(sgl);
	}

	/* add some code for the UNCACHED type here. TODO */

	buf->pages = pages;
	return 0;
err1:
	kfree(buf->sgt);
	buf->sgt = NULL;
err:
	exynos_gem_put_pages(obj, pages);
	return ret;
}

static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;

	/*
	 * if the buffer type is EXYNOS_BO_NONCONTIG then release all pages
	 * allocated at the gem fault handler.
	 */
	sg_free_table(buf->sgt);
	kfree(buf->sgt);
	buf->sgt = NULL;

	exynos_gem_put_pages(obj, buf->pages);
	buf->pages = NULL;

	/* add some code for the UNCACHED type here. TODO */
}

static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * allocate an id from the idr table, register the obj there and
	 * return the id through @handle so userspace can refer to the obj.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
	struct drm_gem_object *obj;
	struct exynos_drm_gem_buf *buf;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	obj = &exynos_gem_obj->base;
	buf = exynos_gem_obj->buffer;

	DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));

	if (!buf->pages)
		return;

	/*
	 * do not release the memory region from the exporter.
	 *
	 * the region will be released by the exporter
	 * once the dmabuf's refcount becomes 0.
	 */
	if (obj->import_attach)
		goto out;

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG)
		exynos_drm_gem_put_pages(obj);
	else
		exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);

out:
	exynos_drm_fini_buf(obj->dev, buf);
	exynos_gem_obj->buffer = NULL;

	if (obj->map_list.map)
		drm_gem_free_mmap_offset(obj);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem_obj);
}

struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
						      unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
	if (!exynos_gem_obj) {
		DRM_ERROR("failed to allocate exynos gem object\n");
		return NULL;
	}

	exynos_gem_obj->size = size;
	obj = &exynos_gem_obj->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(exynos_gem_obj);
		return NULL;
	}

	DRM_DEBUG_KMS("created file object = %p\n", obj->filp);

	return exynos_gem_obj;
}

struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
						unsigned int flags,
						unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buf;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (!size) {
		DRM_ERROR("invalid size.\n");
		return ERR_PTR(-EINVAL);
	}

	/* validate the flags before they are used to round up the size. */
	ret = check_gem_flags(flags);
	if (ret)
		return ERR_PTR(ret);

	size = roundup_gem_size(size, flags);

	buf = exynos_drm_init_buf(dev, size);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	exynos_gem_obj = exynos_drm_gem_init(dev, size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_fini_buf;
	}

	exynos_gem_obj->buffer = buf;

	/* set memory type and cache attribute from user side. */
	exynos_gem_obj->flags = flags;

	/*
	 * allocate all pages as the desired size if the user wants
	 * physically non-contiguous memory.
	 */
	if (flags & EXYNOS_BO_NONCONTIG)
		ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
	else
		ret = exynos_drm_alloc_buf(dev, buf, flags);
	if (ret < 0) {
		drm_gem_object_release(&exynos_gem_obj->base);
		kfree(exynos_gem_obj);
		goto err_fini_buf;
	}

	return exynos_gem_obj;

err_fini_buf:
	exynos_drm_fini_buf(dev, buf);
	return ERR_PTR(ret);
}

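/*
 * Illustrative userspace invocation of the ioctl handled below (a sketch,
 * assuming the uapi definitions from drm/exynos_drm.h; error handling
 * omitted):
 *
 *	struct drm_exynos_gem_create req = {
 *		.size = 4 << 20,
 *		.flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req) == 0)
 *		buffer_handle = req.handle;
 */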
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}

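/*
 * In-kernel pairing sketch for the two helpers below: a driver that needs
 * the DMA address of a GEM buffer looks it up, uses it, then drops the
 * extra reference again (illustrative only):
 *
 *	dma_addr_t *addr;
 *
 *	addr = exynos_drm_gem_get_dma_addr(drm_dev, handle, file);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 *	... program the device with *addr ...
 *	exynos_drm_gem_put_dma_addr(drm_dev, handle, file);
 */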
void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		DRM_DEBUG_KMS("NONCONTIG type is not supported.\n");
		drm_gem_object_unreference_unlocked(obj);

		/* TODO */
		return ERR_PTR(-EINVAL);
	}

	return &exynos_gem_obj->buffer->dma_addr;
}

void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		DRM_DEBUG_KMS("NONCONTIG type is not supported.\n");
		drm_gem_object_unreference_unlocked(obj);

		/* TODO */
		return;
	}

	drm_gem_object_unreference_unlocked(obj);

	/*
	 * decrease obj->refcount one more time because we have already
	 * increased it at exynos_drm_gem_get_dma_addr().
	 */
	drm_gem_object_unreference_unlocked(obj);
}

int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv)
{
	struct drm_exynos_gem_map_off *args = data;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
			args->handle, (unsigned long)args->offset);

	if (!(dev->driver->driver_features & DRIVER_GEM)) {
		DRM_ERROR("does not support GEM.\n");
		return -ENODEV;
	}

	return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
			&args->offset);
}

static int exynos_drm_gem_mmap_buffer(struct file *filp,
				      struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = filp->private_data;
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buffer;
	unsigned long pfn, vm_size, usize, uaddr = vma->vm_start;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;

	update_vm_cache_attr(exynos_gem_obj, vma);

	vm_size = usize = vma->vm_end - vma->vm_start;

	/*
	 * a buffer contains information about physically contiguous memory
	 * allocated by user request or at framebuffer creation.
	 */
	buffer = exynos_gem_obj->buffer;

	/* check if user-requested size is valid. */
	if (vm_size > buffer->size)
		return -EINVAL;

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		int i = 0;

		if (!buffer->pages)
			return -EINVAL;

		vma->vm_flags |= VM_MIXEDMAP;

		/* map the buffer into user space one page at a time. */
		do {
			ret = vm_insert_page(vma, uaddr, buffer->pages[i++]);
			if (ret) {
				DRM_ERROR("failed to remap user space.\n");
				return ret;
			}

			uaddr += PAGE_SIZE;
			usize -= PAGE_SIZE;
		} while (usize > 0);
	} else {
		/*
		 * get the page frame number of the physical memory to be
		 * mapped to user space.
		 */
		pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
								PAGE_SHIFT;

		DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);

		if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
					vma->vm_page_prot)) {
			DRM_ERROR("failed to remap pfn range.\n");
			return -EAGAIN;
		}
	}

	return 0;
}

static const struct file_operations exynos_drm_gem_fops = {
	.mmap = exynos_drm_gem_mmap_buffer,
};
int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_exynos_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (!(dev->driver->driver_features & DRIVER_GEM)) {
		DRM_ERROR("does not support GEM.\n");
		return -ENODEV;
	}

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return -EINVAL;
	}

	obj->filp->f_op = &exynos_drm_gem_fops;
	obj->filp->private_data = obj;

	addr = vm_mmap(obj->filp, 0, args->size,
			PROT_READ | PROT_WRITE, MAP_SHARED, 0);

	drm_gem_object_unreference_unlocked(obj);

	/* vm_mmap() returns a negative errno encoded in the address. */
	if (IS_ERR_VALUE(addr))
		return (int)addr;

	args->mapped = addr;

	DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);

	return 0;
}

int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	args->flags = exynos_gem_obj->flags;
	args->size = exynos_gem_obj->size;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{
	DRM_DEBUG_KMS("%s\n", __FILE__);

	return 0;
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buf;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	exynos_gem_obj = to_exynos_gem_obj(obj);
	buf = exynos_gem_obj->buffer;

	if (obj->import_attach)
		drm_prime_gem_destroy(obj, buf->sgt);

	exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}

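/*
 * Typical dumb-buffer flow served by the callbacks below (illustrative
 * userspace sketch using the standard DRM dumb ioctls; error handling
 * omitted):
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1024, .height = 768, .bpp = 32,
 *	};
 *	struct drm_mode_map_dumb map = { 0 };
 *
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *	map.handle = create.handle;
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *		   MAP_SHARED, fd, map.offset);
 */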
int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * allocate memory to be used for framebuffer.
	 * - this callback would be called by user application
	 *	with DRM_IOCTL_MODE_CREATE_DUMB command.
	 */

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}

int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
				   struct drm_device *dev, uint32_t handle,
				   uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	mutex_lock(&dev->struct_mutex);

	/*
	 * get offset of memory allocated for drm framebuffer.
	 * - this callback would be called by user application
	 *	with DRM_IOCTL_MODE_MAP_DUMB command.
	 */

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		ret = -EINVAL;
		goto unlock;
	}

	if (!obj->map_list.map) {
		ret = drm_gem_create_mmap_offset(obj);
		if (ret)
			goto out;
	}

	*offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

out:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
				struct drm_device *dev,
				unsigned int handle)
{
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * obj->refcount and obj->handle_count are decreased and
	 * once both of them reach 0, exynos_drm_gem_free_object()
	 * is called through a callback to release the resources.
	 */
	ret = drm_gem_handle_delete(file_priv, handle);
	if (ret < 0) {
		DRM_ERROR("failed to delete drm_gem_handle.\n");
		return ret;
	}

	return 0;
}

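/*
 * Page fault handler for mappings set up through exynos_drm_gem_mmap().
 * drm_gem_mmap() installs the driver's vm_ops on the vma, so this is
 * reached on the first access to each page of the mapping.
 */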
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	unsigned long f_vaddr;
	pgoff_t page_offset;
	int ret;

	page_offset = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;
	f_vaddr = (unsigned long)vmf->virtual_address;

	mutex_lock(&dev->struct_mutex);

	ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
	if (ret < 0)
		DRM_ERROR("failed to map pages.\n");

	mutex_unlock(&dev->struct_mutex);

	return convert_to_vm_err_msg(ret);
}

int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;
	exynos_gem_obj = to_exynos_gem_obj(obj);

	ret = check_gem_flags(exynos_gem_obj->flags);
	if (ret) {
		drm_gem_vm_close(vma);
		drm_gem_free_mmap_offset(obj);
		return ret;
	}

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	update_vm_cache_attr(exynos_gem_obj, vma);

	return ret;
}
797