xref: /linux/drivers/gpu/drm/exynos/exynos_drm_gem.c (revision b9ccfda293ee6fca9a89a1584f0900e0627b975e)
/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm.h"

#include <linux/shmem_fs.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"

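/*
 * convert_to_vm_err_msg - translate a kernel error code from the fault
 * path into the VM_FAULT_* code that the VM core expects from a fault
 * handler.
 */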
static unsigned int convert_to_vm_err_msg(int msg)
{
	unsigned int out_msg;

	switch (msg) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		out_msg = VM_FAULT_NOPAGE;
		break;

	case -ENOMEM:
		out_msg = VM_FAULT_OOM;
		break;

	default:
		out_msg = VM_FAULT_SIGBUS;
		break;
	}

	return out_msg;
}

static int check_gem_flags(unsigned int flags)
{
	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_ERROR("invalid flags.\n");
		return -EINVAL;
	}

	return 0;
}

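/*
 * update_vm_cache_attr - derive the page protection for a mapping from
 * the object's buffer flags: cached, write-combined, or non-cached
 * (the default).
 */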
static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
					struct vm_area_struct *vma)
{
	DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);

	/* default to a non-cached mapping. */
	if (obj->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (obj->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
}

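/*
 * roundup_gem_size - round the requested size up to an allocation-friendly
 * boundary: for contiguous buffers, a section (1MiB and larger) or 64KiB;
 * otherwise the page size.
 */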
static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
	if (!IS_NONCONTIG_BUFFER(flags)) {
		if (size >= SZ_1M)
			return roundup(size, SECTION_SIZE);
		else if (size >= SZ_64K)
			return roundup(size, SZ_64K);
		else
			goto out;
	}
out:
	return roundup(size, PAGE_SIZE);
}

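/*
 * exynos_gem_get_pages - pin all pages backing the gem object's shmem
 * file and return them as a page array allocated with drm_malloc_ab().
 * On error, every page pinned so far is released again.
 */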
struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
						gfp_t gfpmask)
{
	struct inode *inode;
	struct address_space *mapping;
	struct page *p, **pages;
	int i, npages;

	/* This is the shared memory object that backs the GEM resource */
	inode = obj->filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;

	npages = obj->size >> PAGE_SHIFT;

	pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	gfpmask |= mapping_gfp_mask(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;
	}

	return pages;

fail:
	/* drop the references taken so far and propagate the error. */
	while (i--)
		page_cache_release(pages[i]);

	drm_free_large(pages);
	return ERR_CAST(p);
}

static void exynos_gem_put_pages(struct drm_gem_object *obj,
					struct page **pages,
					bool dirty, bool accessed)
{
	int i, npages;

	npages = obj->size >> PAGE_SHIFT;

	for (i = 0; i < npages; i++) {
		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		page_cache_release(pages[i]);
	}

	drm_free_large(pages);
}

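/*
 * exynos_drm_gem_map_pages - insert the page backing the faulting address
 * into the vma: for non-contiguous objects the page comes from the pinned
 * page array, for contiguous ones the pfn is computed from the buffer's
 * dma address plus the page offset.
 */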
static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
					struct vm_area_struct *vma,
					unsigned long f_vaddr,
					pgoff_t page_offset)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
	unsigned long pfn;

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		if (!buf->pages)
			return -EINTR;

		pfn = page_to_pfn(buf->pages[page_offset]);
	} else {
		pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
	}

	return vm_insert_mixed(vma, f_vaddr, pfn);
}

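/*
 * exynos_drm_gem_get_pages - pin the backing pages of a non-contiguous
 * object and describe them with a newly allocated sg table, so the buffer
 * can later be mapped or exported to other users of the object.
 */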
static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
	struct scatterlist *sgl;
	struct page **pages;
	unsigned int npages, i = 0;
	int ret;

	if (buf->pages) {
		DRM_DEBUG_KMS("already allocated.\n");
		return -EINVAL;
	}

	pages = exynos_gem_get_pages(obj, GFP_KERNEL);
	if (IS_ERR(pages)) {
		DRM_ERROR("failed to get pages.\n");
		return PTR_ERR(pages);
	}

	npages = obj->size >> PAGE_SHIFT;
	buf->page_size = PAGE_SIZE;

	buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!buf->sgt) {
		DRM_ERROR("failed to allocate sg table.\n");
		ret = -ENOMEM;
		goto err;
	}

	ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
	if (ret < 0) {
		DRM_ERROR("failed to initialize sg table.\n");
		goto err1;
	}

	sgl = buf->sgt->sgl;

	/* add all pages to the sg list. */
	while (i < npages) {
		sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
		sg_dma_address(sgl) = page_to_phys(pages[i]);
		i++;
		sgl = sg_next(sgl);
	}

	/* TODO: add handling for the UNCACHED type here. */

	buf->pages = pages;
	return 0;
err1:
	kfree(buf->sgt);
	buf->sgt = NULL;
err:
	exynos_gem_put_pages(obj, pages, true, false);
	return ret;
}

static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;

	/*
	 * if the buffer type is EXYNOS_BO_NONCONTIG, release all pages
	 * allocated at the gem fault handler.
	 */
	sg_free_table(buf->sgt);
	kfree(buf->sgt);
	buf->sgt = NULL;

	exynos_gem_put_pages(obj, buf->pages, true, false);
	buf->pages = NULL;

	/* TODO: add handling for the UNCACHED type here. */
}

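/*
 * exynos_drm_gem_handle_create - wrap drm_gem_handle_create() and drop
 * the allocation-time reference, leaving the handle as the only reference
 * the caller sees.
 */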
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * allocate an id in the idr table where the obj is registered;
	 * the handle holds the id that user space sees.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

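/*
 * exynos_drm_gem_destroy - release everything attached to a gem object:
 * the backing pages or the contiguous buffer, the buffer bookkeeping, the
 * mmap offset, and finally the object itself.
 */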
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
	struct drm_gem_object *obj;
	struct exynos_drm_gem_buf *buf;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	obj = &exynos_gem_obj->base;
	buf = exynos_gem_obj->buffer;

	DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));

	if (!buf->pages)
		return;

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG)
		exynos_drm_gem_put_pages(obj);
	else
		exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);

	exynos_drm_fini_buf(obj->dev, buf);
	exynos_gem_obj->buffer = NULL;

	if (obj->map_list.map)
		drm_gem_free_mmap_offset(obj);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem_obj);
}

struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
						      unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
	if (!exynos_gem_obj) {
		DRM_ERROR("failed to allocate exynos gem object\n");
		return NULL;
	}

	exynos_gem_obj->size = size;
	obj = &exynos_gem_obj->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(exynos_gem_obj);
		return NULL;
	}

	DRM_DEBUG_KMS("created file object = %p\n", obj->filp);

	return exynos_gem_obj;
}

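/*
 * exynos_drm_gem_create - allocate a buffer of the given size and type
 * and wrap it in a new gem object. Non-contiguous requests are backed by
 * shmem pages, contiguous ones by exynos_drm_alloc_buf().
 */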
struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
						unsigned int flags,
						unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buf;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (!size) {
		DRM_ERROR("invalid size.\n");
		return ERR_PTR(-EINVAL);
	}

	/* validate the flags before they are used to round up the size. */
	ret = check_gem_flags(flags);
	if (ret)
		return ERR_PTR(ret);

	size = roundup_gem_size(size, flags);

	buf = exynos_drm_init_buf(dev, size);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	exynos_gem_obj = exynos_drm_gem_init(dev, size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_fini_buf;
	}

	exynos_gem_obj->buffer = buf;

	/* set memory type and cache attribute from user side. */
	exynos_gem_obj->flags = flags;

	/*
	 * allocate all pages as the desired size if the user wants
	 * physically non-contiguous memory.
	 */
	if (flags & EXYNOS_BO_NONCONTIG) {
		ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
		if (ret < 0) {
			/* free the container as well, or it would leak. */
			drm_gem_object_release(&exynos_gem_obj->base);
			kfree(exynos_gem_obj);
			goto err_fini_buf;
		}
	} else {
		ret = exynos_drm_alloc_buf(dev, buf, flags);
		if (ret < 0) {
			drm_gem_object_release(&exynos_gem_obj->base);
			kfree(exynos_gem_obj);
			goto err_fini_buf;
		}
	}

	return exynos_gem_obj;

err_fini_buf:
	exynos_drm_fini_buf(dev, buf);
	return ERR_PTR(ret);
}

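/*
 * exynos_drm_gem_create_ioctl - allocate a gem object and create a handle
 * to it for the calling client.
 *
 * Illustrative user-space call sequence (assuming libdrm's drmIoctl()
 * wrapper and the DRM_IOCTL_EXYNOS_GEM_CREATE definition from the uapi
 * header; the local variable names here are hypothetical):
 *
 *	struct drm_exynos_gem_create req = {
 *		.size = length,
 *		.flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req) == 0)
 *		;	/* req.handle now names the new buffer */
 */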
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}

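/*
 * exynos_drm_gem_get_dma_addr - look up a gem object by handle and return
 * a pointer to the dma address of its contiguous buffer.
 *
 * The reference taken by drm_gem_object_lookup() is intentionally kept
 * here so the buffer cannot go away while its dma address is in use;
 * exynos_drm_gem_put_dma_addr() drops that extra reference again.
 */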
void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		DRM_DEBUG_KMS("NONCONTIG type is not supported.\n");
		drm_gem_object_unreference_unlocked(obj);

		/* TODO */
		return ERR_PTR(-EINVAL);
	}

	return &exynos_gem_obj->buffer->dma_addr;
}

void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		DRM_DEBUG_KMS("NONCONTIG type is not supported.\n");
		drm_gem_object_unreference_unlocked(obj);

		/* TODO */
		return;
	}

	drm_gem_object_unreference_unlocked(obj);

	/*
	 * decrease obj->refcount one more time because we have already
	 * increased it at exynos_drm_gem_get_dma_addr().
	 */
	drm_gem_object_unreference_unlocked(obj);
}

int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv)
{
	struct drm_exynos_gem_map_off *args = data;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
			args->handle, (unsigned long)args->offset);

	if (!(dev->driver->driver_features & DRIVER_GEM)) {
		DRM_ERROR("does not support GEM.\n");
		return -ENODEV;
	}

	return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
			&args->offset);
}

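/*
 * exynos_drm_gem_mmap_buffer - map a gem buffer directly into user space.
 *
 * Non-contiguous buffers are mapped page by page with vm_insert_page(),
 * while contiguous ones are mapped in one go with remap_pfn_range() using
 * the pfn derived from the buffer's dma address.
 */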
static int exynos_drm_gem_mmap_buffer(struct file *filp,
				      struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = filp->private_data;
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buffer;
	unsigned long pfn, vm_size, usize, uaddr = vma->vm_start;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	vma->vm_flags |= (VM_IO | VM_RESERVED);

	update_vm_cache_attr(exynos_gem_obj, vma);

	vm_size = usize = vma->vm_end - vma->vm_start;

	/*
	 * a buffer contains information about the physically contiguous
	 * memory allocated by user request or at framebuffer creation.
	 */
	buffer = exynos_gem_obj->buffer;

	/* check if user-requested size is valid. */
	if (vm_size > buffer->size)
		return -EINVAL;

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		int i = 0;

		if (!buffer->pages)
			return -EINVAL;

		vma->vm_flags |= VM_MIXEDMAP;

		do {
			ret = vm_insert_page(vma, uaddr, buffer->pages[i++]);
			if (ret) {
				DRM_ERROR("failed to remap user space.\n");
				return ret;
			}

			uaddr += PAGE_SIZE;
			usize -= PAGE_SIZE;
		} while (usize > 0);
	} else {
		/*
		 * get the page frame number of the physical memory to be
		 * mapped to user space.
		 */
		pfn = ((unsigned long)buffer->dma_addr) >> PAGE_SHIFT;

		DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);

		if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
					vma->vm_page_prot)) {
			DRM_ERROR("failed to remap pfn range.\n");
			return -EAGAIN;
		}
	}

	return 0;
}

static const struct file_operations exynos_drm_gem_fops = {
	.mmap = exynos_drm_gem_mmap_buffer,
};

int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_exynos_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (!(dev->driver->driver_features & DRIVER_GEM)) {
		DRM_ERROR("does not support GEM.\n");
		return -ENODEV;
	}

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return -EINVAL;
	}

	obj->filp->f_op = &exynos_drm_gem_fops;
	obj->filp->private_data = obj;

	addr = vm_mmap(obj->filp, 0, args->size,
			PROT_READ | PROT_WRITE, MAP_SHARED, 0);

	drm_gem_object_unreference_unlocked(obj);

	if (IS_ERR_VALUE(addr))
		return (int)addr;

	args->mapped = addr;

	DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);

	return 0;
}

int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	args->flags = exynos_gem_obj->flags;
	args->size = exynos_gem_obj->size;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{
	DRM_DEBUG_KMS("%s\n", __FILE__);

	return 0;
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buf;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	exynos_gem_obj = to_exynos_gem_obj(obj);
	buf = exynos_gem_obj->buffer;

	if (obj->import_attach)
		drm_prime_gem_destroy(obj, buf->sgt);

	exynos_drm_gem_destroy(exynos_gem_obj);
}

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * allocate memory to be used for the framebuffer.
	 * - this callback would be called by a user application
	 *	with the DRM_IOCTL_MODE_CREATE_DUMB command.
	 */

	args->pitch = args->width * args->bpp >> 3;
	args->size = PAGE_ALIGN(args->pitch * args->height);

	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}

int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
				   struct drm_device *dev, uint32_t handle,
				   uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	mutex_lock(&dev->struct_mutex);

	/*
	 * get the offset of the memory allocated for the drm framebuffer.
	 * - this callback would be called by a user application
	 *	with the DRM_IOCTL_MODE_MAP_DUMB command.
	 */

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		ret = -EINVAL;
		goto unlock;
	}

	if (!obj->map_list.map) {
		ret = drm_gem_create_mmap_offset(obj);
		if (ret)
			goto out;
	}

	*offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

out:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
				struct drm_device *dev,
				unsigned int handle)
{
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * obj->refcount and obj->handle_count are decreased and
	 * if both of them are 0 then exynos_drm_gem_free_object()
	 * will be called via a callback to release resources.
	 */
	ret = drm_gem_handle_delete(file_priv, handle);
	if (ret < 0) {
		DRM_ERROR("failed to delete drm_gem_handle.\n");
		return ret;
	}

	return 0;
}

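/*
 * exynos_drm_gem_fault - page fault handler for mappings set up through
 * exynos_drm_gem_mmap(). Resolves the faulting address to a backing pfn
 * via exynos_drm_gem_map_pages() and converts the result into a
 * VM_FAULT_* code.
 */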
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	unsigned long f_vaddr;
	pgoff_t page_offset;
	int ret;

	page_offset = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;
	f_vaddr = (unsigned long)vmf->virtual_address;

	mutex_lock(&dev->struct_mutex);

	ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
	if (ret < 0)
		DRM_ERROR("failed to map pages.\n");

	mutex_unlock(&dev->struct_mutex);

	return convert_to_vm_err_msg(ret);
}

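/*
 * exynos_drm_gem_mmap - mmap file operation for gem objects.
 * drm_gem_mmap() sets up the vma; the flags and page protection are then
 * adjusted so that faults are served by exynos_drm_gem_fault() with the
 * cache attributes the object was created with.
 */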
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;
	exynos_gem_obj = to_exynos_gem_obj(obj);

	ret = check_gem_flags(exynos_gem_obj->flags);
	if (ret) {
		drm_gem_vm_close(vma);
		drm_gem_free_mmap_offset(obj);
		return ret;
	}

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	update_vm_cache_attr(exynos_gem_obj, vma);

	return ret;
}