/* xref: /linux/drivers/gpu/drm/exynos/exynos_drm_gem.c (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732) */
/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"

static int exynos_drm_alloc_buf(struct exynos_drm_gem_obj *obj)
{
	struct drm_device *dev = obj->base.dev;
	enum dma_attr attr;
	unsigned int nr_pages;

	if (obj->dma_addr) {
		DRM_DEBUG_KMS("already allocated.\n");
		return 0;
	}

	init_dma_attrs(&obj->dma_attrs);

	/*
	 * If EXYNOS_BO_CONTIG is requested, a fully physically contiguous
	 * memory region is allocated; otherwise the region is made as
	 * physically contiguous as possible.
	 */
	if (!(obj->flags & EXYNOS_BO_NONCONTIG))
		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &obj->dma_attrs);

	/*
	 * If EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE is set, use a
	 * write-combined mapping; otherwise use a cacheable mapping.
	 */
	if (obj->flags & EXYNOS_BO_WC || !(obj->flags & EXYNOS_BO_CACHABLE))
		attr = DMA_ATTR_WRITE_COMBINE;
	else
		attr = DMA_ATTR_NON_CONSISTENT;

	dma_set_attr(attr, &obj->dma_attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &obj->dma_attrs);

	nr_pages = obj->size >> PAGE_SHIFT;

	if (!is_drm_iommu_supported(dev)) {
		obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
		if (!obj->pages) {
			DRM_ERROR("failed to allocate pages.\n");
			return -ENOMEM;
		}
	}

	obj->cookie = dma_alloc_attrs(dev->dev, obj->size, &obj->dma_addr,
				      GFP_KERNEL, &obj->dma_attrs);
	if (!obj->cookie) {
		DRM_ERROR("failed to allocate buffer.\n");
		if (obj->pages)
			drm_free_large(obj->pages);
		return -ENOMEM;
	}

	if (obj->pages) {
		dma_addr_t start_addr;
		unsigned int i = 0;

		/* fill the page array from the contiguous DMA region. */
		start_addr = obj->dma_addr;
		while (i < nr_pages) {
			obj->pages[i] = pfn_to_page(dma_to_pfn(dev->dev,
							       start_addr));
			start_addr += PAGE_SIZE;
			i++;
		}
	} else {
		/* with an IOMMU, the allocation cookie is the page array. */
		obj->pages = obj->cookie;
	}

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)obj->dma_addr,
			obj->size);

	return 0;
}

static void exynos_drm_free_buf(struct exynos_drm_gem_obj *obj)
{
	struct drm_device *dev = obj->base.dev;

	if (!obj->dma_addr) {
		DRM_DEBUG_KMS("dma_addr is invalid.\n");
		return;
	}

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)obj->dma_addr, obj->size);

	dma_free_attrs(dev->dev, obj->size, obj->cookie,
			(dma_addr_t)obj->dma_addr, &obj->dma_attrs);

	if (!is_drm_iommu_supported(dev))
		drm_free_large(obj->pages);
}

static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * Allocate an id in the idr table where the object is registered;
	 * the handle returned to userspace carries that id.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
	struct drm_gem_object *obj = &exynos_gem_obj->base;

	DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);

	/*
	 * Do not release the memory region of an imported buffer here.
	 *
	 * The exporter releases the region once the dmabuf's refcount
	 * drops to zero.
	 */
	if (obj->import_attach)
		drm_prime_gem_destroy(obj, exynos_gem_obj->sgt);
	else
		exynos_drm_free_buf(exynos_gem_obj);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem_obj);
}

unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
						unsigned int gem_handle,
						struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return 0;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	drm_gem_object_unreference_unlocked(obj);

	return exynos_gem_obj->size;
}

static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
						      unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
	if (!exynos_gem_obj)
		return ERR_PTR(-ENOMEM);

	exynos_gem_obj->size = size;
	obj = &exynos_gem_obj->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(exynos_gem_obj);
		return ERR_PTR(ret);
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret < 0) {
		drm_gem_object_release(obj);
		kfree(exynos_gem_obj);
		return ERR_PTR(ret);
	}

	DRM_DEBUG_KMS("created file object = %p\n", obj->filp);

	return exynos_gem_obj;
}

struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
						unsigned int flags,
						unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_ERROR("invalid flags.\n");
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		DRM_ERROR("invalid size.\n");
		return ERR_PTR(-EINVAL);
	}

	size = roundup(size, PAGE_SIZE);

	exynos_gem_obj = exynos_drm_gem_init(dev, size);
	if (IS_ERR(exynos_gem_obj))
		return exynos_gem_obj;

	/* set memory type and cache attribute from user side. */
	exynos_gem_obj->flags = flags;

	ret = exynos_drm_alloc_buf(exynos_gem_obj);
	if (ret < 0) {
		drm_gem_object_release(&exynos_gem_obj->base);
		kfree(exynos_gem_obj);
		return ERR_PTR(ret);
	}

	return exynos_gem_obj;
}

int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}
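
/*
 * Illustrative sketch (not part of the driver): how userspace could reach
 * exynos_drm_gem_create_ioctl() above through DRM_IOCTL_EXYNOS_GEM_CREATE.
 * The helper name, the device node path and the size/flag choices are
 * assumptions for the example; the struct and ioctl number come from the
 * exynos uapi header, drmIoctl() from libdrm.
 *
 *	#include <stdint.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <xf86drm.h>
 *	#include <drm/exynos_drm.h>
 *
 *	static int create_exynos_bo(uint32_t *handle)
 *	{
 *		struct drm_exynos_gem_create req = {
 *			.size  = 4096,
 *			.flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
 *		};
 *		int fd = open("/dev/dri/card0", O_RDWR);
 *
 *		if (fd < 0)
 *			return -1;
 *		if (drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		*handle = req.handle;	// handle from exynos_drm_gem_handle_create()
 *		return fd;
 *	}
 */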

dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	return &exynos_gem_obj->dma_addr;
}

void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return;
	}

	drm_gem_object_unreference_unlocked(obj);

	/*
	 * Drop the reference count one more time because it was already
	 * increased in exynos_drm_gem_get_dma_addr().
	 */
	drm_gem_object_unreference_unlocked(obj);
}
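
/*
 * Illustrative sketch (not part of the driver): how another exynos_drm
 * component could pair the two helpers above to use the DMA address of a
 * GEM buffer passed in by userspace. The function name is hypothetical.
 *
 *	static int example_use_dma_addr(struct drm_device *drm_dev,
 *					unsigned int handle,
 *					struct drm_file *file)
 *	{
 *		dma_addr_t *addr;
 *
 *		// the lookup inside takes an extra reference on the object
 *		addr = exynos_drm_gem_get_dma_addr(drm_dev, handle, file);
 *		if (IS_ERR(addr))
 *			return PTR_ERR(addr);
 *
 *		// ... program the hardware with *addr here ...
 *
 *		// drops the extra reference taken above
 *		exynos_drm_gem_put_dma_addr(drm_dev, handle, file);
 *		return 0;
 *	}
 */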

static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
				      struct vm_area_struct *vma)
{
	struct drm_device *drm_dev = exynos_gem_obj->base.dev;
	unsigned long vm_size;
	int ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	vm_size = vma->vm_end - vma->vm_start;

	/* check if user-requested size is valid. */
	if (vm_size > exynos_gem_obj->size)
		return -EINVAL;

	ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem_obj->pages,
				exynos_gem_obj->dma_addr, exynos_gem_obj->size,
				&exynos_gem_obj->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}

int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	args->flags = exynos_gem_obj->flags;
	args->size = exynos_gem_obj->size;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir)
{
	int nents;

	mutex_lock(&drm_dev->struct_mutex);

	nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
	if (!nents) {
		DRM_ERROR("failed to map sgl with dma.\n");
		mutex_unlock(&drm_dev->struct_mutex);
		/* dma_map_sg() returns 0 on failure; report a real error code. */
		return -EIO;
	}

	mutex_unlock(&drm_dev->struct_mutex);
	return 0;
}

void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir)
{
	dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	unsigned int flags;
	int ret;

	/*
	 * Allocate memory to be used for the framebuffer.
	 * - this callback is invoked on behalf of a user application
	 *   via the DRM_IOCTL_MODE_CREATE_DUMB ioctl.
	 */

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	if (is_drm_iommu_supported(dev))
		flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
	else
		flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;

	exynos_gem_obj = exynos_drm_gem_create(dev, flags, args->size);
	if (IS_ERR(exynos_gem_obj)) {
		dev_warn(dev->dev, "FB allocation failed.\n");
		return PTR_ERR(exynos_gem_obj);
	}

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}
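
/*
 * Illustrative sketch (not part of the driver): how generic userspace could
 * reach exynos_drm_gem_dumb_create() above through DRM_IOCTL_MODE_CREATE_DUMB.
 * The helper name and the width/height/bpp values are assumptions for the
 * example.
 *
 *	#include <stdint.h>
 *	#include <xf86drm.h>
 *
 *	static int create_dumb_fb(int fd, uint32_t *handle, uint32_t *pitch,
 *				  uint64_t *size)
 *	{
 *		struct drm_mode_create_dumb creq = {
 *			.width  = 1280,
 *			.height = 720,
 *			.bpp    = 32,
 *		};
 *
 *		if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq) < 0)
 *			return -1;
 *		*handle = creq.handle;	// GEM handle
 *		*pitch  = creq.pitch;	// pitch chosen by the driver above
 *		*size   = creq.size;	// size as computed by the driver
 *		return 0;
 *	}
 */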

int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
				   struct drm_device *dev, uint32_t handle,
				   uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	/*
	 * Get the fake mmap offset of the memory allocated for the
	 * dumb framebuffer.
	 * - this callback is invoked on behalf of a user application
	 *   via the DRM_IOCTL_MODE_MAP_DUMB ioctl.
	 */

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		ret = -EINVAL;
		goto unlock;
	}

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
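
/*
 * Illustrative sketch (not part of the driver): mapping a dumb buffer into
 * userspace via DRM_IOCTL_MODE_MAP_DUMB and mmap(). The helper name is
 * hypothetical and fd/handle/size are assumed to come from a previous
 * create-dumb call such as the one sketched above.
 *
 *	#include <stdint.h>
 *	#include <sys/mman.h>
 *	#include <xf86drm.h>
 *
 *	static void *map_dumb_fb(int fd, uint32_t handle, uint64_t size)
 *	{
 *		struct drm_mode_map_dumb mreq = { .handle = handle };
 *		void *map;
 *
 *		// asks exynos_drm_gem_dumb_map_offset() for the fake offset
 *		if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq) < 0)
 *			return NULL;
 *
 *		// the mmap() below ends up in exynos_drm_gem_mmap()
 *		map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			   fd, mreq.offset);
 *		return map == MAP_FAILED ? NULL : map;
 *	}
 */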

int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	unsigned long pfn;
	pgoff_t page_offset;
	int ret;

	page_offset = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (page_offset >= (exynos_gem_obj->size >> PAGE_SHIFT)) {
		DRM_ERROR("invalid page offset\n");
		ret = -EINVAL;
		goto out;
	}

	pfn = page_to_pfn(exynos_gem_obj->pages[page_offset]);
	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);

out:
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;
	exynos_gem_obj = to_exynos_gem_obj(obj);

	DRM_DEBUG_KMS("flags = 0x%x\n", exynos_gem_obj->flags);

	/* non-cached mapping by default. */
	if (exynos_gem_obj->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (exynos_gem_obj->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));

	ret = exynos_drm_gem_mmap_buffer(exynos_gem_obj, vma);
	if (ret)
		goto err_close_vm;

	return ret;

err_close_vm:
	drm_gem_vm_close(vma);

	return ret;
}

/* low-level interface prime helpers */
struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	int npages;

	npages = exynos_gem_obj->size >> PAGE_SHIFT;

	return drm_prime_pages_to_sg(exynos_gem_obj->pages, npages);
}

struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
				     struct dma_buf_attachment *attach,
				     struct sg_table *sgt)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int npages;
	int ret;

	exynos_gem_obj = exynos_drm_gem_init(dev, attach->dmabuf->size);
	if (IS_ERR(exynos_gem_obj)) {
		ret = PTR_ERR(exynos_gem_obj);
		return ERR_PTR(ret);
	}

	exynos_gem_obj->dma_addr = sg_dma_address(sgt->sgl);

	npages = exynos_gem_obj->size >> PAGE_SHIFT;
	exynos_gem_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!exynos_gem_obj->pages) {
		ret = -ENOMEM;
		goto err;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem_obj->pages, NULL,
			npages);
	if (ret < 0)
		goto err_free_large;

	exynos_gem_obj->sgt = sgt;

	if (sgt->nents == 1) {
		/* the memory is always physically contiguous if sgt->nents is 1. */
		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
	} else {
		/*
		 * This case could be either CONTIG or NONCONTIG, but for now
		 * it is treated as NONCONTIG.
		 * TODO: find a way for the exporter to notify the importer of
		 * the type of its own buffer.
		 */
		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
	}

	return &exynos_gem_obj->base;

err_free_large:
	drm_free_large(exynos_gem_obj->pages);
err:
	drm_gem_object_release(&exynos_gem_obj->base);
	kfree(exynos_gem_obj);
	return ERR_PTR(ret);
}

void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
{
	return NULL;
}

void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do */
}