/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"

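/*
 * Allocate the backing storage for a GEM object through the DMA API,
 * honouring the EXYNOS_BO_* flags stored in obj->flags. Without an
 * IOMMU the page array is built manually from the contiguous
 * allocation; with an IOMMU, dma_alloc_attrs() hands back the page
 * array itself.
 */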
static int exynos_drm_alloc_buf(struct exynos_drm_gem_obj *obj)
{
	struct drm_device *dev = obj->base.dev;
	enum dma_attr attr;
	unsigned int nr_pages;

	if (obj->dma_addr) {
		DRM_DEBUG_KMS("already allocated.\n");
		return 0;
	}

	init_dma_attrs(&obj->dma_attrs);

	/*
	 * If EXYNOS_BO_CONTIG, a fully physically contiguous memory
	 * region is allocated; otherwise the allocation is made as
	 * physically contiguous as possible.
	 */
	if (!(obj->flags & EXYNOS_BO_NONCONTIG))
		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &obj->dma_attrs);

	/*
	 * If EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, use a write-combined
	 * mapping; otherwise use a cacheable mapping.
	 */
	if (obj->flags & EXYNOS_BO_WC || !(obj->flags & EXYNOS_BO_CACHABLE))
		attr = DMA_ATTR_WRITE_COMBINE;
	else
		attr = DMA_ATTR_NON_CONSISTENT;

	dma_set_attr(attr, &obj->dma_attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &obj->dma_attrs);

	nr_pages = obj->size >> PAGE_SHIFT;

	if (!is_drm_iommu_supported(dev)) {
		dma_addr_t start_addr;
		unsigned int i = 0;

		obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
		if (!obj->pages) {
			DRM_ERROR("failed to allocate pages.\n");
			return -ENOMEM;
		}

		obj->cookie = dma_alloc_attrs(dev->dev,
					obj->size,
					&obj->dma_addr, GFP_KERNEL,
					&obj->dma_attrs);
		if (!obj->cookie) {
			DRM_ERROR("failed to allocate buffer.\n");
			drm_free_large(obj->pages);
			return -ENOMEM;
		}

		start_addr = obj->dma_addr;
		while (i < nr_pages) {
			obj->pages[i] = phys_to_page(start_addr);
			start_addr += PAGE_SIZE;
			i++;
		}
	} else {
		obj->pages = dma_alloc_attrs(dev->dev, obj->size,
					&obj->dma_addr, GFP_KERNEL,
					&obj->dma_attrs);
		if (!obj->pages) {
			DRM_ERROR("failed to allocate buffer.\n");
			return -ENOMEM;
		}
	}

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)obj->dma_addr,
			obj->size);

	return 0;
}

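/*
 * Release the storage allocated by exynos_drm_alloc_buf(). Safe to
 * call on an object that never got a buffer: it simply returns.
 */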
static void exynos_drm_free_buf(struct exynos_drm_gem_obj *obj)
{
	struct drm_device *dev = obj->base.dev;

	if (!obj->dma_addr) {
		DRM_DEBUG_KMS("dma_addr is invalid.\n");
		return;
	}

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)obj->dma_addr, obj->size);

	if (!is_drm_iommu_supported(dev)) {
		dma_free_attrs(dev->dev, obj->size, obj->cookie,
				obj->dma_addr, &obj->dma_attrs);
		drm_free_large(obj->pages);
	} else {
		dma_free_attrs(dev->dev, obj->size, obj->pages,
				obj->dma_addr, &obj->dma_attrs);
	}

	obj->dma_addr = 0;
}

static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * Allocate an id from the idr table, under which the obj is
	 * registered; the handle returned to userspace is that id.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

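/*
 * Tear down a GEM object: free the backing buffer (unless it was
 * imported, in which case the exporter owns it), drop the mmap
 * offset, release the GEM core state and free the wrapper.
 */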
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
	struct drm_gem_object *obj = &exynos_gem_obj->base;

	DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);

	/*
	 * Do not release the memory region of an imported buffer here;
	 * the exporter releases it once the dmabuf's refcount drops
	 * to 0.
	 */
	if (obj->import_attach)
		goto out;

	exynos_drm_free_buf(exynos_gem_obj);

out:
	drm_gem_free_mmap_offset(obj);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem_obj);
}

unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
						unsigned int gem_handle,
						struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return 0;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	drm_gem_object_unreference_unlocked(obj);

	return exynos_gem_obj->size;
}

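/*
 * Allocate the exynos GEM wrapper and initialize the embedded GEM
 * object. No backing storage is allocated here; callers either call
 * exynos_drm_alloc_buf() or attach imported pages afterwards.
 */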
struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
						      unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
	if (!exynos_gem_obj)
		return ERR_PTR(-ENOMEM);

	exynos_gem_obj->size = size;
	obj = &exynos_gem_obj->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(exynos_gem_obj);
		return ERR_PTR(ret);
	}

	/* %p instead of casting to unsigned int, which truncates on 64-bit. */
	DRM_DEBUG_KMS("created file object = %p\n", obj->filp);

	return exynos_gem_obj;
}

struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
						unsigned int flags,
						unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_ERROR("invalid flags.\n");
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		DRM_ERROR("invalid size.\n");
		return ERR_PTR(-EINVAL);
	}

	size = roundup(size, PAGE_SIZE);

	exynos_gem_obj = exynos_drm_gem_init(dev, size);
	if (IS_ERR(exynos_gem_obj))
		return exynos_gem_obj;

	/* set memory type and cache attribute from user side. */
	exynos_gem_obj->flags = flags;

	ret = exynos_drm_alloc_buf(exynos_gem_obj);
	if (ret < 0) {
		drm_gem_object_release(&exynos_gem_obj->base);
		kfree(exynos_gem_obj);
		return ERR_PTR(ret);
	}

	return exynos_gem_obj;
}

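/*
 * A minimal userspace sketch of driving this ioctl (a hedged example,
 * not part of this file; "fd" is a hypothetical DRM device fd opened
 * by the caller, drmIoctl() comes from libdrm and the request struct
 * from the exynos_drm.h uapi header):
 *
 *	struct drm_exynos_gem_create req = {
 *		.size = 4096,
 *		.flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req) == 0)
 *		use_handle(req.handle);	 // hypothetical consumer
 */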
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}

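/*
 * Look up a GEM handle and return a pointer to its DMA address. The
 * lookup takes a reference on the object which is deliberately kept;
 * the caller must balance it with exynos_drm_gem_put_dma_addr().
 */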
dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	return &exynos_gem_obj->dma_addr;
}

void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return;
	}

	drm_gem_object_unreference_unlocked(obj);

	/*
	 * Drop obj->refcount one more time because we have already
	 * taken an extra reference in exynos_drm_gem_get_dma_addr().
	 */
	drm_gem_object_unreference_unlocked(obj);
}

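/*
 * Map the whole buffer into the given vma with dma_mmap_attrs(),
 * using the same DMA attributes the buffer was allocated with. The
 * requested size must not exceed the buffer size.
 */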
int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
				      struct vm_area_struct *vma)
{
	struct drm_device *drm_dev = exynos_gem_obj->base.dev;
	unsigned long vm_size;
	int ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	vm_size = vma->vm_end - vma->vm_start;

	/* check if user-requested size is valid. */
	if (vm_size > exynos_gem_obj->size)
		return -EINVAL;

	ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem_obj->pages,
				exynos_gem_obj->dma_addr, exynos_gem_obj->size,
				&exynos_gem_obj->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}

int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	args->flags = exynos_gem_obj->flags;
	args->size = exynos_gem_obj->size;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

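/*
 * Map an sg table for device DMA under dev->struct_mutex. Returns 0
 * on success and a negative error code if dma_map_sg() maps nothing.
 */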
int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir)
{
	int nents;

	mutex_lock(&drm_dev->struct_mutex);

	nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
	if (!nents) {
		DRM_ERROR("failed to map sgl with dma.\n");
		mutex_unlock(&drm_dev->struct_mutex);
		/* dma_map_sg() returns 0 on failure; don't report success. */
		return -EIO;
	}

	mutex_unlock(&drm_dev->struct_mutex);
	return 0;
}

void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir)
{
	dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	/*
	 * Allocate memory to be used as a dumb framebuffer; this
	 * callback is invoked when a user application issues the
	 * DRM_IOCTL_MODE_CREATE_DUMB ioctl.
	 */

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;
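	/*
	 * Worked example: a 1920x1080 buffer at 32 bpp gives
	 * pitch = 1920 * ((32 + 7) / 8) = 7680 bytes and
	 * size = 7680 * 1080 = 8294400 bytes; exynos_drm_gem_create()
	 * then rounds the size up to a PAGE_SIZE multiple.
	 */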

	if (is_drm_iommu_supported(dev)) {
		exynos_gem_obj = exynos_drm_gem_create(dev,
			EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
			args->size);
	} else {
		exynos_gem_obj = exynos_drm_gem_create(dev,
			EXYNOS_BO_CONTIG | EXYNOS_BO_WC,
			args->size);
	}

	if (IS_ERR(exynos_gem_obj)) {
		dev_warn(dev->dev, "FB allocation failed.\n");
		return PTR_ERR(exynos_gem_obj);
	}

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}

int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
				   struct drm_device *dev, uint32_t handle,
				   uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	/*
	 * Get the mmap offset of the memory allocated for a drm
	 * framebuffer; this callback is invoked when a user application
	 * issues the DRM_IOCTL_MODE_MAP_DUMB ioctl.
	 */

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		ret = -EINVAL;
		goto unlock;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

out:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

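/*
 * Page-fault handler for mmapped GEM buffers: translate the faulting
 * address into a page offset inside the object and insert the pfn of
 * the corresponding backing page into the vma.
 */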
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	unsigned long pfn;
	pgoff_t page_offset;
	int ret;

	page_offset = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (page_offset >= (exynos_gem_obj->size >> PAGE_SHIFT)) {
		DRM_ERROR("invalid page offset\n");
		ret = -EINVAL;
		goto out;
	}

	pfn = page_to_pfn(exynos_gem_obj->pages[page_offset]);
	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);

out:
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;
	exynos_gem_obj = to_exynos_gem_obj(obj);

	DRM_DEBUG_KMS("flags = 0x%x\n", exynos_gem_obj->flags);

	/* non-cached mapping by default. */
	if (exynos_gem_obj->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (exynos_gem_obj->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));

	ret = exynos_drm_gem_mmap_buffer(exynos_gem_obj, vma);
	if (ret)
		goto err_close_vm;

	return ret;

err_close_vm:
	drm_gem_vm_close(vma);
	drm_gem_free_mmap_offset(obj);

	return ret;
}

/* low-level interface prime helpers */
struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	int npages;

	npages = exynos_gem_obj->size >> PAGE_SHIFT;

	return drm_prime_pages_to_sg(exynos_gem_obj->pages, npages);
}

struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
				     struct dma_buf_attachment *attach,
				     struct sg_table *sgt)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int npages;
	int ret;

	exynos_gem_obj = exynos_drm_gem_init(dev, attach->dmabuf->size);
	if (IS_ERR(exynos_gem_obj)) {
		ret = PTR_ERR(exynos_gem_obj);
		return ERR_PTR(ret);
	}

	exynos_gem_obj->dma_addr = sg_dma_address(sgt->sgl);

	npages = exynos_gem_obj->size >> PAGE_SHIFT;
	exynos_gem_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!exynos_gem_obj->pages) {
		ret = -ENOMEM;
		goto err;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem_obj->pages, NULL,
			npages);
	if (ret < 0)
		goto err_free_large;

	if (sgt->nents == 1) {
		/* a single-entry sg table means physically contiguous memory. */
		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
	} else {
		/*
		 * This case could be either CONTIG or NONCONTIG, but
		 * for now assume NONCONTIG.
		 * TODO: find a way for the exporter to tell the
		 * importer what type its buffer actually is.
		 */
		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
	}

	return &exynos_gem_obj->base;

err_free_large:
	drm_free_large(exynos_gem_obj->pages);
err:
	drm_gem_object_release(&exynos_gem_obj->base);
	kfree(exynos_gem_obj);
	return ERR_PTR(ret);
}

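/*
 * All buffers are allocated with DMA_ATTR_NO_KERNEL_MAPPING (see
 * exynos_drm_alloc_buf()), so there is no kernel virtual address to
 * hand out; vmap therefore returns NULL and vunmap is a no-op.
 */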
void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
{
	return NULL;
}

void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do */
}
623