/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"

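/*
 * Allocate the backing storage for a GEM object through the DMA API and
 * build the pages array that the CPU fault handler uses for mappings.
 */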
static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;
	unsigned long attr;
	unsigned int nr_pages;
	struct sg_table sgt;
	int ret = -ENOMEM;

	if (exynos_gem->dma_addr) {
		DRM_DEBUG_KMS("already allocated.\n");
		return 0;
	}

	exynos_gem->dma_attrs = 0;

	/*
	 * If EXYNOS_BO_CONTIG is set, a fully physically contiguous
	 * memory region is allocated; otherwise the allocation is made
	 * as physically contiguous as possible.
	 */
	if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
		exynos_gem->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS;

	/*
	 * If EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE is set, use a
	 * write-combined mapping; otherwise use a cachable mapping.
	 */
	if (exynos_gem->flags & EXYNOS_BO_WC ||
			!(exynos_gem->flags & EXYNOS_BO_CACHABLE))
		attr = DMA_ATTR_WRITE_COMBINE;
	else
		attr = DMA_ATTR_NON_CONSISTENT;

	exynos_gem->dma_attrs |= attr;
	exynos_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	nr_pages = exynos_gem->size >> PAGE_SHIFT;

	exynos_gem->pages = kvmalloc_array(nr_pages, sizeof(struct page *),
			GFP_KERNEL | __GFP_ZERO);
	if (!exynos_gem->pages) {
		DRM_ERROR("failed to allocate pages.\n");
		return -ENOMEM;
	}

	exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
					     &exynos_gem->dma_addr, GFP_KERNEL,
					     exynos_gem->dma_attrs);
	if (!exynos_gem->cookie) {
		DRM_ERROR("failed to allocate buffer.\n");
		goto err_free;
	}

	ret = dma_get_sgtable_attrs(to_dma_dev(dev), &sgt, exynos_gem->cookie,
				    exynos_gem->dma_addr, exynos_gem->size,
				    exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to get sgtable.\n");
		goto err_dma_free;
	}

	if (drm_prime_sg_to_page_addr_arrays(&sgt, exynos_gem->pages, NULL,
					     nr_pages)) {
		DRM_ERROR("invalid sgtable.\n");
		ret = -EINVAL;
		goto err_sgt_free;
	}

	sg_free_table(&sgt);

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	return 0;

err_sgt_free:
	sg_free_table(&sgt);
err_dma_free:
	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
		       exynos_gem->dma_addr, exynos_gem->dma_attrs);
err_free:
	kvfree(exynos_gem->pages);

	return ret;
}

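/*
 * Release the DMA buffer and the pages array set up by
 * exynos_drm_alloc_buf().
 */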
static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;

	if (!exynos_gem->dma_addr) {
		DRM_DEBUG_KMS("dma_addr is invalid.\n");
		return;
	}

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
			(dma_addr_t)exynos_gem->dma_addr,
			exynos_gem->dma_attrs);

	kvfree(exynos_gem->pages);
}

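/*
 * Create a userspace handle for @obj and drop the allocation reference,
 * so the handle becomes the only reference held on userspace's behalf.
 */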
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * Allocate an id in the idr table where the obj is registered;
	 * the handle holds the id that userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put_unlocked(obj);

	return 0;
}

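/*
 * Tear down a GEM object: release its backing storage (or detach from
 * the exporter for imported buffers) and free the object itself.
 */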
void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
{
	struct drm_gem_object *obj = &exynos_gem->base;

	DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);

	/*
	 * Do not release a memory region owned by the exporter; the
	 * exporter releases it once the dma-buf's refcount drops to 0.
	 */
	if (obj->import_attach)
		drm_prime_gem_destroy(obj, exynos_gem->sgt);
	else
		exynos_drm_free_buf(exynos_gem);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem);
}

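/*
 * Allocate and initialize a struct exynos_drm_gem and reserve a fake
 * mmap offset for it; the backing buffer is allocated separately.
 */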
static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
						  unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem = kzalloc(sizeof(*exynos_gem), GFP_KERNEL);
	if (!exynos_gem)
		return ERR_PTR(-ENOMEM);

	exynos_gem->size = size;
	obj = &exynos_gem->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret < 0) {
		drm_gem_object_release(obj);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	DRM_DEBUG_KMS("created file object = %pK\n", obj->filp);

	return exynos_gem;
}

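/*
 * Create a GEM object of @size bytes with the requested memory type
 * and cache attribute flags, including its backing buffer.
 */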
struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
					     unsigned int flags,
					     unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	int ret;

	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_ERROR("invalid GEM buffer flags: %u\n", flags);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		DRM_ERROR("invalid GEM buffer size: %lu\n", size);
		return ERR_PTR(-EINVAL);
	}

	size = roundup(size, PAGE_SIZE);

	exynos_gem = exynos_drm_gem_init(dev, size);
	if (IS_ERR(exynos_gem))
		return exynos_gem;

	if (!is_drm_iommu_supported(dev) && (flags & EXYNOS_BO_NONCONTIG)) {
		/*
		 * When no IOMMU is available, all allocated buffers are
		 * contiguous anyway, so drop the EXYNOS_BO_NONCONTIG flag.
		 */
		flags &= ~EXYNOS_BO_NONCONTIG;
		DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n");
	}

	/* store the memory type and cache attribute requested by userspace. */
	exynos_gem->flags = flags;

	ret = exynos_drm_alloc_buf(exynos_gem);
	if (ret < 0) {
		drm_gem_object_release(&exynos_gem->base);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	return exynos_gem;
}

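/*
 * DRM_IOCTL_EXYNOS_GEM_CREATE handler: allocate a buffer and return a
 * handle for it. Illustrative userspace sketch (error handling omitted):
 *
 *	struct drm_exynos_gem_create req = {
 *		.size = length,
 *		.flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
 *	};
 *	ioctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req);
 *
 * On success, req.handle names the new buffer.
 */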
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem *exynos_gem;
	int ret;

	exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem))
		return PTR_ERR(exynos_gem);

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

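/*
 * DRM_IOCTL_EXYNOS_GEM_MAP handler: look up the fake mmap offset of a
 * GEM object so userspace can pass it to mmap() on the DRM fd.
 */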
int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_exynos_gem_map *args = data;

	return drm_gem_dumb_map_offset(file_priv, dev, args->handle,
				       &args->offset);
}

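/*
 * Look up a GEM handle and return the exynos object, or NULL on
 * failure. The lookup takes a reference that the caller must drop
 * with drm_gem_object_put_unlocked() when done.
 */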
struct exynos_drm_gem *exynos_drm_gem_get(struct drm_file *filp,
					  unsigned int gem_handle)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filp, gem_handle);
	if (!obj)
		return NULL;
	return to_exynos_gem(obj);
}

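/*
 * Map the buffer into the given VMA with the attributes it was
 * allocated with. vm_pgoff carried the fake GEM offset and is reset
 * to 0 here, since dma_mmap_attrs() treats it as an offset into the
 * buffer.
 */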
static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
				      struct vm_area_struct *vma)
{
	struct drm_device *drm_dev = exynos_gem->base.dev;
	unsigned long vm_size;
	int ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	vm_size = vma->vm_end - vma->vm_start;

	/* check if user-requested size is valid. */
	if (vm_size > exynos_gem->size)
		return -EINVAL;

	ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie,
			     exynos_gem->dma_addr, exynos_gem->size,
			     exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}

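/*
 * DRM_IOCTL_EXYNOS_GEM_GET handler: report the flags and size of an
 * existing GEM object back to userspace.
 */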
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return -EINVAL;
	}

	exynos_gem = to_exynos_gem(obj);

	args->flags = exynos_gem->flags;
	args->size = exynos_gem->size;

	drm_gem_object_put_unlocked(obj);

	return 0;
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	exynos_drm_gem_destroy(to_exynos_gem(obj));
}

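/*
 * Dumb-buffer allocation for scanout; the pitch is the line width in
 * bytes, with bpp rounded up to a whole number of bytes.
 */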
int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem *exynos_gem;
	unsigned int flags;
	int ret;

	/*
	 * Allocate memory to be used for a framebuffer; this callback
	 * is invoked by userspace through the DRM_IOCTL_MODE_CREATE_DUMB
	 * command.
	 */

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	if (is_drm_iommu_supported(dev))
		flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
	else
		flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;

	exynos_gem = exynos_drm_gem_create(dev, flags, args->size);
	if (IS_ERR(exynos_gem)) {
		dev_warn(dev->dev, "FB allocation failed.\n");
		return PTR_ERR(exynos_gem);
	}

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

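/*
 * Fault handler for CPU mappings: resolve the faulting address to a
 * page of the buffer and insert its pfn into the VMA.
 */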
vm_fault_t exynos_drm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	unsigned long pfn;
	pgoff_t page_offset;

	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) {
		DRM_ERROR("invalid page offset\n");
		return VM_FAULT_SIGBUS;
	}

	pfn = page_to_pfn(exynos_gem->pages[page_offset]);
	return vmf_insert_mixed(vma, vmf->address,
			__pfn_to_pfn_t(pfn, PFN_DEV));
}

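/*
 * Set the page protection matching the object's cache attribute flags,
 * then map the buffer into the VMA.
 */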
static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj,
				   struct vm_area_struct *vma)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	int ret;

	DRM_DEBUG_KMS("flags = 0x%x\n", exynos_gem->flags);

	/* non-cachable by default. */
	if (exynos_gem->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (exynos_gem->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));

	ret = exynos_drm_gem_mmap_buffer(exynos_gem, vma);
	if (ret)
		goto err_close_vm;

	return ret;

err_close_vm:
	drm_gem_vm_close(vma);

	return ret;
}

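/*
 * mmap() entry point on the DRM fd. Imported buffers are forwarded to
 * the exporter via dma_buf_mmap(); native buffers are mapped directly.
 */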
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int ret;

	/* set up the vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;

	if (obj->import_attach)
		return dma_buf_mmap(obj->dma_buf, vma, 0);

	return exynos_drm_gem_mmap_obj(obj, vma);
}

/* low-level interface prime helpers */
struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	return drm_gem_prime_import_dev(dev, dma_buf, to_dma_dev(dev));
}

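/* Export the pages array as a scatter-gather table for PRIME. */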
struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	int npages;

	npages = exynos_gem->size >> PAGE_SHIFT;

	return drm_prime_pages_to_sg(exynos_gem->pages, npages);
}

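/*
 * Import a PRIME buffer: wrap the exporter's scatter-gather table in a
 * new GEM object and rebuild the pages array for the fault handler.
 */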
struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
				     struct dma_buf_attachment *attach,
				     struct sg_table *sgt)
{
	struct exynos_drm_gem *exynos_gem;
	int npages;
	int ret;

	exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
	if (IS_ERR(exynos_gem)) {
		ret = PTR_ERR(exynos_gem);
		return ERR_PTR(ret);
	}

	exynos_gem->dma_addr = sg_dma_address(sgt->sgl);

	npages = exynos_gem->size >> PAGE_SHIFT;
	exynos_gem->pages = kvmalloc_array(npages, sizeof(struct page *),
					   GFP_KERNEL);
	if (!exynos_gem->pages) {
		ret = -ENOMEM;
		goto err;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem->pages, NULL,
					       npages);
	if (ret < 0)
		goto err_free_large;

	exynos_gem->sgt = sgt;

	if (sgt->nents == 1) {
		/* always physically contiguous memory if sgt->nents is 1. */
		exynos_gem->flags |= EXYNOS_BO_CONTIG;
	} else {
		/*
		 * This case could be either CONTIG or NONCONTIG, but for
		 * now assume NONCONTIG.
		 * TODO: find a way for the exporter to tell the importer
		 * the type of its buffer.
		 */
		exynos_gem->flags |= EXYNOS_BO_NONCONTIG;
	}

	return &exynos_gem->base;

err_free_large:
	kvfree(exynos_gem->pages);
err:
	drm_gem_object_release(&exynos_gem->base);
	kfree(exynos_gem);
	return ERR_PTR(ret);
}

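/*
 * Buffers are allocated with DMA_ATTR_NO_KERNEL_MAPPING, so there is no
 * kernel virtual address to hand out; vmap is deliberately a no-op.
 */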
void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
{
	return NULL;
}

void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do */
}

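/* PRIME mmap: set up the VMA via the GEM helper, then map the buffer. */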
int exynos_drm_gem_prime_mmap(struct drm_gem_object *obj,
			      struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret < 0)
		return ret;

	return exynos_drm_gem_mmap_obj(obj, vma);
}