// SPDX-License-Identifier: GPL-2.0-only
/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>

#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_put(&obj->gem);
}

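/*
 * Duplicate an existing SG list entry-by-entry into a freshly allocated
 * table. Only the page pointer and length of each entry are copied;
 * offsets are forced to zero, so the copy is only faithful when every
 * source entry starts at a page boundary.
 */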
/* XXX move this into lib/scatterlist.c? */
static int sg_alloc_table_from_sg(struct sg_table *sgt, struct scatterlist *sg,
				  unsigned int nents, gfp_t gfp_mask)
{
	struct scatterlist *dst;
	unsigned int i;
	int err;

	err = sg_alloc_table(sgt, nents, gfp_mask);
	if (err < 0)
		return err;

	dst = sgt->sgl;

	for (i = 0; i < nents; i++) {
		sg_set_page(dst, sg_page(sg), sg->length, 0);
		dst = sg_next(dst);
		sg = sg_next(sg);
	}

	return 0;
}

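/*
 * Pin a buffer object so that host1x can access it through @dev. Three
 * cases are handled below: a non-NULL @phys pointer selects a fast path
 * that returns the existing IOVA (or DMA address) directly, page-backed
 * and imported buffers hand back a freshly built SG table for host1x to
 * map via the DMA API, and DMA-API-backed buffers are described using
 * dma_get_sgtable().
 */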
static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
				     dma_addr_t *phys)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct sg_table *sgt;
	int err;

	/*
	 * If we've manually mapped the buffer object through the IOMMU, make
	 * sure to return the IOVA address of our mapping.
	 *
	 * Similarly, for buffers that have been allocated by the DMA API the
	 * physical address can be used for devices that are not attached to
	 * an IOMMU. For these devices, callers must pass a valid pointer via
	 * the @phys argument.
	 *
	 * Imported buffers were also already mapped at import time, so the
	 * existing mapping can be reused.
	 */
	if (phys) {
		*phys = obj->iova;
		return NULL;
	}

	/*
	 * If we don't have a mapping for this buffer yet, return an SG table
	 * so that host1x can do the mapping for us via the DMA API.
	 */
	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	if (obj->pages) {
		/*
		 * If the buffer object was allocated from the explicit IOMMU
		 * API code paths, construct an SG table from the pages.
		 */
		err = sg_alloc_table_from_pages(sgt, obj->pages, obj->num_pages,
						0, obj->gem.size, GFP_KERNEL);
		if (err < 0)
			goto free;
	} else if (obj->sgt) {
		/*
		 * If the buffer object already has an SG table but no pages
		 * were allocated for it, the buffer was imported. Copy the
		 * SG table so that the original, which may be shared with
		 * other users, is left untouched.
		 */
		err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl, obj->sgt->nents,
					     GFP_KERNEL);
		if (err < 0)
			goto free;
	} else {
		/*
		 * If the buffer object had no pages allocated and if it was
		 * not imported, it had to be allocated with the DMA API, so
		 * the DMA API helper can be used.
		 */
		err = dma_get_sgtable(dev, sgt, obj->vaddr, obj->iova,
				      obj->gem.size);
		if (err < 0)
			goto free;
	}

	return sgt;

free:
	kfree(sgt);
	return ERR_PTR(err);
}

static void tegra_bo_unpin(struct device *dev, struct sg_table *sgt)
{
	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}
}

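/*
 * Map a buffer object into the kernel's address space. The three backing
 * stores are handled in turn: DMA-API allocations already have a kernel
 * virtual address, imported buffers are mapped through the exporter's
 * dma_buf_vmap() implementation, and page-backed buffers are mapped with
 * vmap() using write-combined page protection, matching the protection
 * applied to user-space mappings in __tegra_gem_mmap().
 */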
static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return obj->vaddr;
	else if (obj->gem.import_attach)
		return dma_buf_vmap(obj->gem.import_attach->dmabuf);
	else
		return vmap(obj->pages, obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return;
	else if (obj->gem.import_attach)
		dma_buf_vunmap(obj->gem.import_attach->dmabuf, addr);
	else
		vunmap(addr);
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_get(&obj->gem);

	return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
};

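/*
 * Map a buffer object into the Tegra DRM IOMMU domain. An IOVA range is
 * carved out of the shared drm_mm allocator under mm_lock and the
 * buffer's SG list is then mapped into that range with read/write
 * permissions. Note that iommu_map_sg() returns the number of bytes
 * mapped, so a return value of zero indicates failure.
 */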
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	int err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	mutex_lock(&tegra->mm_lock);

	err = drm_mm_insert_node_generic(&tegra->mm,
					 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n",
			err);
		goto unlock;
	}

	bo->iova = bo->mm->start;

	bo->size = iommu_map_sg(tegra->domain, bo->iova, bo->sgt->sgl,
				bo->sgt->nents, prot);
	if (!bo->size) {
		dev_err(tegra->drm->dev, "failed to map buffer\n");
		err = -ENOMEM;
		goto remove;
	}

	mutex_unlock(&tegra->mm_lock);

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
unlock:
	mutex_unlock(&tegra->mm_lock);
	kfree(bo->mm);
	return err;
}

static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	mutex_lock(&tegra->mm_lock);
	iommu_unmap(tegra->domain, bo->iova, bo->size);
	drm_mm_remove_node(bo->mm);
	mutex_unlock(&tegra->mm_lock);

	kfree(bo->mm);

	return 0;
}

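/*
 * Allocate and initialize the common bookkeeping for a buffer object:
 * bind the host1x_bo to tegra_bo_ops, initialize the GEM object with a
 * page-aligned size and eagerly create the fake mmap offset that user
 * space will later use to map the buffer.
 */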
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}

static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		dma_unmap_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			     DMA_FROM_DEVICE);
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
	}
}

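/*
 * Back a buffer object with individual pages from the shmem backing
 * store. The pages are collected with drm_gem_get_pages(), described by
 * an SG table and mapped for DMA. dma_map_sg() returns the number of
 * mapped entries, so zero is treated as a mapping failure.
 */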
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	int err;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto put_pages;
	}

	err = dma_map_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			 DMA_FROM_DEVICE);
	if (err == 0) {
		err = -EFAULT;
		goto free_sgt;
	}

	return 0;

free_sgt:
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return err;
}

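/*
 * Pick the backing store for a new buffer object: if an IOMMU domain is
 * available, the buffer is assembled from individual pages and mapped
 * through the IOMMU, otherwise a write-combined buffer is allocated
 * through the DMA API.
 */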
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
					 GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}

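/*
 * tegra_bo_create - allocate a new buffer object
 * @drm: DRM device
 * @size: size of the buffer in bytes, rounded up to page granularity
 * @flags: DRM_TEGRA_GEM_CREATE_* flags controlling tiling and layout
 *
 * Returns a pointer to the new buffer object on success or an ERR_PTR()-
 * encoded error code on failure.
 */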
struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

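/*
 * Create a buffer object and immediately attach a handle for @file to
 * it. The handle holds the only reference on return because the local
 * reference is dropped below, so the returned pointer stays valid only
 * as long as the handle (or another reference) keeps the object alive.
 */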
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size,
					     unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	drm_gem_object_put(&bo->gem);

	return bo;
}

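/*
 * Wrap a foreign dma-buf in a buffer object: attach to the buffer, map
 * it for device access and, if the driver uses an IOMMU domain, install
 * a mapping there as well. A reference to the dma-buf is taken for the
 * lifetime of the object; it is released on the error paths below and,
 * for successfully imported buffers, by drm_prime_gem_destroy() in
 * tegra_bo_free_object().
 */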
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}

int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

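/*
 * Page fault handler for user-space mappings. Only page-backed buffer
 * objects fault their pages in lazily; DMA-API-backed buffers are mapped
 * in full by __tegra_gem_mmap(), so a fault on one of those indicates a
 * bad access and raises SIGBUS.
 */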
static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	page = bo->pages[offset];

	return vmf_insert_page(vma, vmf->address, page);
}

const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

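/*
 * Set up a user-space mapping of a buffer object. DMA-API-backed buffers
 * are mapped immediately with dma_mmap_wc(), temporarily clearing
 * vm_pgoff because the DRM fake offset stored there would otherwise be
 * interpreted as an offset into the buffer. Page-backed buffers are
 * instead marked VM_MIXEDMAP and populated lazily by tegra_bo_fault().
 */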
int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;
		int err;

		/*
		 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
		 * and set the vm_pgoff (used as a fake buffer offset by DRM)
		 * to 0 as we want to map the whole buffer.
		 */
		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
				  gem->size);
		if (err < 0) {
			drm_gem_vm_close(vma);
			return err;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vma->vm_flags |= VM_MIXEDMAP;
		vma->vm_flags &= ~VM_PFNMAP;

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}

int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	int err;

	err = drm_gem_mmap(file, vma);
	if (err < 0)
		return err;

	gem = vma->vm_private_data;

	return __tegra_gem_mmap(gem, vma);
}

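/*
 * dma-buf exporter implementation. Each call to map_dma_buf builds a
 * fresh SG table for the importing device, either from the buffer's
 * pages or via dma_get_sgtable() for DMA-API-backed buffers, and maps
 * it in the requested direction.
 */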
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
					      0, gem->size, GFP_KERNEL) < 0)
			goto free;
	} else {
		if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
				    gem->size) < 0)
			goto free;
	}

	if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
		goto free;

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	sg_free_table(sgt);
	kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}

static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
					    enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sg_for_cpu(drm->dev, bo->sgt->sgl, bo->sgt->nents,
				    DMA_FROM_DEVICE);

	return 0;
}

static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
					  enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
				       DMA_TO_DEVICE);

	return 0;
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem = buf->priv;
	int err;

	err = drm_gem_mmap_obj(gem, gem->size, vma);
	if (err < 0)
		return err;

	return __tegra_gem_mmap(gem, vma);
}

static void *tegra_gem_prime_vmap(struct dma_buf *buf)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	return bo->vaddr;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
{
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
	.end_cpu_access = tegra_gem_prime_end_cpu_access,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};

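/*
 * Export a buffer object as a dma-buf using the driver's own dma_buf_ops
 * so that the identity of the underlying GEM object is preserved and can
 * be recognized again by tegra_gem_prime_import() below.
 */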
struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.exp_name = KBUILD_MODNAME;
	exp_info.owner = gem->dev->driver->fops->owner;
	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return drm_gem_dmabuf_export(gem->dev, &exp_info);
}

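/*
 * Import a dma-buf as a buffer object. If the dma-buf was exported by
 * this driver for the same device, the underlying GEM object is simply
 * referenced again instead of being wrapped in a new import; otherwise
 * the generic import path in tegra_bo_import() is taken.
 */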
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_get(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}