// SPDX-License-Identifier: GPL-2.0-only
/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>

#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_put_unlocked(&obj->gem);
}

/* XXX move this into lib/scatterlist.c? */
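/*
 * Duplicate the layout of an existing scatterlist into a freshly allocated
 * SG table. Only the pages and segment lengths are carried over; the
 * per-segment offsets are reset to zero.
 */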
static int sg_alloc_table_from_sg(struct sg_table *sgt, struct scatterlist *sg,
				  unsigned int nents, gfp_t gfp_mask)
{
	struct scatterlist *dst;
	unsigned int i;
	int err;

	err = sg_alloc_table(sgt, nents, gfp_mask);
	if (err < 0)
		return err;

	dst = sgt->sgl;

	for (i = 0; i < nents; i++) {
		sg_set_page(dst, sg_page(sg), sg->length, 0);
		dst = sg_next(dst);
		sg = sg_next(sg);
	}

	return 0;
}

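/*
 * Pin the buffer for use by @dev. If the buffer has already been mapped
 * through the IOMMU by the driver itself, hand back the IOVA of that
 * mapping via @phys and return NULL. Otherwise return an SG table
 * describing the backing memory so that host1x can create a mapping
 * through the DMA API on our behalf.
 */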
static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
				     dma_addr_t *phys)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct sg_table *sgt;
	int err;

	/*
	 * If we've manually mapped the buffer object through the IOMMU, make
	 * sure to return the IOVA address of our mapping.
	 */
	if (phys && obj->mm) {
		*phys = obj->iova;
		return NULL;
	}

	/*
	 * If we don't have a mapping for this buffer yet, return an SG table
	 * so that host1x can do the mapping for us via the DMA API.
	 */
	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	if (obj->pages) {
		/*
		 * If the buffer object was allocated from the explicit IOMMU
		 * API code paths, construct an SG table from the pages.
		 */
		err = sg_alloc_table_from_pages(sgt, obj->pages, obj->num_pages,
						0, obj->gem.size, GFP_KERNEL);
		if (err < 0)
			goto free;
	} else if (obj->sgt) {
		/*
		 * If the buffer object already has an SG table but no pages
		 * were allocated for it, it means the buffer was imported and
		 * the SG table needs to be copied to avoid overwriting any
		 * other potential users of the original SG table.
		 */
		err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl, obj->sgt->nents,
					     GFP_KERNEL);
		if (err < 0)
			goto free;
	} else {
		/*
		 * If the buffer object had no pages allocated and if it was
		 * not imported, it had to be allocated with the DMA API, so
		 * the DMA API helper can be used.
		 */
		err = dma_get_sgtable(dev, sgt, obj->vaddr, obj->iova,
				      obj->gem.size);
		if (err < 0)
			goto free;
	}

	return sgt;

free:
	kfree(sgt);
	return ERR_PTR(err);
}

static void tegra_bo_unpin(struct device *dev, struct sg_table *sgt)
{
	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}
}

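/*
 * Return a kernel virtual mapping of the buffer, covering all three backing
 * variants: buffers allocated through the DMA API are already mapped,
 * imported buffers are mapped by their exporter, and page-backed buffers
 * get a write-combined vmap(). tegra_bo_munmap() undoes the latter two.
 */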
static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return obj->vaddr;
	else if (obj->gem.import_attach)
		return dma_buf_vmap(obj->gem.import_attach->dmabuf);
	else
		return vmap(obj->pages, obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return;
	else if (obj->gem.import_attach)
		dma_buf_vunmap(obj->gem.import_attach->dmabuf, addr);
	else
		vunmap(addr);
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_get(&obj->gem);

	return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
};

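/*
 * Map the buffer into the Tegra DRM IOMMU domain: reserve an IOVA range
 * from the drm_mm allocator and map the buffer's SG table at that address.
 * tegra->mm_lock serializes IOVA allocation and mapping against unmap.
 */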
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	int err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	mutex_lock(&tegra->mm_lock);

	err = drm_mm_insert_node_generic(&tegra->mm,
					 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n",
			err);
		goto unlock;
	}

	bo->iova = bo->mm->start;

	bo->size = iommu_map_sg(tegra->domain, bo->iova, bo->sgt->sgl,
				bo->sgt->nents, prot);
	if (!bo->size) {
		dev_err(tegra->drm->dev, "failed to map buffer\n");
		err = -ENOMEM;
		goto remove;
	}

	mutex_unlock(&tegra->mm_lock);

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
unlock:
	mutex_unlock(&tegra->mm_lock);
	kfree(bo->mm);
	return err;
}

static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	mutex_lock(&tegra->mm_lock);
	iommu_unmap(tegra->domain, bo->iova, bo->size);
	drm_mm_remove_node(bo->mm);
	mutex_unlock(&tegra->mm_lock);

	kfree(bo->mm);

	return 0;
}

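/*
 * Allocate the GEM skeleton of a buffer object. This does not allocate any
 * backing storage; callers follow up with tegra_bo_alloc() or a dma-buf
 * import.
 */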
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}

static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		dma_unmap_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			     DMA_FROM_DEVICE);
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
	}
}

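/*
 * Back the buffer with shmem pages, build an SG table for them and map the
 * table for DMA. Note that dma_map_sg() returns the number of mapped
 * entries, so a return value of zero signals failure.
 */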
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	int err;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto put_pages;
	}

	err = dma_map_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			 DMA_FROM_DEVICE);
	if (err == 0) {
		err = -EFAULT;
		goto free_sgt;
	}

	return 0;

free_sgt:
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return err;
}

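/*
 * Allocate backing storage for the buffer: if an IOMMU domain is available,
 * use discontiguous pages remapped through the IOMMU; otherwise fall back
 * to a contiguous write-combined DMA allocation.
 */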
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
					 GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}

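/*
 * Create a fully backed buffer object. A minimal usage sketch (error
 * handling condensed, size and flags chosen purely for illustration):
 *
 *	struct tegra_bo *bo;
 *
 *	bo = tegra_bo_create(drm, SZ_64K, DRM_TEGRA_GEM_CREATE_BOTTOM_UP);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 *
 * The requested size is rounded up to a multiple of PAGE_SIZE by
 * tegra_bo_alloc_object().
 */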
struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

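/*
 * Create a buffer object along with a userspace handle for it. The handle
 * holds the only reference (the local one is dropped before returning), so
 * the returned pointer is only guaranteed to stay valid for as long as the
 * handle does.
 */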
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size,
					     unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	drm_gem_object_put_unlocked(&bo->gem);

	return bo;
}

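/*
 * Wrap a foreign dma-buf in a buffer object: attach to the dma-buf, map
 * the attachment to obtain an SG table and, if an IOMMU domain is in use,
 * map that table into our IOVA space as well.
 */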
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}

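/*
 * Create a dumb buffer suitable for scanout. The pitch is computed from
 * the width and bits per pixel and aligned to the device's requirement.
 * As a worked example, assuming a 1920x1080 request at 32 bpp and a
 * 64-byte pitch alignment: min_pitch = 1920 * 32 / 8 = 7680 bytes, already
 * a multiple of 64, so size = 7680 * 1080 bytes.
 */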
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

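/*
 * Fault handler for mappings of page-backed buffers, inserting one page
 * per fault. Contiguous buffers are mapped upfront in __tegra_gem_mmap()
 * and should never fault, hence the SIGBUS for the pageless case.
 */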
static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	page = bo->pages[offset];

	return vmf_insert_page(vma, vmf->address, page);
}

const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

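/*
 * Set up the userspace mapping for a buffer: contiguous buffers are mapped
 * in one go via dma_mmap_wc(), whereas page-backed buffers are mapped
 * lazily, page by page, through tegra_bo_fault().
 */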
int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;
		int err;

		/*
		 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
		 * and set the vm_pgoff (used as a fake buffer offset by DRM)
		 * to 0 as we want to map the whole buffer.
		 */
		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
				  gem->size);
		if (err < 0) {
			drm_gem_vm_close(vma);
			return err;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vma->vm_flags |= VM_MIXEDMAP;
		vma->vm_flags &= ~VM_PFNMAP;

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}

int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	int err;

	err = drm_gem_mmap(file, vma);
	if (err < 0)
		return err;

	gem = vma->vm_private_data;

	return __tegra_gem_mmap(gem, vma);
}

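/*
 * Exporter side of PRIME: build an SG table describing the buffer and map
 * it into the importing device's address space. On failure this returns
 * NULL, which the dma-buf core converts into an error for the importer.
 */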
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
					      0, gem->size, GFP_KERNEL) < 0)
			goto free;
	} else {
		if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
				    gem->size) < 0)
			goto free;
	}

	if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
		goto free;

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	sg_free_table(sgt);
	kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}

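/*
 * CPU access bracketing for page-backed buffers: synchronize for the CPU
 * (DMA_FROM_DEVICE) before access begins and for the device
 * (DMA_TO_DEVICE) once it ends. Contiguous buffers use write-combined
 * memory and need no explicit maintenance here.
 */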
static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
					    enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sg_for_cpu(drm->dev, bo->sgt->sgl, bo->sgt->nents,
				    DMA_FROM_DEVICE);

	return 0;
}

static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
					  enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
				       DMA_TO_DEVICE);

	return 0;
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem = buf->priv;
	int err;

	err = drm_gem_mmap_obj(gem, gem->size, vma);
	if (err < 0)
		return err;

	return __tegra_gem_mmap(gem, vma);
}

static void *tegra_gem_prime_vmap(struct dma_buf *buf)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	return bo->vaddr;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
{
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
	.end_cpu_access = tegra_gem_prime_end_cpu_access,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};

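/*
 * Export a buffer object as a dma-buf using the ops above;
 * drm_gem_dmabuf_export() additionally ties the dma-buf's lifetime to
 * that of the DRM device.
 */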
struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.exp_name = KBUILD_MODNAME;
	exp_info.owner = gem->dev->driver->fops->owner;
	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return drm_gem_dmabuf_export(gem->dev, &exp_info);
}

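/*
 * Import a dma-buf as a buffer object. Buffers that this device exported
 * itself are short-circuited by taking a reference on the underlying GEM
 * object rather than creating a new import.
 */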
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_get(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}
694