// SPDX-License-Identifier: GPL-2.0-only
/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <linux/module.h>

#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

MODULE_IMPORT_NS(DMA_BUF);

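/*
 * Count the number of contiguous DMA address ranges in a mapped SG list.
 * Entries whose DMA addresses run back-to-back are folded into one chunk.
 * Note that @next only advances when a new chunk starts, so a contiguous
 * run spanning more than two entries may be over-counted; in practice
 * contiguous pages are merged into a single entry when the table is built.
 */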
static unsigned int sg_dma_count_chunks(struct scatterlist *sgl, unsigned int nents)
{
	dma_addr_t next = ~(dma_addr_t)0;
	unsigned int count = 0, i;
	struct scatterlist *s;

	for_each_sg(sgl, s, nents, i) {
		/* sg_dma_address(s) is only valid for entries that have sg_dma_len(s) != 0. */
		if (!sg_dma_len(s))
			continue;

		if (sg_dma_address(s) != next) {
			next = sg_dma_address(s) + sg_dma_len(s);
			count++;
		}
	}

	return count;
}

static inline unsigned int sgt_dma_count_chunks(struct sg_table *sgt)
{
	return sg_dma_count_chunks(sgt->sgl, sgt->nents);
}

static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_put(&obj->gem);
}

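/*
 * Pin a buffer object into the DMA address space of @dev and describe the
 * result in a host1x_bo_mapping. Imported DMA-BUFs are attached and mapped
 * through the dma-buf API; native buffers are described by an SG table that
 * is mapped via the DMA API.
 */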
static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
					      enum dma_data_direction direction)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct drm_gem_object *gem = &obj->gem;
	struct host1x_bo_mapping *map;
	int err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return ERR_PTR(-ENOMEM);

	kref_init(&map->ref);
	map->bo = host1x_bo_get(bo);
	map->direction = direction;
	map->dev = dev;

	/*
	 * Imported buffers need special treatment to satisfy the semantics of DMA-BUF.
	 */
	if (gem->import_attach) {
		struct dma_buf *buf = gem->import_attach->dmabuf;

		map->attach = dma_buf_attach(buf, dev);
		if (IS_ERR(map->attach)) {
			err = PTR_ERR(map->attach);
			goto free;
		}

		map->sgt = dma_buf_map_attachment_unlocked(map->attach, direction);
		if (IS_ERR(map->sgt)) {
			dma_buf_detach(buf, map->attach);
			err = PTR_ERR(map->sgt);
			map->sgt = NULL;
			goto free;
		}

		/* Reuse err to carry the DMA chunk count down to the out: label. */
		err = sgt_dma_count_chunks(map->sgt);
		map->size = gem->size;

		goto out;
	}

	/*
	 * If we don't have a mapping for this buffer yet, return an SG table
	 * so that host1x can do the mapping for us via the DMA API.
	 */
	map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL);
	if (!map->sgt) {
		err = -ENOMEM;
		goto free;
	}

	if (obj->pages) {
		/*
		 * If the buffer object was allocated from the explicit IOMMU
		 * API code paths, construct an SG table from the pages.
		 */
		err = sg_alloc_table_from_pages(map->sgt, obj->pages, obj->num_pages, 0, gem->size,
						GFP_KERNEL);
		if (err < 0)
			goto free;
	} else {
		/*
		 * If the buffer object had no pages allocated and if it was
		 * not imported, it had to be allocated with the DMA API, so
		 * the DMA API helper can be used.
		 */
		err = dma_get_sgtable(dev, map->sgt, obj->vaddr, obj->iova, gem->size);
		if (err < 0)
			goto free;
	}

	err = dma_map_sgtable(dev, map->sgt, direction, 0);
	if (err)
		goto free_sgt;

out:
	/*
	 * If the buffer object was mapped manually through the IOMMU (obj->mm
	 * is set), return the existing IOVA address of that mapping; otherwise
	 * use the DMA address of the first entry in the mapped SG table.
	 */
	if (!obj->mm) {
		map->phys = sg_dma_address(map->sgt->sgl);
		map->chunks = err;
	} else {
		map->phys = obj->iova;
		map->chunks = 1;
	}

	map->size = gem->size;

	return map;

free_sgt:
	sg_free_table(map->sgt);
free:
	kfree(map->sgt);
	kfree(map);
	return ERR_PTR(err);
}

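/*
 * Undo tegra_bo_pin(): unmap and detach an imported DMA-BUF, or unmap and
 * free the SG table created for a native buffer, then drop the reference
 * taken on the buffer object.
 */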
static void tegra_bo_unpin(struct host1x_bo_mapping *map)
{
	if (map->attach) {
		dma_buf_unmap_attachment_unlocked(map->attach, map->sgt,
						  map->direction);
		dma_buf_detach(map->attach->dmabuf, map->attach);
	} else {
		dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
		sg_free_table(map->sgt);
		kfree(map->sgt);
	}

	host1x_bo_put(map->bo);
	kfree(map);
}

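/*
 * Return a kernel virtual address for the buffer: the existing vmapping if
 * one was set up at allocation time, a dma-buf vmap for imported buffers, or
 * a fresh vmap of the page array otherwise. Returns NULL on failure.
 */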
static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct iosys_map map;
	int ret;

	if (obj->vaddr) {
		return obj->vaddr;
	} else if (obj->gem.import_attach) {
		ret = dma_buf_vmap_unlocked(obj->gem.import_attach->dmabuf, &map);
		return ret ? NULL : map.vaddr;
	} else {
		return vmap(obj->pages, obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));
	}
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(addr);

	if (obj->vaddr)
		return;
	else if (obj->gem.import_attach)
		dma_buf_vunmap_unlocked(obj->gem.import_attach->dmabuf, &map);
	else
		vunmap(addr);
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_get(&obj->gem);

	return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
};

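/*
 * Allocate a range of I/O virtual addresses from the Tegra DRM address space
 * and map the buffer's SG table into it. iommu_map_sgtable() returns the
 * number of bytes mapped, so zero indicates failure here.
 */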
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	int err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	mutex_lock(&tegra->mm_lock);

	err = drm_mm_insert_node_generic(&tegra->mm,
					 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n",
			err);
		goto unlock;
	}

	bo->iova = bo->mm->start;

	bo->size = iommu_map_sgtable(tegra->domain, bo->iova, bo->sgt, prot);
	if (!bo->size) {
		dev_err(tegra->drm->dev, "failed to map buffer\n");
		err = -ENOMEM;
		goto remove;
	}

	mutex_unlock(&tegra->mm_lock);

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
unlock:
	mutex_unlock(&tegra->mm_lock);
	kfree(bo->mm);
	return err;
}

static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	mutex_lock(&tegra->mm_lock);
	iommu_unmap(tegra->domain, bo->iova, bo->size);
	drm_mm_remove_node(bo->mm);
	mutex_unlock(&tegra->mm_lock);

	kfree(bo->mm);

	return 0;
}

static const struct drm_gem_object_funcs tegra_gem_object_funcs = {
	.free = tegra_bo_free_object,
	.export = tegra_gem_prime_export,
	.vm_ops = &tegra_bo_vm_ops,
};

static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->gem.funcs = &tegra_gem_object_funcs;

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}

static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		dma_unmap_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
	}
}

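/*
 * Back the buffer object with shmem pages and map them for DMA. The pages
 * are handed back clean on the error path (not marked dirty or accessed),
 * since nothing has touched them yet.
 */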
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	int err;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->gem.dev, bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto put_pages;
	}

	err = dma_map_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
	if (err)
		goto free_sgt;

	return 0;

free_sgt:
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return err;
}

static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
					 GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}

struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

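/*
 * Create a buffer object and a userspace handle for it.
 * drm_gem_handle_create() takes its own reference and the creation reference
 * is dropped before returning, so the returned pointer is only valid as long
 * as the handle is.
 */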
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size,
					     unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	drm_gem_object_put(&bo->gem);

	return bo;
}

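/*
 * Wrap a foreign DMA-BUF in a buffer object: attach and map it for the
 * device and, if an IOMMU domain is in use, map the resulting SG table into
 * the Tegra DRM address space.
 */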
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment_unlocked(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

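/*
 * Final cleanup for a buffer object: drop cached host1x mappings, undo the
 * IOMMU mapping if one exists, and release either the imported DMA-BUF or
 * the backing memory.
 */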
void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct host1x_bo_mapping *mapping, *tmp;
	struct tegra_bo *bo = to_tegra_bo(gem);

	/* remove all mappings of this buffer object from any caches */
	list_for_each_entry_safe(mapping, tmp, &bo->base.mappings, list) {
		if (mapping->cache)
			host1x_bo_unpin(mapping);
		else
			dev_err(gem->dev->dev, "mapping %p stale for device %s\n", mapping,
				dev_name(mapping->dev));
	}

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		dma_buf_unmap_attachment_unlocked(gem->import_attach, bo->sgt,
						  DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}

int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	page = bo->pages[offset];

	return vmf_insert_page(vma, vmf->address, page);
}

const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

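/*
 * Set up userspace mappings: contiguous (DMA API) buffers are remapped
 * write-combined in one go, while page-backed buffers are faulted in lazily
 * through tegra_bo_fault().
 */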
int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;
		int err;

		/*
		 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
		 * and set the vm_pgoff (used as a fake buffer offset by DRM)
		 * to 0 as we want to map the whole buffer.
		 */
		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
				  gem->size);
		if (err < 0) {
			drm_gem_vm_close(vma);
			return err;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vma->vm_flags |= VM_MIXEDMAP;
		vma->vm_flags &= ~VM_PFNMAP;

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}

int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	int err;

	err = drm_gem_mmap(file, vma);
	if (err < 0)
		return err;

	gem = vma->vm_private_data;

	return __tegra_gem_mmap(gem, vma);
}

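/*
 * dma-buf exporter callbacks: build and map an SG table for the attached
 * device, mirroring the two allocation paths (page-backed or DMA API). The
 * dma-buf core treats a NULL return from .map_dma_buf as -ENOMEM.
 */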
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
					      0, gem->size, GFP_KERNEL) < 0)
			goto free;
	} else {
		if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
				    gem->size) < 0)
			goto free;
	}

	if (dma_map_sgtable(attach->dev, sgt, dir, 0))
		goto free;

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);

	sg_free_table(sgt);
	kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}

static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
					    enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sgtable_for_cpu(drm->dev, bo->sgt, DMA_FROM_DEVICE);

	return 0;
}

static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
					  enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sgtable_for_device(drm->dev, bo->sgt, DMA_TO_DEVICE);

	return 0;
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem = buf->priv;
	int err;

	dma_resv_assert_held(buf->resv);

	err = drm_gem_mmap_obj(gem, gem->size, vma);
	if (err < 0)
		return err;

	return __tegra_gem_mmap(gem, vma);
}

static int tegra_gem_prime_vmap(struct dma_buf *buf, struct iosys_map *map)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	void *vaddr;

	/*
	 * tegra_bo_mmap() signals failure by returning NULL rather than an
	 * ERR_PTR() value, so check for both to avoid handing a NULL
	 * vmapping to the caller.
	 */
	vaddr = tegra_bo_mmap(&bo->base);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr ? PTR_ERR(vaddr) : -ENOMEM;

	iosys_map_set_vaddr(map, vaddr);

	return 0;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, struct iosys_map *map)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	tegra_bo_munmap(&bo->base, map->vaddr);
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
	.end_cpu_access = tegra_gem_prime_end_cpu_access,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};

struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.exp_name = KBUILD_MODNAME;
	exp_info.owner = gem->dev->driver->fops->owner;
	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return drm_gem_dmabuf_export(gem->dev, &exp_info);
}

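/*
 * Import a DMA-BUF. If the buffer was exported by this device, simply take
 * a reference to the underlying GEM object instead of creating a new
 * wrapper around it.
 */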
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_get(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}

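/*
 * Resolve a userspace GEM handle to the embedded host1x_bo. The reference
 * taken by drm_gem_object_lookup() is handed to the caller.
 */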
struct host1x_bo *tegra_gem_lookup(struct drm_file *file, u32 handle)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, handle);
	if (!gem)
		return NULL;

	bo = to_tegra_bo(gem);
	return &bo->base;
}