// SPDX-License-Identifier: GPL-2.0-only
/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <linux/module.h>

#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

MODULE_IMPORT_NS(DMA_BUF);

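/*
 * Count the number of contiguous DMA chunks in a scatterlist. Entries whose
 * DMA addresses follow on from the previous entry are merged into a single
 * chunk.
 *
 * Illustrative example (addresses are made up): entries covering IOVAs
 * 0x1000-0x1fff and 0x2000-0x2fff form one chunk, while 0x1000-0x1fff
 * followed by 0x4000-0x4fff form two.
 */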
static unsigned int sg_dma_count_chunks(struct scatterlist *sgl, unsigned int nents)
{
	dma_addr_t next = ~(dma_addr_t)0;
	unsigned int count = 0, i;
	struct scatterlist *s;

	for_each_sg(sgl, s, nents, i) {
		/* sg_dma_address(s) is only valid for entries that have sg_dma_len(s) != 0. */
		if (!sg_dma_len(s))
			continue;

		if (sg_dma_address(s) != next) {
			next = sg_dma_address(s) + sg_dma_len(s);
			count++;
		}
	}

	return count;
}

static inline unsigned int sgt_dma_count_chunks(struct sg_table *sgt)
{
	return sg_dma_count_chunks(sgt->sgl, sgt->nents);
}

static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_put(&obj->gem);
}

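/*
 * Pin a buffer object so that the given device can access it. Three cases
 * are handled below: buffers imported through DMA-BUF are attached and
 * mapped with the dma_buf_*() API, buffers backed by individual pages
 * (explicit IOMMU path) get an SG table built from those pages, and buffers
 * allocated through the DMA API are described via dma_get_sgtable().
 */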
static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
					      enum dma_data_direction direction)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct drm_gem_object *gem = &obj->gem;
	struct host1x_bo_mapping *map;
	int err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return ERR_PTR(-ENOMEM);

	kref_init(&map->ref);
	map->bo = host1x_bo_get(bo);
	map->direction = direction;
	map->dev = dev;

	/*
	 * Imported buffers need special treatment to satisfy the semantics of DMA-BUF.
	 */
	if (gem->import_attach) {
		struct dma_buf *buf = gem->import_attach->dmabuf;

		map->attach = dma_buf_attach(buf, dev);
		if (IS_ERR(map->attach)) {
			err = PTR_ERR(map->attach);
			goto free;
		}

		map->sgt = dma_buf_map_attachment(map->attach, direction);
		if (IS_ERR(map->sgt)) {
			dma_buf_detach(buf, map->attach);
			err = PTR_ERR(map->sgt);
			map->sgt = NULL;
			goto free;
		}

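		/*
		 * Reuse err to carry the number of contiguous DMA chunks, so
		 * that the shared exit path below can store it in map->chunks.
		 */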
		err = sgt_dma_count_chunks(map->sgt);
		map->size = gem->size;

		goto out;
	}

	/*
	 * If we don't have a mapping for this buffer yet, return an SG table
	 * so that host1x can do the mapping for us via the DMA API.
	 */
	map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL);
	if (!map->sgt) {
		err = -ENOMEM;
		goto free;
	}

	if (obj->pages) {
		/*
		 * If the buffer object was allocated from the explicit IOMMU
		 * API code paths, construct an SG table from the pages.
		 */
		err = sg_alloc_table_from_pages(map->sgt, obj->pages, obj->num_pages, 0, gem->size,
						GFP_KERNEL);
		if (err < 0)
			goto free;
	} else {
		/*
		 * If the buffer object had no pages allocated and if it was
		 * not imported, it had to be allocated with the DMA API, so
		 * the DMA API helper can be used.
		 */
		err = dma_get_sgtable(dev, map->sgt, obj->vaddr, obj->iova, gem->size);
		if (err < 0)
			goto free;
	}

	err = dma_map_sgtable(dev, map->sgt, direction, 0);
	if (err)
		goto free_sgt;

out:
	/*
	 * If we've manually mapped the buffer object through the IOMMU, make sure to return the
	 * existing IOVA address of our mapping.
	 */
	if (!obj->mm) {
		map->phys = sg_dma_address(map->sgt->sgl);
		map->chunks = err;
	} else {
		map->phys = obj->iova;
		map->chunks = 1;
	}

	map->size = gem->size;

	return map;

free_sgt:
	sg_free_table(map->sgt);
free:
	kfree(map->sgt);
	kfree(map);
	return ERR_PTR(err);
}

static void tegra_bo_unpin(struct host1x_bo_mapping *map)
{
	if (map->attach) {
		dma_buf_unmap_attachment(map->attach, map->sgt, map->direction);
		dma_buf_detach(map->attach->dmabuf, map->attach);
	} else {
		dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
		sg_free_table(map->sgt);
		kfree(map->sgt);
	}

	host1x_bo_put(map->bo);
	kfree(map);
}

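/*
 * Return a kernel virtual mapping for the buffer: the existing vaddr for
 * DMA API allocations, a dma_buf_vmap() mapping for imported buffers, or a
 * fresh write-combined vmap() of the backing pages otherwise.
 */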
static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct iosys_map map;
	int ret;

	if (obj->vaddr) {
		return obj->vaddr;
	} else if (obj->gem.import_attach) {
		ret = dma_buf_vmap(obj->gem.import_attach->dmabuf, &map);
		return ret ? NULL : map.vaddr;
	} else {
		return vmap(obj->pages, obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));
	}
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(addr);

	if (obj->vaddr)
		return;
	else if (obj->gem.import_attach)
		dma_buf_vunmap(obj->gem.import_attach->dmabuf, &map);
	else
		vunmap(addr);
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_get(&obj->gem);

	return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
};

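/*
 * Map a buffer object into the Tegra DRM IOMMU domain: reserve a range of
 * I/O virtual addresses from the drm_mm allocator and map the buffer's SG
 * table at the resulting IOVA.
 */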
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	int err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	mutex_lock(&tegra->mm_lock);

	err = drm_mm_insert_node_generic(&tegra->mm,
					 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n",
			err);
		goto unlock;
	}

	bo->iova = bo->mm->start;

	bo->size = iommu_map_sgtable(tegra->domain, bo->iova, bo->sgt, prot);
	if (!bo->size) {
		dev_err(tegra->drm->dev, "failed to map buffer\n");
		err = -ENOMEM;
		goto remove;
	}

	mutex_unlock(&tegra->mm_lock);

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
unlock:
	mutex_unlock(&tegra->mm_lock);
	kfree(bo->mm);
	return err;
}

static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	mutex_lock(&tegra->mm_lock);
	iommu_unmap(tegra->domain, bo->iova, bo->size);
	drm_mm_remove_node(bo->mm);
	mutex_unlock(&tegra->mm_lock);

	kfree(bo->mm);

	return 0;
}

static const struct drm_gem_object_funcs tegra_gem_object_funcs = {
	.free = tegra_bo_free_object,
	.export = tegra_gem_prime_export,
	.vm_ops = &tegra_bo_vm_ops,
};

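/*
 * Allocate and initialize the GEM and host1x parts of a buffer object. The
 * requested size is rounded up to a whole number of pages and a fake mmap
 * offset is created so that userspace can map the buffer later on.
 */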
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->gem.funcs = &tegra_gem_object_funcs;

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}

static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		dma_unmap_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
	}
}

static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	int err;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->gem.dev, bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto put_pages;
	}

	err = dma_map_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
	if (err)
		goto free_sgt;

	return 0;

free_sgt:
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return err;
}

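/*
 * Allocate backing storage: when an IOMMU domain is available, individual
 * pages are used and mapped through the IOMMU; otherwise the buffer falls
 * back to a physically contiguous, write-combined DMA allocation.
 */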
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
					 GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}

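/*
 * Create a buffer object of the given size. A minimal usage sketch (the
 * size and flags are illustrative):
 *
 *	struct tegra_bo *bo = tegra_bo_create(drm, SZ_64K,
 *					      DRM_TEGRA_GEM_CREATE_BOTTOM_UP);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 */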
struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size,
					     unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	drm_gem_object_put(&bo->gem);

	return bo;
}

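/*
 * Wrap a foreign DMA-BUF in a tegra_bo: attach to the buffer, map it for
 * the device and, if an IOMMU domain is in use, establish an IOVA mapping.
 * The attachment holds a reference on the dma_buf until the object is
 * freed.
 */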
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct host1x_bo_mapping *mapping, *tmp;
	struct tegra_bo *bo = to_tegra_bo(gem);

	/* remove all mappings of this buffer object from any caches */
	list_for_each_entry_safe(mapping, tmp, &bo->base.mappings, list) {
		if (mapping->cache)
			host1x_bo_unpin(mapping);
		else
			dev_err(gem->dev->dev, "mapping %p stale for device %s\n", mapping,
				dev_name(mapping->dev));
	}

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}

int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

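/*
 * Fault handler for mmap'ed buffers backed by individual pages. Buffers
 * allocated through the DMA API are mapped up front in __tegra_gem_mmap(),
 * so faulting on them indicates an invalid access.
 */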
static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	page = bo->pages[offset];

	return vmf_insert_page(vma, vmf->address, page);
}

const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

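/*
 * Set up the VMA for a buffer: contiguous DMA allocations are mapped
 * immediately via dma_mmap_wc(), while page-backed buffers are populated
 * lazily through tegra_bo_fault() using a write-combined, mixed-map VMA.
 */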
int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;
		int err;

		/*
		 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
		 * and set the vm_pgoff (used as a fake buffer offset by DRM)
		 * to 0 as we want to map the whole buffer.
		 */
		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
				  gem->size);
		if (err < 0) {
			drm_gem_vm_close(vma);
			return err;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vma->vm_flags |= VM_MIXEDMAP;
		vma->vm_flags &= ~VM_PFNMAP;

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}

int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	int err;

	err = drm_gem_mmap(file, vma);
	if (err < 0)
		return err;

	gem = vma->vm_private_data;

	return __tegra_gem_mmap(gem, vma);
}

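/*
 * Exporter side of DMA-BUF sharing: build an SG table describing the buffer
 * (from its pages or via the DMA API) and map it for the importing device.
 */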
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
					      0, gem->size, GFP_KERNEL) < 0)
			goto free;
	} else {
		if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
				    gem->size) < 0)
			goto free;
	}

	if (dma_map_sgtable(attach->dev, sgt, dir, 0))
		goto free;

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);

	sg_free_table(sgt);
	kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}

static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
					    enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sgtable_for_cpu(drm->dev, bo->sgt, DMA_FROM_DEVICE);

	return 0;
}

static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
					  enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sgtable_for_device(drm->dev, bo->sgt, DMA_TO_DEVICE);

	return 0;
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem = buf->priv;
	int err;

	err = drm_gem_mmap_obj(gem, gem->size, vma);
	if (err < 0)
		return err;

	return __tegra_gem_mmap(gem, vma);
}

static int tegra_gem_prime_vmap(struct dma_buf *buf, struct iosys_map *map)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	iosys_map_set_vaddr(map, bo->vaddr);

	return 0;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, struct iosys_map *map)
{
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
	.end_cpu_access = tegra_gem_prime_end_cpu_access,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};

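/*
 * Export a GEM object as a DMA-BUF. Illustrative sketch (the flags follow
 * the usual O_CLOEXEC/O_RDWR convention that DRM PRIME passes through):
 *
 *	struct dma_buf *buf = tegra_gem_prime_export(gem, O_RDWR);
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 */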
struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.exp_name = KBUILD_MODNAME;
	exp_info.owner = gem->dev->driver->fops->owner;
	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return drm_gem_dmabuf_export(gem->dev, &exp_info);
}

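/*
 * Import a DMA-BUF. Buffers that were exported by this device are simply
 * referenced again rather than re-imported through tegra_bo_import().
 */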
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_get(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}

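/*
 * Resolve a userspace GEM handle to a host1x_bo. The lookup takes a
 * reference on the underlying GEM object, which callers drop through
 * host1x_bo_put().
 */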
struct host1x_bo *tegra_gem_lookup(struct drm_file *file, u32 handle)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, handle);
	if (!gem)
		return NULL;

	bo = to_tegra_bo(gem);
	return &bo->base;
}