// SPDX-License-Identifier: GPL-2.0-only
/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#include <drm/drm_drv.h>
#include <drm/drm_prime.h>

#include "drm.h"
#include "gem.h"

MODULE_IMPORT_NS("DMA_BUF");

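/*
 * Count the number of contiguous DMA chunks in an SG list. Adjacent
 * entries whose DMA addresses are contiguous are folded into a single
 * chunk; entries with a zero DMA length are skipped.
 */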
static unsigned int sg_dma_count_chunks(struct scatterlist *sgl, unsigned int nents)
{
	dma_addr_t next = ~(dma_addr_t)0;
	unsigned int count = 0, i;
	struct scatterlist *s;

	for_each_sg(sgl, s, nents, i) {
		/* sg_dma_address(s) is only valid for entries that have sg_dma_len(s) != 0. */
		if (!sg_dma_len(s))
			continue;

		if (sg_dma_address(s) != next) {
			next = sg_dma_address(s) + sg_dma_len(s);
			count++;
		}
	}

	return count;
}

static inline unsigned int sgt_dma_count_chunks(struct sg_table *sgt)
{
	return sg_dma_count_chunks(sgt->sgl, sgt->nents);
}

static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_put(&obj->gem);
}

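/*
 * Map a buffer object for DMA access by the given device. Imported
 * dma-bufs are attached and mapped through the dma-buf API, page-backed
 * buffers get an SG table built from their pages, and DMA API allocations
 * have their SG table derived via dma_get_sgtable().
 */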
static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
					      enum dma_data_direction direction)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct drm_gem_object *gem = &obj->gem;
	struct host1x_bo_mapping *map;
	int err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return ERR_PTR(-ENOMEM);

	kref_init(&map->ref);
	map->bo = host1x_bo_get(bo);
	map->direction = direction;
	map->dev = dev;

	/*
	 * Imported buffers need special treatment to satisfy the semantics of DMA-BUF.
	 */
	if (obj->dma_buf) {
		struct dma_buf *buf = obj->dma_buf;

		map->attach = dma_buf_attach(buf, dev);
		if (IS_ERR(map->attach)) {
			err = PTR_ERR(map->attach);
			goto free;
		}

		map->sgt = dma_buf_map_attachment_unlocked(map->attach, direction);
		if (IS_ERR(map->sgt)) {
			dma_buf_detach(buf, map->attach);
			err = PTR_ERR(map->sgt);
			map->sgt = NULL;
			goto free;
		}

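		/*
		 * On success, the number of discontiguous chunks is carried
		 * in err and picked up as map->chunks at the out label.
		 */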
		err = sgt_dma_count_chunks(map->sgt);
		map->size = gem->size;

		goto out;
	}

	/*
	 * If we don't have a mapping for this buffer yet, return an SG table
	 * so that host1x can do the mapping for us via the DMA API.
	 */
	map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL);
	if (!map->sgt) {
		err = -ENOMEM;
		goto free;
	}

	if (obj->pages) {
		/*
		 * If the buffer object was allocated from the explicit IOMMU
		 * API code paths, construct an SG table from the pages.
		 */
		err = sg_alloc_table_from_pages(map->sgt, obj->pages, obj->num_pages, 0, gem->size,
						GFP_KERNEL);
		if (err < 0)
			goto free;
	} else {
		/*
		 * If the buffer object had no pages allocated and if it was
		 * not imported, it had to be allocated with the DMA API, so
		 * the DMA API helper can be used.
		 */
		err = dma_get_sgtable(dev, map->sgt, obj->vaddr, obj->iova, gem->size);
		if (err < 0)
			goto free;
	}

	err = dma_map_sgtable(dev, map->sgt, direction, 0);
	if (err)
		goto free_sgt;

out:
	/*
	 * If we've manually mapped the buffer object through the IOMMU, make sure to return the
	 * existing IOVA address of our mapping.
	 */
	if (!obj->mm) {
		map->phys = sg_dma_address(map->sgt->sgl);
		map->chunks = err;
	} else {
		map->phys = obj->iova;
		map->chunks = 1;
	}

	map->size = gem->size;

	return map;

free_sgt:
	sg_free_table(map->sgt);
free:
	kfree(map->sgt);
	kfree(map);
	return ERR_PTR(err);
}

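/*
 * Undo tegra_bo_pin(): unmap and detach imported dma-bufs, or unmap and
 * free the SG table for buffers mapped via the DMA API, then drop the
 * reference on the buffer object.
 */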
static void tegra_bo_unpin(struct host1x_bo_mapping *map)
{
	if (map->attach) {
		dma_buf_unmap_attachment_unlocked(map->attach, map->sgt,
						  map->direction);
		dma_buf_detach(map->attach->dmabuf, map->attach);
	} else {
		dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
		sg_free_table(map->sgt);
		kfree(map->sgt);
	}

	host1x_bo_put(map->bo);
	kfree(map);
}

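/*
 * Return a kernel virtual address for the buffer: DMA API allocations are
 * permanently mapped, imported dma-bufs are vmap()ed through the dma-buf
 * API and page-backed buffers are mapped write-combined via vmap().
 */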
static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct iosys_map map = { 0 };
	void *vaddr;
	int ret;

	if (obj->vaddr)
		return obj->vaddr;

	if (obj->dma_buf) {
		ret = dma_buf_vmap_unlocked(obj->dma_buf, &map);
		if (ret < 0)
			return ERR_PTR(ret);

		return map.vaddr;
	}

	vaddr = vmap(obj->pages, obj->num_pages, VM_MAP,
		     pgprot_writecombine(PAGE_KERNEL));
	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(addr);

	if (obj->vaddr)
		return;

	if (obj->dma_buf)
		return dma_buf_vunmap_unlocked(obj->dma_buf, &map);

	vunmap(addr);
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_get(&obj->gem);

	return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
};

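/*
 * Allocate a region of I/O virtual address space from the drm_mm allocator
 * and map the buffer's SG table into the Tegra IOMMU domain at that IOVA.
 */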
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	int err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	mutex_lock(&tegra->mm_lock);

	err = drm_mm_insert_node_generic(&tegra->mm,
					 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n",
			err);
		goto unlock;
	}

	bo->iova = bo->mm->start;

	bo->size = iommu_map_sgtable(tegra->domain, bo->iova, bo->sgt, prot);
	if (!bo->size) {
		dev_err(tegra->drm->dev, "failed to map buffer\n");
		err = -ENOMEM;
		goto remove;
	}

	mutex_unlock(&tegra->mm_lock);

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
unlock:
	mutex_unlock(&tegra->mm_lock);
	kfree(bo->mm);
	return err;
}

static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	mutex_lock(&tegra->mm_lock);
	iommu_unmap(tegra->domain, bo->iova, bo->size);
	drm_mm_remove_node(bo->mm);
	mutex_unlock(&tegra->mm_lock);

	kfree(bo->mm);

	return 0;
}

static const struct drm_gem_object_funcs tegra_gem_object_funcs = {
	.free = tegra_bo_free_object,
	.export = tegra_gem_prime_export,
	.vm_ops = &tegra_bo_vm_ops,
};

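/*
 * Allocate and initialize a GEM object of the given size (rounded up to a
 * page multiple), including its host1x wrapper and fake mmap offset.
 */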
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->gem.funcs = &tegra_gem_object_funcs;

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}

static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		dma_unmap_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
	}
}

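/*
 * Back the buffer with shmem pages, build an SG table from them and map
 * the table for DMA with the DRM device.
 */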
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	int err;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->gem.dev, bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto put_pages;
	}

	err = dma_map_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
	if (err)
		goto free_sgt;

	return 0;

free_sgt:
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return err;
}

static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
					 GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}

struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size,
					     unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	drm_gem_object_put(&bo->gem);

	return bo;
}

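/*
 * Wrap a foreign dma-buf in a new buffer object. If an IOMMU domain is
 * managed internally, the dma-buf is first mapped to the DRM device to
 * obtain an SG table that can then be mapped into the domain.
 */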
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	/*
	 * If we need to use the IOMMU API to map the dma-buf into the
	 * internally managed domain, map it first to the DRM device to get
	 * an SG table.
	 */
	if (tegra->domain) {
		attach = dma_buf_attach(buf, drm->dev);
		if (IS_ERR(attach)) {
			err = PTR_ERR(attach);
			goto free;
		}

		bo->sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE);
		if (IS_ERR(bo->sgt)) {
			err = PTR_ERR(bo->sgt);
			goto detach;
		}

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;

		bo->gem.import_attach = attach;
	}

	get_dma_buf(buf);
	bo->dma_buf = buf;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment_unlocked(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct host1x_bo_mapping *mapping, *tmp;
	struct tegra_bo *bo = to_tegra_bo(gem);

	/* remove all mappings of this buffer object from any caches */
	list_for_each_entry_safe(mapping, tmp, &bo->base.mappings, list) {
		if (mapping->cache)
			host1x_bo_unpin(mapping);
		else
			dev_err(gem->dev->dev, "mapping %p stale for device %s\n", mapping,
				dev_name(mapping->dev));
	}

	if (tegra->domain) {
		tegra_bo_iommu_unmap(tegra, bo);

		if (drm_gem_is_imported(gem)) {
			dma_buf_unmap_attachment_unlocked(gem->import_attach, bo->sgt,
							  DMA_TO_DEVICE);
			dma_buf_detach(gem->import_attach->dmabuf, gem->import_attach);
		}
	}

	tegra_bo_free(gem->dev, bo);

	if (bo->dma_buf)
		dma_buf_put(bo->dma_buf);

	drm_gem_object_release(gem);
	kfree(bo);
}

int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

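/*
 * Fault handler for mmap()ed buffers backed by individual pages: insert
 * the page that corresponds to the faulting address into the VMA. Buffers
 * without pages are mapped upfront in __tegra_gem_mmap() and should not
 * fault here.
 */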
static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	page = bo->pages[offset];

	return vmf_insert_page(vma, vmf->address, page);
}

const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

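/*
 * Set up the VMA for a buffer: DMA API allocations are mapped upfront
 * with dma_mmap_wc(), whereas page-backed buffers rely on the fault
 * handler and are marked VM_MIXEDMAP with write-combined page protection.
 */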
int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;
		int err;

		/*
		 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
		 * and set the vm_pgoff (used as a fake buffer offset by DRM)
		 * to 0 as we want to map the whole buffer.
		 */
		vm_flags_clear(vma, VM_PFNMAP);
		vma->vm_pgoff = 0;

		err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
				  gem->size);
		if (err < 0) {
			drm_gem_vm_close(vma);
			return err;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}

int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	int err;

	err = drm_gem_mmap(file, vma);
	if (err < 0)
		return err;

	gem = vma->vm_private_data;

	return __tegra_gem_mmap(gem, vma);
}

static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
					      0, gem->size, GFP_KERNEL) < 0)
			goto free;
	} else {
		if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
				    gem->size) < 0)
			goto free;
	}

	if (dma_map_sgtable(attach->dev, sgt, dir, 0))
		goto free;

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);

	sg_free_table(sgt);
	kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}

static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
					    enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sgtable_for_cpu(drm->dev, bo->sgt, DMA_FROM_DEVICE);

	return 0;
}

static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
					  enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sgtable_for_device(drm->dev, bo->sgt, DMA_TO_DEVICE);

	return 0;
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem = buf->priv;
	int err;

	err = drm_gem_mmap_obj(gem, gem->size, vma);
	if (err < 0)
		return err;

	return __tegra_gem_mmap(gem, vma);
}

static int tegra_gem_prime_vmap(struct dma_buf *buf, struct iosys_map *map)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	void *vaddr;

	vaddr = tegra_bo_mmap(&bo->base);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	iosys_map_set_vaddr(map, vaddr);

	return 0;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, struct iosys_map *map)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	tegra_bo_munmap(&bo->base, map->vaddr);
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
	.end_cpu_access = tegra_gem_prime_end_cpu_access,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};

struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.exp_name = KBUILD_MODNAME;
	exp_info.owner = gem->dev->driver->fops->owner;
	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return drm_gem_dmabuf_export(gem->dev, &exp_info);
}

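/*
 * If the dma-buf was exported by this driver for the same device, reuse
 * the existing GEM object instead of importing a second copy.
 */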
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_get(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}

struct host1x_bo *tegra_gem_lookup(struct drm_file *file, u32 handle)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, handle);
	if (!gem)
		return NULL;

	bo = to_tegra_bo(gem);
	return &bo->base;
}