// SPDX-License-Identifier: GPL-2.0-only
/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#include <drm/drm_drv.h>
#include <drm/drm_dumb_buffers.h>
#include <drm/drm_prime.h>

#include "drm.h"
#include "gem.h"

MODULE_IMPORT_NS("DMA_BUF");

static unsigned int sg_dma_count_chunks(struct scatterlist *sgl, unsigned int nents)
{
	dma_addr_t next = ~(dma_addr_t)0;
	unsigned int count = 0, i;
	struct scatterlist *s;

	for_each_sg(sgl, s, nents, i) {
		/* sg_dma_address(s) is only valid for entries that have sg_dma_len(s) != 0. */
		if (!sg_dma_len(s))
			continue;

		if (sg_dma_address(s) != next) {
			next = sg_dma_address(s) + sg_dma_len(s);
			count++;
		}
	}

	return count;
}

static inline unsigned int sgt_dma_count_chunks(struct sg_table *sgt)
{
	return sg_dma_count_chunks(sgt->sgl, sgt->nents);
}
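/*
 * Example (illustrative only, not compiled): for an sg_table whose DMA
 * segments are [0x80000000, 0x80001000) and [0x80001000, 0x80002000), the
 * second segment begins exactly where the first one ends, so the helper
 * above merges them and reports a single contiguous chunk:
 *
 *	unsigned int chunks = sgt_dma_count_chunks(sgt);	// chunks == 1
 *
 * A gap between segments starts a new chunk; zero-length entries are
 * skipped because their DMA address is not meaningful.
 */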
static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_put(&obj->gem);
}

static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
					      enum dma_data_direction direction)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct drm_gem_object *gem = &obj->gem;
	struct host1x_bo_mapping *map;
	int err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return ERR_PTR(-ENOMEM);

	kref_init(&map->ref);
	map->bo = host1x_bo_get(bo);
	map->direction = direction;
	map->dev = dev;

	/*
	 * Imported buffers need special treatment to satisfy the semantics of DMA-BUF.
	 */
	if (obj->dma_buf) {
		struct dma_buf *buf = obj->dma_buf;

		map->attach = dma_buf_attach(buf, dev);
		if (IS_ERR(map->attach)) {
			err = PTR_ERR(map->attach);
			goto free;
		}

		map->sgt = dma_buf_map_attachment_unlocked(map->attach, direction);
		if (IS_ERR(map->sgt)) {
			dma_buf_detach(buf, map->attach);
			err = PTR_ERR(map->sgt);
			map->sgt = NULL;
			goto free;
		}

		/*
		 * err now holds the chunk count; it is consumed at "out"
		 * below when the buffer isn't mapped through the explicit
		 * IOMMU path.
		 */
		err = sgt_dma_count_chunks(map->sgt);
		map->size = gem->size;

		goto out;
	}

	/*
	 * If we don't have a mapping for this buffer yet, return an SG table
	 * so that host1x can do the mapping for us via the DMA API.
	 */
	map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL);
	if (!map->sgt) {
		err = -ENOMEM;
		goto free;
	}

	if (obj->pages) {
		/*
		 * If the buffer object was allocated from the explicit IOMMU
		 * API code paths, construct an SG table from the pages.
		 */
		err = sg_alloc_table_from_pages(map->sgt, obj->pages, obj->num_pages, 0, gem->size,
						GFP_KERNEL);
		if (err < 0)
			goto free;
	} else {
		/*
		 * If the buffer object had no pages allocated and if it was
		 * not imported, it had to be allocated with the DMA API, so
		 * the DMA API helper can be used.
		 */
		err = dma_get_sgtable(dev, map->sgt, obj->vaddr, obj->iova, gem->size);
		if (err < 0)
			goto free;
	}

	err = dma_map_sgtable(dev, map->sgt, direction, 0);
	if (err)
		goto free_sgt;

out:
	/*
	 * If we've manually mapped the buffer object through the IOMMU, make sure to return the
	 * existing IOVA address of our mapping.
	 */
	if (!obj->mm) {
		map->phys = sg_dma_address(map->sgt->sgl);
		map->chunks = err;
	} else {
		map->phys = obj->iova;
		map->chunks = 1;
	}

	map->size = gem->size;

	return map;

free_sgt:
	sg_free_table(map->sgt);
free:
	kfree(map->sgt);
	kfree(map);
	return ERR_PTR(err);
}

static void tegra_bo_unpin(struct host1x_bo_mapping *map)
{
	if (map->attach) {
		dma_buf_unmap_attachment_unlocked(map->attach, map->sgt,
						  map->direction);
		dma_buf_detach(map->attach->dmabuf, map->attach);
	} else {
		dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
		sg_free_table(map->sgt);
		kfree(map->sgt);
	}

	host1x_bo_put(map->bo);
	kfree(map);
}
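/*
 * Minimal usage sketch (illustrative, not compiled): these two hooks are
 * normally reached through the host1x wrappers rather than called directly.
 * A client pins a buffer for DMA, programs the returned address into the
 * engine and unpins it once the job has completed:
 *
 *	map = host1x_bo_pin(client->dev, bo, DMA_TO_DEVICE, NULL);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 *	// ... submit job using map->phys ...
 *	host1x_bo_unpin(map);
 *
 * The trailing cache argument is an assumption based on the current host1x
 * API; passing NULL is taken to bypass the mapping cache.
 */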
static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct iosys_map map = { 0 };
	void *vaddr;
	int ret;

	if (obj->vaddr)
		return obj->vaddr;

	if (obj->dma_buf) {
		ret = dma_buf_vmap_unlocked(obj->dma_buf, &map);
		if (ret < 0)
			return ERR_PTR(ret);

		return map.vaddr;
	}

	vaddr = vmap(obj->pages, obj->num_pages, VM_MAP,
		     pgprot_writecombine(PAGE_KERNEL));
	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(addr);

	if (obj->vaddr)
		return;

	if (obj->dma_buf)
		return dma_buf_vunmap_unlocked(obj->dma_buf, &map);

	vunmap(addr);
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_get(&obj->gem);

	return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
};

static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	int err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	mutex_lock(&tegra->mm_lock);

	err = drm_mm_insert_node_generic(&tegra->mm,
					 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n",
			err);
		goto unlock;
	}

	bo->iova = bo->mm->start;

	bo->size = iommu_map_sgtable(tegra->domain, bo->iova, bo->sgt, prot);
	if (!bo->size) {
		dev_err(tegra->drm->dev, "failed to map buffer\n");
		err = -ENOMEM;
		goto remove;
	}

	mutex_unlock(&tegra->mm_lock);

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
unlock:
	mutex_unlock(&tegra->mm_lock);
	kfree(bo->mm);
	return err;
}

static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	mutex_lock(&tegra->mm_lock);
	iommu_unmap(tegra->domain, bo->iova, bo->size);
	drm_mm_remove_node(bo->mm);
	mutex_unlock(&tegra->mm_lock);

	kfree(bo->mm);

	return 0;
}
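/*
 * Sketch of how the two IOMMU halves above pair up over the lifetime of a
 * buffer object (this mirrors tegra_bo_alloc() and tegra_bo_free_object()
 * further down; nothing here is new API):
 *
 *	err = tegra_bo_iommu_map(tegra, bo);	// reserve an IOVA range, map bo->sgt
 *	if (err < 0)
 *		return err;
 *	// ... use bo->iova for DMA ...
 *	tegra_bo_iommu_unmap(tegra, bo);	// unmap and release the IOVA node
 */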
static const struct drm_gem_object_funcs tegra_gem_object_funcs = {
	.free = tegra_bo_free_object,
	.export = tegra_gem_prime_export,
	.vm_ops = &tegra_bo_vm_ops,
};

static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->gem.funcs = &tegra_gem_object_funcs;

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}

static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		dma_unmap_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
	}
}

static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	int err;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->gem.dev, bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto put_pages;
	}

	err = dma_map_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
	if (err)
		goto free_sgt;

	return 0;

free_sgt:
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return err;
}

static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
					 GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}

struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size,
					     unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	drm_gem_object_put(&bo->gem);

	return bo;
}
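/*
 * Minimal usage sketch (mirrors tegra_bo_dumb_create() below): create a
 * buffer on behalf of userspace and hand back only the handle. Note that
 * the creation reference has already been transferred to the handle by the
 * time this returns, so the caller must not drop another reference:
 *
 *	u32 handle;
 *	struct tegra_bo *bo;
 *
 *	bo = tegra_bo_create_with_handle(file, drm, size, 0, &handle);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 */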
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	/*
	 * If we need to use IOMMU API to map the dma-buf into the internally managed
	 * domain, map it first to the DRM device to get an sgt.
	 */
	if (tegra->domain) {
		attach = dma_buf_attach(buf, drm->dev);
		if (IS_ERR(attach)) {
			err = PTR_ERR(attach);
			goto free;
		}

		bo->sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE);
		if (IS_ERR(bo->sgt)) {
			err = PTR_ERR(bo->sgt);
			goto detach;
		}

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;

		bo->gem.import_attach = attach;
	}

	/* take our own reference; the caller keeps (and releases) its own */
	get_dma_buf(buf);
	bo->dma_buf = buf;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment_unlocked(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct host1x_bo_mapping *mapping, *tmp;
	struct tegra_bo *bo = to_tegra_bo(gem);

	/* remove all mappings of this buffer object from any caches */
	list_for_each_entry_safe(mapping, tmp, &bo->base.mappings, list) {
		if (mapping->cache)
			host1x_bo_unpin(mapping);
		else
			dev_err(gem->dev->dev, "mapping %p stale for device %s\n", mapping,
				dev_name(mapping->dev));
	}

	if (tegra->domain) {
		tegra_bo_iommu_unmap(tegra, bo);

		if (drm_gem_is_imported(gem)) {
			dma_buf_unmap_attachment_unlocked(gem->import_attach, bo->sgt,
							  DMA_TO_DEVICE);
			dma_buf_detach(gem->import_attach->dmabuf, gem->import_attach);
		}
	}

	tegra_bo_free(gem->dev, bo);

	if (bo->dma_buf)
		dma_buf_put(bo->dma_buf);

	drm_gem_object_release(gem);
	kfree(bo);
}

int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;
	int ret;

	ret = drm_mode_size_dumb(drm, args, tegra->pitch_align, 0);
	if (ret)
		return ret;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	page = bo->pages[offset];

	return vmf_insert_page(vma, vmf->address, page);
}

const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;
		int err;

		/*
		 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
		 * and set the vm_pgoff (used as a fake buffer offset by DRM)
		 * to 0 as we want to map the whole buffer.
		 */
		vm_flags_clear(vma, VM_PFNMAP);
		vma->vm_pgoff = 0;

		err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
				  gem->size);
		if (err < 0) {
			drm_gem_vm_close(vma);
			return err;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}

int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	int err;

	err = drm_gem_mmap(file, vma);
	if (err < 0)
		return err;

	gem = vma->vm_private_data;

	return __tegra_gem_mmap(gem, vma);
}
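/*
 * Userspace view (illustrative): the fake offset obtained from the
 * dumb-buffer or PRIME ioctls is passed to mmap() on the DRM fd and ends up
 * in tegra_drm_mmap() above:
 *
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 drm_fd, offset);
 *
 * IOMMU-backed buffers are then populated page by page via tegra_bo_fault(),
 * while DMA API allocations were remapped up front through dma_mmap_wc().
 */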
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
					      0, gem->size, GFP_KERNEL) < 0)
			goto free;
	} else {
		if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
				    gem->size) < 0)
			goto free;
	}

	if (dma_map_sgtable(attach->dev, sgt, dir, 0))
		goto free;

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);

	sg_free_table(sgt);
	kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}

static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
					    enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sgtable_for_cpu(drm->dev, bo->sgt, DMA_FROM_DEVICE);

	return 0;
}

static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
					  enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sgtable_for_device(drm->dev, bo->sgt, DMA_TO_DEVICE);

	return 0;
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem = buf->priv;
	int err;

	err = drm_gem_mmap_obj(gem, gem->size, vma);
	if (err < 0)
		return err;

	return __tegra_gem_mmap(gem, vma);
}

static int tegra_gem_prime_vmap(struct dma_buf *buf, struct iosys_map *map)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	void *vaddr;

	vaddr = tegra_bo_mmap(&bo->base);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	iosys_map_set_vaddr(map, vaddr);

	return 0;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, struct iosys_map *map)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	tegra_bo_munmap(&bo->base, map->vaddr);
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
	.end_cpu_access = tegra_gem_prime_end_cpu_access,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};

struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.exp_name = KBUILD_MODNAME;
	exp_info.owner = gem->dev->driver->fops->owner;
	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return drm_gem_dmabuf_export(gem->dev, &exp_info);
}

struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_get(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}

struct host1x_bo *tegra_gem_lookup(struct drm_file *file, u32 handle)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, handle);
	if (!gem)
		return NULL;

	bo = to_tegra_bo(gem);
	return &bo->base;
}
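/*
 * Usage sketch (illustrative): a host1x client resolving a userspace handle
 * into a host1x_bo for job submission. The reference taken by
 * drm_gem_object_lookup() is inherited by the caller and must eventually be
 * balanced with host1x_bo_put():
 *
 *	struct host1x_bo *bo = tegra_gem_lookup(file, handle);
 *	if (!bo)
 *		return -ENOENT;
 *	// ... build and submit the job ...
 *	host1x_bo_put(bo);
 */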