/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo)
{
	return container_of(bo, struct tegra_bo, base);
}

static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct drm_device *drm = obj->gem.dev;

	mutex_lock(&drm->struct_mutex);
	drm_gem_object_unreference(&obj->gem);
	mutex_unlock(&drm->struct_mutex);
}

static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	return obj->paddr;
}

static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
{
}

static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	return obj->vaddr;
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
}

static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	return obj->vaddr + page * PAGE_SIZE;
}

static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
			    void *addr)
{
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct drm_device *drm = obj->gem.dev;

	mutex_lock(&drm->struct_mutex);
	drm_gem_object_reference(&obj->gem);
	mutex_unlock(&drm->struct_mutex);

	return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
	.kmap = tegra_bo_kmap,
	.kunmap = tegra_bo_kunmap,
};

static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	ssize_t err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	err = drm_mm_insert_node_generic(&tegra->mm, bo->mm, bo->gem.size,
					 PAGE_SIZE, 0, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %zd\n",
			err);
		goto free;
	}

	bo->paddr = bo->mm->start;

	err = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
			   bo->sgt->nents, prot);
	if (err < 0) {
		dev_err(tegra->drm->dev, "failed to map buffer: %zd\n", err);
		goto remove;
	}

	bo->size = err;

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
free:
	kfree(bo->mm);
	return err;
}

static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	iommu_unmap(tegra->domain, bo->paddr, bo->size);
	drm_mm_remove_node(bo->mm);
	kfree(bo->mm);

	return 0;
}

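/*
 * Illustrative only (not part of this file): host1x clients consume the
 * tegra_bo_ops vtable above through the host1x_bo_*() wrappers. A pin/unpin
 * cycle looks roughly like this; "bo" is a hypothetical struct host1x_bo
 * pointer owned by the caller.
 *
 *	struct sg_table *sgt;
 *	dma_addr_t addr = host1x_bo_pin(bo, &sgt);
 *
 *	... program addr into a command stream ...
 *
 *	host1x_bo_unpin(bo, sgt);
 *
 * Because Tegra buffers are contiguous from the device's point of view
 * (physically, or through the IOMMU), tegra_bo_pin() can simply return the
 * device address and never fills in an SG table.
 */
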
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}

static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr,
				      bo->paddr);
	}
}

static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	struct scatterlist *s;
	struct sg_table *sgt;
	unsigned int i;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
	if (IS_ERR(sgt))
		goto put_pages;

	/*
	 * Fake up the SG table so that dma_map_sg() can be used to flush the
	 * pages associated with it. Note that this relies on the fact that
	 * the DMA API doesn't hook into IOMMU on Tegra, therefore mapping is
	 * only cache maintenance.
	 *
	 * TODO: Replace this by drm_clflush_sg() once it can be implemented
	 * without relying on symbols that are not exported.
	 */
	for_each_sg(sgt->sgl, s, sgt->nents, i)
		sg_dma_address(s) = sg_phys(s);

	if (dma_map_sg(drm->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE) == 0)
		goto release_sgt;

	bo->sgt = sgt;

	return 0;

release_sgt:
	sg_free_table(sgt);
	kfree(sgt);
	sgt = ERR_PTR(-ENOMEM);
put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return PTR_ERR(sgt);
}

static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
						   GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}

struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

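/*
 * Usage sketch (illustrative, not a call site in this file): in-kernel users
 * such as the fbdev emulation allocate a buffer with tegra_bo_create() and
 * propagate the error pointer. "size" here is a hypothetical byte count.
 *
 *	struct tegra_bo *bo = tegra_bo_create(drm, size, 0);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 */
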
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size,
					     unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	/* The handle now holds the only reference. */
	drm_gem_object_unreference_unlocked(&bo->gem);

	return bo;
}

static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (!bo->sgt) {
		err = -ENOMEM;
		goto detach;
	}

	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	} else {
		/* Without an IOMMU, imported buffers must be contiguous. */
		if (bo->sgt->nents > 1) {
			err = -EINVAL;
			goto detach;
		}

		bo->paddr = sg_dma_address(bo->sgt->sgl);
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}

int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
			     u32 handle, u64 *offset)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	mutex_lock(&drm->struct_mutex);

	gem = drm_gem_object_lookup(drm, file, handle);
	if (!gem) {
		dev_err(drm->dev, "failed to lookup GEM object\n");
		mutex_unlock(&drm->struct_mutex);
		return -EINVAL;
	}

	bo = to_tegra_bo(gem);

	*offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

	drm_gem_object_unreference(gem);

	mutex_unlock(&drm->struct_mutex);

	return 0;
}

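/*
 * From userspace, the two dumb-buffer entry points above are typically
 * driven via libdrm, roughly as below (illustrative sketch, error handling
 * omitted; "fd" is a hypothetical open DRM device file descriptor):
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1920, .height = 1080, .bpp = 32,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *
 *	struct drm_mode_map_dumb map = { .handle = create.handle };
 *	drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *
 *	void *ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, map.offset);
 *
 * The mmap() call lands in tegra_drm_mmap() below via the fake offset
 * returned by tegra_bo_dumb_map_offset().
 */
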
static int tegra_bo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;
	int err;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		 PAGE_SHIFT;
	page = bo->pages[offset];

	err = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
	switch (err) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		return VM_FAULT_NOPAGE;

	case -ENOMEM:
		return VM_FAULT_OOM;
	}

	return VM_FAULT_SIGBUS;
}

const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int ret;

	ret = drm_gem_mmap(file, vma);
	if (ret)
		return ret;

	gem = vma->vm_private_data;
	bo = to_tegra_bo(gem);

	if (!bo->pages) {
		/*
		 * Contiguous buffers are remapped through the DMA API, which
		 * interprets vm_pgoff as an offset into the buffer, so clear
		 * it for the duration of the call and restore it afterwards.
		 */
		unsigned long vm_pgoff = vma->vm_pgoff;

		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		ret = dma_mmap_writecombine(gem->dev->dev, vma, bo->vaddr,
					    bo->paddr, gem->size);
		if (ret) {
			drm_gem_vm_close(vma);
			return ret;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		/*
		 * Page-backed buffers are faulted in one page at a time by
		 * tegra_bo_fault().
		 */
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vma->vm_flags |= VM_MIXEDMAP;
		vma->vm_flags &= ~VM_PFNMAP;

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}

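/*
 * The dma-buf hooks below implement PRIME buffer sharing. A userspace
 * round-trip through libdrm looks roughly like this (illustrative;
 * "drm_fd" and "handle" are hypothetical):
 *
 *	int prime_fd;
 *	drmPrimeHandleToFD(drm_fd, handle, DRM_CLOEXEC, &prime_fd);
 *
 *	uint32_t imported;
 *	drmPrimeFDToHandle(drm_fd, prime_fd, &imported);
 *
 * The export side ends up in tegra_gem_prime_export(), the import side in
 * tegra_gem_prime_import().
 */
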
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		struct scatterlist *sg;
		unsigned int i;

		if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
			goto free;

		for_each_sg(sgt->sgl, sg, bo->num_pages, i)
			sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free;
	} else {
		/* Contiguous buffers export a single-entry SG table. */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free;

		sg_dma_address(sgt->sgl) = bo->paddr;
		sg_dma_len(sgt->sgl) = gem->size;
	}

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	sg_free_table(sgt);
	kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}

static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf,
					 unsigned long page)
{
	return NULL;
}

static void tegra_gem_prime_kunmap_atomic(struct dma_buf *buf,
					  unsigned long page, void *addr)
{
}

static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
{
	return NULL;
}

static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
				   void *addr)
{
}

static int tegra_gem_prime_mmap(struct dma_buf *buf,
				struct vm_area_struct *vma)
{
	return -EINVAL;
}

static void *tegra_gem_prime_vmap(struct dma_buf *buf)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	/* NULL for page-backed buffers, which have no kernel mapping. */
	return bo->vaddr;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
{
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.kmap_atomic = tegra_gem_prime_kmap_atomic,
	.kunmap_atomic = tegra_gem_prime_kunmap_atomic,
	.kmap = tegra_gem_prime_kmap,
	.kunmap = tegra_gem_prime_kunmap,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};

struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
				       struct drm_gem_object *gem,
				       int flags)
{
	return dma_buf_export(gem, &tegra_gem_prime_dmabuf_ops, gem->size,
			      flags, NULL);
}

struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		/*
		 * If the buffer was exported by this device, reuse the
		 * existing GEM object instead of reimporting it.
		 */
		if (gem->dev == drm) {
			drm_gem_object_reference(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}

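/*
 * Wiring sketch (for reference, assuming the usual PRIME plumbing): the
 * export/import hooks above are hooked up in the driver's struct drm_driver
 * (see drm.c in this driver), roughly:
 *
 *	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 *	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 *	.gem_prime_export = tegra_gem_prime_export,
 *	.gem_prime_import = tegra_gem_prime_import,
 */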