1 /* 2 * NVIDIA Tegra DRM GEM helper functions 3 * 4 * Copyright (C) 2012 Sascha Hauer, Pengutronix 5 * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved. 6 * 7 * Based on the GEM/CMA helpers 8 * 9 * Copyright (c) 2011 Samsung Electronics Co., Ltd. 10 * 11 * This program is free software; you can redistribute it and/or modify 12 * it under the terms of the GNU General Public License version 2 as 13 * published by the Free Software Foundation. 14 */ 15 16 #include <linux/dma-buf.h> 17 #include <linux/iommu.h> 18 #include <drm/tegra_drm.h> 19 20 #include "drm.h" 21 #include "gem.h" 22 23 static void tegra_bo_put(struct host1x_bo *bo) 24 { 25 struct tegra_bo *obj = host1x_to_tegra_bo(bo); 26 27 drm_gem_object_put_unlocked(&obj->gem); 28 } 29 30 static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt) 31 { 32 struct tegra_bo *obj = host1x_to_tegra_bo(bo); 33 34 *sgt = obj->sgt; 35 36 return obj->paddr; 37 } 38 39 static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt) 40 { 41 } 42 43 static void *tegra_bo_mmap(struct host1x_bo *bo) 44 { 45 struct tegra_bo *obj = host1x_to_tegra_bo(bo); 46 47 if (obj->vaddr) 48 return obj->vaddr; 49 else if (obj->gem.import_attach) 50 return dma_buf_vmap(obj->gem.import_attach->dmabuf); 51 else 52 return vmap(obj->pages, obj->num_pages, VM_MAP, 53 pgprot_writecombine(PAGE_KERNEL)); 54 } 55 56 static void tegra_bo_munmap(struct host1x_bo *bo, void *addr) 57 { 58 struct tegra_bo *obj = host1x_to_tegra_bo(bo); 59 60 if (obj->vaddr) 61 return; 62 else if (obj->gem.import_attach) 63 dma_buf_vunmap(obj->gem.import_attach->dmabuf, addr); 64 else 65 vunmap(addr); 66 } 67 68 static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page) 69 { 70 struct tegra_bo *obj = host1x_to_tegra_bo(bo); 71 72 if (obj->vaddr) 73 return obj->vaddr + page * PAGE_SIZE; 74 else if (obj->gem.import_attach) 75 return dma_buf_kmap(obj->gem.import_attach->dmabuf, page); 76 else 77 return vmap(obj->pages + page, 1, VM_MAP, 78 pgprot_writecombine(PAGE_KERNEL)); 79 } 80 81 static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page, 82 void *addr) 83 { 84 struct tegra_bo *obj = host1x_to_tegra_bo(bo); 85 86 if (obj->vaddr) 87 return; 88 else if (obj->gem.import_attach) 89 dma_buf_kunmap(obj->gem.import_attach->dmabuf, page, addr); 90 else 91 vunmap(addr); 92 } 93 94 static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo) 95 { 96 struct tegra_bo *obj = host1x_to_tegra_bo(bo); 97 98 drm_gem_object_get(&obj->gem); 99 100 return bo; 101 } 102 103 static const struct host1x_bo_ops tegra_bo_ops = { 104 .get = tegra_bo_get, 105 .put = tegra_bo_put, 106 .pin = tegra_bo_pin, 107 .unpin = tegra_bo_unpin, 108 .mmap = tegra_bo_mmap, 109 .munmap = tegra_bo_munmap, 110 .kmap = tegra_bo_kmap, 111 .kunmap = tegra_bo_kunmap, 112 }; 113 114 static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo) 115 { 116 int prot = IOMMU_READ | IOMMU_WRITE; 117 int err; 118 119 if (bo->mm) 120 return -EBUSY; 121 122 bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL); 123 if (!bo->mm) 124 return -ENOMEM; 125 126 mutex_lock(&tegra->mm_lock); 127 128 err = drm_mm_insert_node_generic(&tegra->mm, 129 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0); 130 if (err < 0) { 131 dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n", 132 err); 133 goto unlock; 134 } 135 136 bo->paddr = bo->mm->start; 137 138 bo->size = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl, 139 bo->sgt->nents, prot); 140 if (!bo->size) { 141 dev_err(tegra->drm->dev, "failed to map buffer\n"); 142 err = -ENOMEM; 143 goto remove; 144 } 145 146 mutex_unlock(&tegra->mm_lock); 147 148 return 0; 149 150 remove: 151 drm_mm_remove_node(bo->mm); 152 unlock: 153 mutex_unlock(&tegra->mm_lock); 154 kfree(bo->mm); 155 return err; 156 } 157 158 static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo) 159 { 160 if (!bo->mm) 161 return 0; 162 163 mutex_lock(&tegra->mm_lock); 164 iommu_unmap(tegra->domain, bo->paddr, bo->size); 165 drm_mm_remove_node(bo->mm); 166 mutex_unlock(&tegra->mm_lock); 167 168 kfree(bo->mm); 169 170 return 0; 171 } 172 173 static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm, 174 size_t size) 175 { 176 struct tegra_bo *bo; 177 int err; 178 179 bo = kzalloc(sizeof(*bo), GFP_KERNEL); 180 if (!bo) 181 return ERR_PTR(-ENOMEM); 182 183 host1x_bo_init(&bo->base, &tegra_bo_ops); 184 size = round_up(size, PAGE_SIZE); 185 186 err = drm_gem_object_init(drm, &bo->gem, size); 187 if (err < 0) 188 goto free; 189 190 err = drm_gem_create_mmap_offset(&bo->gem); 191 if (err < 0) 192 goto release; 193 194 return bo; 195 196 release: 197 drm_gem_object_release(&bo->gem); 198 free: 199 kfree(bo); 200 return ERR_PTR(err); 201 } 202 203 static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo) 204 { 205 if (bo->pages) { 206 drm_gem_put_pages(&bo->gem, bo->pages, true, true); 207 sg_free_table(bo->sgt); 208 kfree(bo->sgt); 209 } else if (bo->vaddr) { 210 dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->paddr); 211 } 212 } 213 214 static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo) 215 { 216 struct scatterlist *s; 217 unsigned int i; 218 219 bo->pages = drm_gem_get_pages(&bo->gem); 220 if (IS_ERR(bo->pages)) 221 return PTR_ERR(bo->pages); 222 223 bo->num_pages = bo->gem.size >> PAGE_SHIFT; 224 225 bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages); 226 if (IS_ERR(bo->sgt)) 227 goto put_pages; 228 229 /* 230 * Fake up the SG table so that dma_sync_sg_for_device() can be used 231 * to flush the pages associated with it. 232 * 233 * TODO: Replace this by drm_clflash_sg() once it can be implemented 234 * without relying on symbols that are not exported. 235 */ 236 for_each_sg(bo->sgt->sgl, s, bo->sgt->nents, i) 237 sg_dma_address(s) = sg_phys(s); 238 239 dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents, 240 DMA_TO_DEVICE); 241 242 return 0; 243 244 put_pages: 245 drm_gem_put_pages(&bo->gem, bo->pages, false, false); 246 return PTR_ERR(bo->sgt); 247 } 248 249 static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo) 250 { 251 struct tegra_drm *tegra = drm->dev_private; 252 int err; 253 254 if (tegra->domain) { 255 err = tegra_bo_get_pages(drm, bo); 256 if (err < 0) 257 return err; 258 259 err = tegra_bo_iommu_map(tegra, bo); 260 if (err < 0) { 261 tegra_bo_free(drm, bo); 262 return err; 263 } 264 } else { 265 size_t size = bo->gem.size; 266 267 bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->paddr, 268 GFP_KERNEL | __GFP_NOWARN); 269 if (!bo->vaddr) { 270 dev_err(drm->dev, 271 "failed to allocate buffer of size %zu\n", 272 size); 273 return -ENOMEM; 274 } 275 } 276 277 return 0; 278 } 279 280 struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size, 281 unsigned long flags) 282 { 283 struct tegra_bo *bo; 284 int err; 285 286 bo = tegra_bo_alloc_object(drm, size); 287 if (IS_ERR(bo)) 288 return bo; 289 290 err = tegra_bo_alloc(drm, bo); 291 if (err < 0) 292 goto release; 293 294 if (flags & DRM_TEGRA_GEM_CREATE_TILED) 295 bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED; 296 297 if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP) 298 bo->flags |= TEGRA_BO_BOTTOM_UP; 299 300 return bo; 301 302 release: 303 drm_gem_object_release(&bo->gem); 304 kfree(bo); 305 return ERR_PTR(err); 306 } 307 308 struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file, 309 struct drm_device *drm, 310 size_t size, 311 unsigned long flags, 312 u32 *handle) 313 { 314 struct tegra_bo *bo; 315 int err; 316 317 bo = tegra_bo_create(drm, size, flags); 318 if (IS_ERR(bo)) 319 return bo; 320 321 err = drm_gem_handle_create(file, &bo->gem, handle); 322 if (err) { 323 tegra_bo_free_object(&bo->gem); 324 return ERR_PTR(err); 325 } 326 327 drm_gem_object_put_unlocked(&bo->gem); 328 329 return bo; 330 } 331 332 static struct tegra_bo *tegra_bo_import(struct drm_device *drm, 333 struct dma_buf *buf) 334 { 335 struct tegra_drm *tegra = drm->dev_private; 336 struct dma_buf_attachment *attach; 337 struct tegra_bo *bo; 338 int err; 339 340 bo = tegra_bo_alloc_object(drm, buf->size); 341 if (IS_ERR(bo)) 342 return bo; 343 344 attach = dma_buf_attach(buf, drm->dev); 345 if (IS_ERR(attach)) { 346 err = PTR_ERR(attach); 347 goto free; 348 } 349 350 get_dma_buf(buf); 351 352 bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE); 353 if (IS_ERR(bo->sgt)) { 354 err = PTR_ERR(bo->sgt); 355 goto detach; 356 } 357 358 if (tegra->domain) { 359 err = tegra_bo_iommu_map(tegra, bo); 360 if (err < 0) 361 goto detach; 362 } else { 363 if (bo->sgt->nents > 1) { 364 err = -EINVAL; 365 goto detach; 366 } 367 368 bo->paddr = sg_dma_address(bo->sgt->sgl); 369 } 370 371 bo->gem.import_attach = attach; 372 373 return bo; 374 375 detach: 376 if (!IS_ERR_OR_NULL(bo->sgt)) 377 dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE); 378 379 dma_buf_detach(buf, attach); 380 dma_buf_put(buf); 381 free: 382 drm_gem_object_release(&bo->gem); 383 kfree(bo); 384 return ERR_PTR(err); 385 } 386 387 void tegra_bo_free_object(struct drm_gem_object *gem) 388 { 389 struct tegra_drm *tegra = gem->dev->dev_private; 390 struct tegra_bo *bo = to_tegra_bo(gem); 391 392 if (tegra->domain) 393 tegra_bo_iommu_unmap(tegra, bo); 394 395 if (gem->import_attach) { 396 dma_buf_unmap_attachment(gem->import_attach, bo->sgt, 397 DMA_TO_DEVICE); 398 drm_prime_gem_destroy(gem, NULL); 399 } else { 400 tegra_bo_free(gem->dev, bo); 401 } 402 403 drm_gem_object_release(gem); 404 kfree(bo); 405 } 406 407 int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm, 408 struct drm_mode_create_dumb *args) 409 { 410 unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); 411 struct tegra_drm *tegra = drm->dev_private; 412 struct tegra_bo *bo; 413 414 args->pitch = round_up(min_pitch, tegra->pitch_align); 415 args->size = args->pitch * args->height; 416 417 bo = tegra_bo_create_with_handle(file, drm, args->size, 0, 418 &args->handle); 419 if (IS_ERR(bo)) 420 return PTR_ERR(bo); 421 422 return 0; 423 } 424 425 static int tegra_bo_fault(struct vm_fault *vmf) 426 { 427 struct vm_area_struct *vma = vmf->vma; 428 struct drm_gem_object *gem = vma->vm_private_data; 429 struct tegra_bo *bo = to_tegra_bo(gem); 430 struct page *page; 431 pgoff_t offset; 432 int err; 433 434 if (!bo->pages) 435 return VM_FAULT_SIGBUS; 436 437 offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT; 438 page = bo->pages[offset]; 439 440 err = vm_insert_page(vma, vmf->address, page); 441 switch (err) { 442 case -EAGAIN: 443 case 0: 444 case -ERESTARTSYS: 445 case -EINTR: 446 case -EBUSY: 447 return VM_FAULT_NOPAGE; 448 449 case -ENOMEM: 450 return VM_FAULT_OOM; 451 } 452 453 return VM_FAULT_SIGBUS; 454 } 455 456 const struct vm_operations_struct tegra_bo_vm_ops = { 457 .fault = tegra_bo_fault, 458 .open = drm_gem_vm_open, 459 .close = drm_gem_vm_close, 460 }; 461 462 static int tegra_gem_mmap(struct drm_gem_object *gem, 463 struct vm_area_struct *vma) 464 { 465 struct tegra_bo *bo = to_tegra_bo(gem); 466 467 if (!bo->pages) { 468 unsigned long vm_pgoff = vma->vm_pgoff; 469 int err; 470 471 /* 472 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), 473 * and set the vm_pgoff (used as a fake buffer offset by DRM) 474 * to 0 as we want to map the whole buffer. 475 */ 476 vma->vm_flags &= ~VM_PFNMAP; 477 vma->vm_pgoff = 0; 478 479 err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->paddr, 480 gem->size); 481 if (err < 0) { 482 drm_gem_vm_close(vma); 483 return err; 484 } 485 486 vma->vm_pgoff = vm_pgoff; 487 } else { 488 pgprot_t prot = vm_get_page_prot(vma->vm_flags); 489 490 vma->vm_flags |= VM_MIXEDMAP; 491 vma->vm_flags &= ~VM_PFNMAP; 492 493 vma->vm_page_prot = pgprot_writecombine(prot); 494 } 495 496 return 0; 497 } 498 499 int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma) 500 { 501 struct drm_gem_object *gem; 502 int err; 503 504 err = drm_gem_mmap(file, vma); 505 if (err < 0) 506 return err; 507 508 gem = vma->vm_private_data; 509 510 return tegra_gem_mmap(gem, vma); 511 } 512 513 static struct sg_table * 514 tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach, 515 enum dma_data_direction dir) 516 { 517 struct drm_gem_object *gem = attach->dmabuf->priv; 518 struct tegra_bo *bo = to_tegra_bo(gem); 519 struct sg_table *sgt; 520 521 sgt = kmalloc(sizeof(*sgt), GFP_KERNEL); 522 if (!sgt) 523 return NULL; 524 525 if (bo->pages) { 526 struct scatterlist *sg; 527 unsigned int i; 528 529 if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL)) 530 goto free; 531 532 for_each_sg(sgt->sgl, sg, bo->num_pages, i) 533 sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0); 534 535 if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) 536 goto free; 537 } else { 538 if (sg_alloc_table(sgt, 1, GFP_KERNEL)) 539 goto free; 540 541 sg_dma_address(sgt->sgl) = bo->paddr; 542 sg_dma_len(sgt->sgl) = gem->size; 543 } 544 545 return sgt; 546 547 free: 548 sg_free_table(sgt); 549 kfree(sgt); 550 return NULL; 551 } 552 553 static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach, 554 struct sg_table *sgt, 555 enum dma_data_direction dir) 556 { 557 struct drm_gem_object *gem = attach->dmabuf->priv; 558 struct tegra_bo *bo = to_tegra_bo(gem); 559 560 if (bo->pages) 561 dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir); 562 563 sg_free_table(sgt); 564 kfree(sgt); 565 } 566 567 static void tegra_gem_prime_release(struct dma_buf *buf) 568 { 569 drm_gem_dmabuf_release(buf); 570 } 571 572 static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf, 573 unsigned long page) 574 { 575 return NULL; 576 } 577 578 static void tegra_gem_prime_kunmap_atomic(struct dma_buf *buf, 579 unsigned long page, 580 void *addr) 581 { 582 } 583 584 static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page) 585 { 586 return NULL; 587 } 588 589 static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page, 590 void *addr) 591 { 592 } 593 594 static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma) 595 { 596 struct drm_gem_object *gem = buf->priv; 597 int err; 598 599 err = drm_gem_mmap_obj(gem, gem->size, vma); 600 if (err < 0) 601 return err; 602 603 return tegra_gem_mmap(gem, vma); 604 } 605 606 static void *tegra_gem_prime_vmap(struct dma_buf *buf) 607 { 608 struct drm_gem_object *gem = buf->priv; 609 struct tegra_bo *bo = to_tegra_bo(gem); 610 611 return bo->vaddr; 612 } 613 614 static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr) 615 { 616 } 617 618 static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = { 619 .map_dma_buf = tegra_gem_prime_map_dma_buf, 620 .unmap_dma_buf = tegra_gem_prime_unmap_dma_buf, 621 .release = tegra_gem_prime_release, 622 .map_atomic = tegra_gem_prime_kmap_atomic, 623 .unmap_atomic = tegra_gem_prime_kunmap_atomic, 624 .map = tegra_gem_prime_kmap, 625 .unmap = tegra_gem_prime_kunmap, 626 .mmap = tegra_gem_prime_mmap, 627 .vmap = tegra_gem_prime_vmap, 628 .vunmap = tegra_gem_prime_vunmap, 629 }; 630 631 struct dma_buf *tegra_gem_prime_export(struct drm_device *drm, 632 struct drm_gem_object *gem, 633 int flags) 634 { 635 DEFINE_DMA_BUF_EXPORT_INFO(exp_info); 636 637 exp_info.ops = &tegra_gem_prime_dmabuf_ops; 638 exp_info.size = gem->size; 639 exp_info.flags = flags; 640 exp_info.priv = gem; 641 642 return drm_gem_dmabuf_export(drm, &exp_info); 643 } 644 645 struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm, 646 struct dma_buf *buf) 647 { 648 struct tegra_bo *bo; 649 650 if (buf->ops == &tegra_gem_prime_dmabuf_ops) { 651 struct drm_gem_object *gem = buf->priv; 652 653 if (gem->dev == drm) { 654 drm_gem_object_get(gem); 655 return gem; 656 } 657 } 658 659 bo = tegra_bo_import(drm, buf); 660 if (IS_ERR(bo)) 661 return ERR_CAST(bo); 662 663 return &bo->gem; 664 } 665