// SPDX-License-Identifier: GPL-2.0-only
/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <linux/vmalloc.h>	/* vmap()/vunmap() for page-backed buffers */

#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_put_unlocked(&obj->gem);
}

static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	*sgt = obj->sgt;

	return obj->paddr;
}

static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
{
}

static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return obj->vaddr;
	else if (obj->gem.import_attach)
		return dma_buf_vmap(obj->gem.import_attach->dmabuf);
	else
		return vmap(obj->pages, obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return;
	else if (obj->gem.import_attach)
		dma_buf_vunmap(obj->gem.import_attach->dmabuf, addr);
	else
		vunmap(addr);
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_get(&obj->gem);

	return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
};

static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	int err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	mutex_lock(&tegra->mm_lock);

	err = drm_mm_insert_node_generic(&tegra->mm,
					 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n",
			err);
		goto unlock;
	}

	bo->paddr = bo->mm->start;

	bo->size = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
				bo->sgt->nents, prot);
	if (!bo->size) {
		dev_err(tegra->drm->dev, "failed to map buffer\n");
		err = -ENOMEM;
		goto remove;
	}

	mutex_unlock(&tegra->mm_lock);

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
unlock:
	mutex_unlock(&tegra->mm_lock);
	kfree(bo->mm);
	return err;
}

static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	mutex_lock(&tegra->mm_lock);
	iommu_unmap(tegra->domain, bo->paddr, bo->size);
	drm_mm_remove_node(bo->mm);
	mutex_unlock(&tegra->mm_lock);

	kfree(bo->mm);

	return 0;
}

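/*
 * Allocate and minimally initialize a tegra_bo: set up the host1x BO
 * interface, round the requested size up to a multiple of the page size
 * and initialize the underlying GEM object, including its mmap offset.
 */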
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}

static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		dma_unmap_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			     DMA_FROM_DEVICE);
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
	}
}

static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	int err;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto put_pages;
	}

	err = dma_map_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			 DMA_FROM_DEVICE);
	if (err == 0) {
		err = -EFAULT;
		goto free_sgt;
	}

	return 0;

free_sgt:
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return err;
}

static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->paddr,
					 GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}

struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size,
					     unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	drm_gem_object_put_unlocked(&bo->gem);

	return bo;
}

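/*
 * Import a dma-buf as a Tegra GEM object: attach to the buffer, map the
 * attachment to obtain an sg_table, then either map that table through the
 * IOMMU or, without an IOMMU, require it to be a single contiguous chunk
 * whose DMA address can be used directly.
 */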
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	} else {
		if (bo->sgt->nents > 1) {
			err = -EINVAL;
			goto detach;
		}

		bo->paddr = sg_dma_address(bo->sgt->sgl);
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}

int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	page = bo->pages[offset];

	return vmf_insert_page(vma, vmf->address, page);
}

const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

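/*
 * Set up a userspace mapping for a buffer object. Contiguous buffers
 * (bo->vaddr set) are mapped up front via dma_mmap_wc(), while page-backed
 * buffers are faulted in lazily through tegra_bo_fault() using VM_MIXEDMAP
 * with write-combined page protection.
 */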
int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;
		int err;

		/*
		 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
		 * and set the vm_pgoff (used as a fake buffer offset by DRM)
		 * to 0 as we want to map the whole buffer.
		 */
		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->paddr,
				  gem->size);
		if (err < 0) {
			drm_gem_vm_close(vma);
			return err;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vma->vm_flags |= VM_MIXEDMAP;
		vma->vm_flags &= ~VM_PFNMAP;

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}

int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	int err;

	err = drm_gem_mmap(file, vma);
	if (err < 0)
		return err;

	gem = vma->vm_private_data;

	return __tegra_gem_mmap(gem, vma);
}

static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		struct scatterlist *sg;
		unsigned int i;

		if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
			goto free;

		for_each_sg(sgt->sgl, sg, bo->num_pages, i)
			sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free;
	} else {
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free;

		sg_dma_address(sgt->sgl) = bo->paddr;
		sg_dma_len(sgt->sgl) = gem->size;
	}

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	sg_free_table(sgt);
	kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}

static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
					    enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sg_for_cpu(drm->dev, bo->sgt->sgl, bo->sgt->nents,
				    DMA_FROM_DEVICE);

	return 0;
}

static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
					  enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
				       DMA_TO_DEVICE);

	return 0;
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem = buf->priv;
	int err;

	err = drm_gem_mmap_obj(gem, gem->size, vma);
	if (err < 0)
		return err;

	return __tegra_gem_mmap(gem, vma);
}

static void *tegra_gem_prime_vmap(struct dma_buf *buf)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	return bo->vaddr;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
{
}

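/* dma-buf callbacks used when a Tegra GEM object is exported via PRIME. */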
static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
	.end_cpu_access = tegra_gem_prime_end_cpu_access,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};

struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.exp_name = KBUILD_MODNAME;
	exp_info.owner = gem->dev->driver->fops->owner;
	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return drm_gem_dmabuf_export(gem->dev, &exp_info);
}

struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_get(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}