// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/set_memory.h>
#include <linux/xarray.h>

#include <drm/drm_cache.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
#include <drm/drm_utils.h>

#include "ivpu_drv.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"

MODULE_IMPORT_NS(DMA_BUF);

static const struct drm_gem_object_funcs ivpu_gem_funcs;

static struct lock_class_key prime_bo_lock_class_key;

static int __must_check prime_alloc_pages_locked(struct ivpu_bo *bo)
{
	/* Pages are managed by the underlying dma-buf */
	return 0;
}

static void prime_free_pages_locked(struct ivpu_bo *bo)
{
	/* Pages are managed by the underlying dma-buf */
}

static int prime_map_pages_locked(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	struct sg_table *sgt;

	sgt = dma_buf_map_attachment_unlocked(bo->base.import_attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ivpu_err(vdev, "Failed to map attachment: %ld\n", PTR_ERR(sgt));
		return PTR_ERR(sgt);
	}

	bo->sgt = sgt;
	return 0;
}

static void prime_unmap_pages_locked(struct ivpu_bo *bo)
{
	dma_buf_unmap_attachment_unlocked(bo->base.import_attach, bo->sgt, DMA_BIDIRECTIONAL);
	bo->sgt = NULL;
}

static const struct ivpu_bo_ops prime_ops = {
	.type = IVPU_BO_TYPE_PRIME,
	.name = "prime",
	.alloc_pages = prime_alloc_pages_locked,
	.free_pages = prime_free_pages_locked,
	.map_pages = prime_map_pages_locked,
	.unmap_pages = prime_unmap_pages_locked,
};

static int __must_check shmem_alloc_pages_locked(struct ivpu_bo *bo)
{
	int npages = bo->base.size >> PAGE_SHIFT;
	struct page **pages;

	pages = drm_gem_get_pages(&bo->base);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	if (bo->flags & DRM_IVPU_BO_WC)
		set_pages_array_wc(pages, npages);
	else if (bo->flags & DRM_IVPU_BO_UNCACHED)
		set_pages_array_uc(pages, npages);

	bo->pages = pages;
	return 0;
}

static void shmem_free_pages_locked(struct ivpu_bo *bo)
{
	if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
		set_pages_array_wb(bo->pages, bo->base.size >> PAGE_SHIFT);

	drm_gem_put_pages(&bo->base, bo->pages, true, false);
	bo->pages = NULL;
}

static int ivpu_bo_map_pages_locked(struct ivpu_bo *bo)
{
	int npages = bo->base.size >> PAGE_SHIFT;
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	struct sg_table *sgt;
	int ret;

	sgt = drm_prime_pages_to_sg(&vdev->drm, bo->pages, npages);
	if (IS_ERR(sgt)) {
		ivpu_err(vdev, "Failed to allocate sgtable\n");
		return PTR_ERR(sgt);
	}

	ret = dma_map_sgtable(vdev->drm.dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret) {
		ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
		goto err_free_sgt;
	}

	bo->sgt = sgt;
	return 0;

err_free_sgt:
	kfree(sgt);
	return ret;
}

static void ivpu_bo_unmap_pages_locked(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);

	dma_unmap_sgtable(vdev->drm.dev, bo->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
	bo->sgt = NULL;
}

static const struct ivpu_bo_ops shmem_ops = {
	.type = IVPU_BO_TYPE_SHMEM,
	.name = "shmem",
	.alloc_pages = shmem_alloc_pages_locked,
	.free_pages = shmem_free_pages_locked,
	.map_pages = ivpu_bo_map_pages_locked,
	.unmap_pages = ivpu_bo_unmap_pages_locked,
};
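
/*
 * Internal BOs (see internal_ops below) are backed directly by the page
 * allocator rather than by shmem or an imported dma-buf, which lets the
 * driver vmap() them for CPU access in ivpu_bo_alloc_internal(). They reuse
 * the same DMA mapping helpers as shmem BOs.
 */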
static int __must_check internal_alloc_pages_locked(struct ivpu_bo *bo)
{
	unsigned int i, npages = bo->base.size >> PAGE_SHIFT;
	struct page **pages;
	int ret;

	pages = kvmalloc_array(npages, sizeof(*bo->pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	for (i = 0; i < npages; i++) {
		pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
		if (!pages[i]) {
			ret = -ENOMEM;
			goto err_free_pages;
		}
		cond_resched();
	}

	bo->pages = pages;
	return 0;

err_free_pages:
	while (i--)
		put_page(pages[i]);
	kvfree(pages);
	return ret;
}

static void internal_free_pages_locked(struct ivpu_bo *bo)
{
	unsigned int i, npages = bo->base.size >> PAGE_SHIFT;

	for (i = 0; i < npages; i++)
		put_page(bo->pages[i]);

	kvfree(bo->pages);
	bo->pages = NULL;
}

static const struct ivpu_bo_ops internal_ops = {
	.type = IVPU_BO_TYPE_INTERNAL,
	.name = "internal",
	.alloc_pages = internal_alloc_pages_locked,
	.free_pages = internal_free_pages_locked,
	.map_pages = ivpu_bo_map_pages_locked,
	.unmap_pages = ivpu_bo_unmap_pages_locked,
};
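
/*
 * Backing pages and the DMA mapping are created lazily: ivpu_bo_pin(),
 * ivpu_bo_get_sg_table() and the CPU fault handler all call
 * ivpu_bo_alloc_and_map_pages_locked() on first use, under bo->lock.
 */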
static int __must_check ivpu_bo_alloc_and_map_pages_locked(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	int ret;

	lockdep_assert_held(&bo->lock);
	drm_WARN_ON(&vdev->drm, bo->sgt);

	ret = bo->ops->alloc_pages(bo);
	if (ret) {
		ivpu_err(vdev, "Failed to allocate pages for BO: %d", ret);
		return ret;
	}

	ret = bo->ops->map_pages(bo);
	if (ret) {
		ivpu_err(vdev, "Failed to map pages for BO: %d", ret);
		goto err_free_pages;
	}
	return ret;

err_free_pages:
	bo->ops->free_pages(bo);
	return ret;
}

static void ivpu_bo_unmap_and_free_pages(struct ivpu_bo *bo)
{
	mutex_lock(&bo->lock);

	WARN_ON(!bo->sgt);
	bo->ops->unmap_pages(bo);
	WARN_ON(bo->sgt);
	bo->ops->free_pages(bo);
	WARN_ON(bo->pages);

	mutex_unlock(&bo->lock);
}

/*
 * ivpu_bo_pin() - pin the backing physical pages and map them to VPU.
 *
 * This function pins physical memory pages, then maps the physical pages
 * to IOMMU address space and finally updates the VPU MMU page tables
 * to allow the VPU to translate VPU address to IOMMU address.
 */
int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	int ret = 0;

	mutex_lock(&bo->lock);

	if (!bo->vpu_addr) {
		ivpu_err(vdev, "vpu_addr not set for BO ctx_id: %d handle: %d\n",
			 bo->ctx->id, bo->handle);
		ret = -EINVAL;
		goto unlock;
	}

	if (!bo->sgt) {
		ret = ivpu_bo_alloc_and_map_pages_locked(bo);
		if (ret)
			goto unlock;
	}

	if (!bo->mmu_mapped) {
		ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, bo->sgt,
					       ivpu_bo_is_snooped(bo));
		if (ret) {
			ivpu_err(vdev, "Failed to map BO in MMU: %d\n", ret);
			goto unlock;
		}
		bo->mmu_mapped = true;
	}

unlock:
	mutex_unlock(&bo->lock);

	return ret;
}

static int
ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
		       const struct ivpu_addr_range *range)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	int ret;

	if (!range) {
		if (bo->flags & DRM_IVPU_BO_HIGH_MEM)
			range = &vdev->hw->ranges.user_high;
		else
			range = &vdev->hw->ranges.user_low;
	}

	mutex_lock(&ctx->lock);
	ret = ivpu_mmu_context_insert_node_locked(ctx, range, bo->base.size, &bo->mm_node);
	if (!ret) {
		bo->ctx = ctx;
		bo->vpu_addr = bo->mm_node.start;
		list_add_tail(&bo->ctx_node, &ctx->bo_list);
	}
	mutex_unlock(&ctx->lock);

	return ret;
}

static void ivpu_bo_free_vpu_addr(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	struct ivpu_mmu_context *ctx = bo->ctx;

	ivpu_dbg(vdev, BO, "remove from ctx: ctx %d vpu_addr 0x%llx allocated %d mmu_mapped %d\n",
		 ctx->id, bo->vpu_addr, (bool)bo->sgt, bo->mmu_mapped);

	mutex_lock(&bo->lock);

	if (bo->mmu_mapped) {
		drm_WARN_ON(&vdev->drm, !bo->sgt);
		ivpu_mmu_context_unmap_sgt(vdev, ctx, bo->vpu_addr, bo->sgt);
		bo->mmu_mapped = false;
	}

	mutex_lock(&ctx->lock);
	list_del(&bo->ctx_node);
	bo->vpu_addr = 0;
	bo->ctx = NULL;
	ivpu_mmu_context_remove_node_locked(ctx, &bo->mm_node);
	mutex_unlock(&ctx->lock);

	mutex_unlock(&bo->lock);
}

void ivpu_bo_remove_all_bos_from_context(struct ivpu_mmu_context *ctx)
{
	struct ivpu_bo *bo, *tmp;

	list_for_each_entry_safe(bo, tmp, &ctx->bo_list, ctx_node)
		ivpu_bo_free_vpu_addr(bo);
}
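
/*
 * ivpu_bo_alloc() is the common constructor for all BO types: it validates
 * the size and caching flags, initializes the GEM object (shmem-backed
 * objects via drm_gem_object_init(), everything else as a private object),
 * optionally creates an mmap offset and, when an MMU context is supplied,
 * reserves a VPU address range for the buffer.
 */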
static struct ivpu_bo *
ivpu_bo_alloc(struct ivpu_device *vdev, struct ivpu_mmu_context *mmu_context,
	      u64 size, u32 flags, const struct ivpu_bo_ops *ops,
	      const struct ivpu_addr_range *range, u64 user_ptr)
{
	struct ivpu_bo *bo;
	int ret = 0;

	if (drm_WARN_ON(&vdev->drm, size == 0 || !PAGE_ALIGNED(size)))
		return ERR_PTR(-EINVAL);

	switch (flags & DRM_IVPU_BO_CACHE_MASK) {
	case DRM_IVPU_BO_CACHED:
	case DRM_IVPU_BO_UNCACHED:
	case DRM_IVPU_BO_WC:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	mutex_init(&bo->lock);
	bo->base.funcs = &ivpu_gem_funcs;
	bo->flags = flags;
	bo->ops = ops;
	bo->user_ptr = user_ptr;

	if (ops->type == IVPU_BO_TYPE_SHMEM)
		ret = drm_gem_object_init(&vdev->drm, &bo->base, size);
	else
		drm_gem_private_object_init(&vdev->drm, &bo->base, size);

	if (ret) {
		ivpu_err(vdev, "Failed to initialize drm object\n");
		goto err_free;
	}

	if (flags & DRM_IVPU_BO_MAPPABLE) {
		ret = drm_gem_create_mmap_offset(&bo->base);
		if (ret) {
			ivpu_err(vdev, "Failed to allocate mmap offset\n");
			goto err_release;
		}
	}

	if (mmu_context) {
		ret = ivpu_bo_alloc_vpu_addr(bo, mmu_context, range);
		if (ret) {
			ivpu_err(vdev, "Failed to add BO to context: %d\n", ret);
			goto err_release;
		}
	}

	return bo;

err_release:
	drm_gem_object_release(&bo->base);
err_free:
	kfree(bo);
	return ERR_PTR(ret);
}

static void ivpu_bo_free(struct drm_gem_object *obj)
{
	struct ivpu_bo *bo = to_ivpu_bo(obj);
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);

	if (bo->ctx)
		ivpu_dbg(vdev, BO, "free: ctx %d vpu_addr 0x%llx allocated %d mmu_mapped %d\n",
			 bo->ctx->id, bo->vpu_addr, (bool)bo->sgt, bo->mmu_mapped);
	else
		ivpu_dbg(vdev, BO, "free: ctx (released) allocated %d mmu_mapped %d\n",
			 (bool)bo->sgt, bo->mmu_mapped);

	drm_WARN_ON(&vdev->drm, !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));

	vunmap(bo->kvaddr);

	if (bo->ctx)
		ivpu_bo_free_vpu_addr(bo);

	if (bo->sgt)
		ivpu_bo_unmap_and_free_pages(bo);

	if (bo->base.import_attach)
		drm_prime_gem_destroy(&bo->base, bo->sgt);

	drm_gem_object_release(&bo->base);

	mutex_destroy(&bo->lock);
	kfree(bo);
}

static int ivpu_bo_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct ivpu_bo *bo = to_ivpu_bo(obj);
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);

	ivpu_dbg(vdev, BO, "mmap: ctx %u handle %u vpu_addr 0x%llx size %zu type %s",
		 bo->ctx->id, bo->handle, bo->vpu_addr, bo->base.size, bo->ops->name);

	if (obj->import_attach) {
		/* Drop the reference drm_gem_mmap_obj() acquired. */
		drm_gem_object_put(obj);
		vma->vm_private_data = NULL;
		return dma_buf_mmap(obj->dma_buf, vma, 0);
	}

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND);
	vma->vm_page_prot = ivpu_bo_pgprot(bo, vm_get_page_prot(vma->vm_flags));

	return 0;
}

static struct sg_table *ivpu_bo_get_sg_table(struct drm_gem_object *obj)
{
	struct ivpu_bo *bo = to_ivpu_bo(obj);
	loff_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	mutex_lock(&bo->lock);

	if (!bo->sgt)
		ret = ivpu_bo_alloc_and_map_pages_locked(bo);

	mutex_unlock(&bo->lock);

	if (ret)
		return ERR_PTR(ret);

	return drm_prime_pages_to_sg(obj->dev, bo->pages, npages);
}

static vm_fault_t ivpu_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct ivpu_bo *bo = to_ivpu_bo(obj);
	loff_t npages = obj->size >> PAGE_SHIFT;
	pgoff_t page_offset;
	struct page *page;
	vm_fault_t ret;
	int err;

	mutex_lock(&bo->lock);

	if (!bo->sgt) {
		err = ivpu_bo_alloc_and_map_pages_locked(bo);
		if (err) {
			ret = vmf_error(err);
			goto unlock;
		}
	}

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	if (page_offset >= npages) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = bo->pages[page_offset];
		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

unlock:
	mutex_unlock(&bo->lock);

	return ret;
}

static const struct vm_operations_struct ivpu_vm_ops = {
	.fault = ivpu_vm_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
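
/*
 * GEM object callbacks and the uapi ioctls below. A typical userspace flow
 * for a mappable BO is roughly the following (illustrative sketch; the
 * drm_ivpu_bo_* structs and DRM_IOCTL_IVPU_* numbers come from the ivpu
 * uapi header, not from this file):
 *
 *	struct drm_ivpu_bo_create create = {
 *		.size = size,
 *		.flags = DRM_IVPU_BO_MAPPABLE,
 *	};
 *	ioctl(fd, DRM_IOCTL_IVPU_BO_CREATE, &create);
 *
 *	struct drm_ivpu_bo_info info = { .handle = create.handle };
 *	ioctl(fd, DRM_IOCTL_IVPU_BO_INFO, &info);
 *
 *	mmap(NULL, info.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	     fd, info.mmap_offset);
 */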
static const struct drm_gem_object_funcs ivpu_gem_funcs = {
	.free = ivpu_bo_free,
	.mmap = ivpu_bo_mmap,
	.vm_ops = &ivpu_vm_ops,
	.get_sg_table = ivpu_bo_get_sg_table,
};

int
ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct drm_ivpu_bo_create *args = data;
	u64 size = PAGE_ALIGN(args->size);
	struct ivpu_bo *bo;
	int ret;

	if (args->flags & ~DRM_IVPU_BO_FLAGS)
		return -EINVAL;

	if (size == 0)
		return -EINVAL;

	bo = ivpu_bo_alloc(vdev, &file_priv->ctx, size, args->flags, &shmem_ops, NULL, 0);
	if (IS_ERR(bo)) {
		ivpu_err(vdev, "Failed to create BO: %pe (ctx %u size %llu flags 0x%x)",
			 bo, file_priv->ctx.id, args->size, args->flags);
		return PTR_ERR(bo);
	}

	ret = drm_gem_handle_create(file, &bo->base, &bo->handle);
	if (!ret) {
		args->vpu_addr = bo->vpu_addr;
		args->handle = bo->handle;
	}

	drm_gem_object_put(&bo->base);

	ivpu_dbg(vdev, BO, "alloc shmem: ctx %u vpu_addr 0x%llx size %zu flags 0x%x\n",
		 file_priv->ctx.id, bo->vpu_addr, bo->base.size, bo->flags);

	return ret;
}
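
/*
 * Internal BOs are allocated by the driver itself rather than by userspace.
 * A typical in-driver sequence is roughly (illustrative sketch):
 *
 *	bo = ivpu_bo_alloc_internal(vdev, 0, size, DRM_IVPU_BO_WC);
 *	if (!bo)
 *		return -ENOMEM;
 *	... CPU access through bo->kvaddr, VPU access through bo->vpu_addr ...
 *	ivpu_bo_free_internal(bo);
 */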
struct ivpu_bo *
ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 flags)
{
	const struct ivpu_addr_range *range;
	struct ivpu_addr_range fixed_range;
	struct ivpu_bo *bo;
	pgprot_t prot;
	int ret;

	drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(vpu_addr));
	drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(size));

	if (vpu_addr) {
		fixed_range.start = vpu_addr;
		fixed_range.end = vpu_addr + size;
		range = &fixed_range;
	} else {
		range = &vdev->hw->ranges.global_low;
	}

	bo = ivpu_bo_alloc(vdev, &vdev->gctx, size, flags, &internal_ops, range, 0);
	if (IS_ERR(bo)) {
		ivpu_err(vdev, "Failed to create BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)",
			 bo, vpu_addr, size, flags);
		return NULL;
	}

	ret = ivpu_bo_pin(bo);
	if (ret)
		goto err_put;

	if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
		drm_clflush_pages(bo->pages, bo->base.size >> PAGE_SHIFT);

	prot = ivpu_bo_pgprot(bo, PAGE_KERNEL);
	bo->kvaddr = vmap(bo->pages, bo->base.size >> PAGE_SHIFT, VM_MAP, prot);
	if (!bo->kvaddr) {
		ivpu_err(vdev, "Failed to map BO into kernel virtual memory\n");
		goto err_put;
	}

	ivpu_dbg(vdev, BO, "alloc internal: ctx 0 vpu_addr 0x%llx size %zu flags 0x%x\n",
		 bo->vpu_addr, bo->base.size, flags);

	return bo;

err_put:
	drm_gem_object_put(&bo->base);
	return NULL;
}

void ivpu_bo_free_internal(struct ivpu_bo *bo)
{
	drm_gem_object_put(&bo->base);
}

struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
	struct ivpu_device *vdev = to_ivpu_device(dev);
	struct dma_buf_attachment *attach;
	struct ivpu_bo *bo;

	attach = dma_buf_attach(buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(buf);

	bo = ivpu_bo_alloc(vdev, NULL, buf->size, DRM_IVPU_BO_MAPPABLE, &prime_ops, NULL, 0);
	if (IS_ERR(bo)) {
		ivpu_err(vdev, "Failed to import BO: %pe (size %lu)", bo, buf->size);
		goto err_detach;
	}

	lockdep_set_class(&bo->lock, &prime_bo_lock_class_key);

	bo->base.import_attach = attach;

	return &bo->base;

err_detach:
	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
	return ERR_CAST(bo);
}

int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = to_ivpu_device(dev);
	struct drm_ivpu_bo_info *args = data;
	struct drm_gem_object *obj;
	struct ivpu_bo *bo;
	int ret = 0;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	bo = to_ivpu_bo(obj);

	mutex_lock(&bo->lock);

	if (!bo->ctx) {
		ret = ivpu_bo_alloc_vpu_addr(bo, &file_priv->ctx, NULL);
		if (ret) {
			ivpu_err(vdev, "Failed to allocate vpu_addr: %d\n", ret);
			goto unlock;
		}
	}

	args->flags = bo->flags;
	args->mmap_offset = drm_vma_node_offset_addr(&obj->vma_node);
	args->vpu_addr = bo->vpu_addr;
	args->size = obj->size;
unlock:
	mutex_unlock(&bo->lock);
	drm_gem_object_put(obj);
	return ret;
}

int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_ivpu_bo_wait *args = data;
	struct drm_gem_object *obj;
	unsigned long timeout;
	long ret;

	timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -EINVAL;

	ret = dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_READ, true, timeout);
	if (ret == 0) {
		ret = -ETIMEDOUT;
	} else if (ret > 0) {
		ret = 0;
		args->job_status = to_ivpu_bo(obj)->job_status;
	}

	drm_gem_object_put(obj);

	return ret;
}

static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
{
	unsigned long dma_refcount = 0;

	if (bo->base.dma_buf && bo->base.dma_buf->file)
		dma_refcount = atomic_long_read(&bo->base.dma_buf->file->f_count);

	drm_printf(p, "%5u %6d %16llx %10lu %10u %12lu %14s\n",
		   bo->ctx->id, bo->handle, bo->vpu_addr, bo->base.size,
		   kref_read(&bo->base.refcount), dma_refcount, bo->ops->name);
}

void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p)
{
	struct ivpu_device *vdev = to_ivpu_device(dev);
	struct ivpu_file_priv *file_priv;
	unsigned long ctx_id;
	struct ivpu_bo *bo;

	drm_printf(p, "%5s %6s %16s %10s %10s %12s %14s\n",
		   "ctx", "handle", "vpu_addr", "size", "refcount", "dma_refcount", "type");

	mutex_lock(&vdev->gctx.lock);
	list_for_each_entry(bo, &vdev->gctx.bo_list, ctx_node)
		ivpu_bo_print_info(bo, p);
	mutex_unlock(&vdev->gctx.lock);

	xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
		file_priv = ivpu_file_priv_get_by_ctx_id(vdev, ctx_id);
		if (!file_priv)
			continue;

		mutex_lock(&file_priv->ctx.lock);
		list_for_each_entry(bo, &file_priv->ctx.bo_list, ctx_node)
			ivpu_bo_print_info(bo, p);
		mutex_unlock(&file_priv->ctx.lock);

		ivpu_file_priv_put(&file_priv);
	}
}

void ivpu_bo_list_print(struct drm_device *dev)
{
	struct drm_printer p = drm_info_printer(dev->dev);

	ivpu_bo_list(dev, &p);
}