/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_exec.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/drm_syncobj.h>

#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_hmm.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_vm.h"

static int
amdgpu_gem_add_input_fence(struct drm_file *filp,
			   uint64_t syncobj_handles_array,
			   uint32_t num_syncobj_handles)
{
	struct dma_fence *fence;
	uint32_t *syncobj_handles;
	int ret, i;

	if (!num_syncobj_handles)
		return 0;

	syncobj_handles = memdup_user(u64_to_user_ptr(syncobj_handles_array),
				      size_mul(sizeof(uint32_t), num_syncobj_handles));
	if (IS_ERR(syncobj_handles))
		return PTR_ERR(syncobj_handles);

	for (i = 0; i < num_syncobj_handles; i++) {

		if (!syncobj_handles[i]) {
			ret = -EINVAL;
			goto free_memdup;
		}

		ret = drm_syncobj_find_fence(filp, syncobj_handles[i], 0, 0, &fence);
		if (ret)
			goto free_memdup;

		dma_fence_wait(fence, false);

		/* TODO: optimize async handling */
		dma_fence_put(fence);
	}

free_memdup:
	kfree(syncobj_handles);
	return ret;
}

static int
amdgpu_gem_update_timeline_node(struct drm_file *filp,
				uint32_t syncobj_handle,
				uint64_t point,
				struct drm_syncobj **syncobj,
				struct dma_fence_chain **chain)
{
	if (!syncobj_handle)
		return 0;

	/* Find the sync object */
	*syncobj = drm_syncobj_find(filp, syncobj_handle);
	if (!*syncobj)
		return -ENOENT;

	if (!point)
		return 0;

	/* Allocate the chain node */
	*chain = dma_fence_chain_alloc();
	if (!*chain) {
		drm_syncobj_put(*syncobj);
		return -ENOMEM;
	}

	return 0;
}

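/**
 * amdgpu_gem_update_bo_mapping - signal the VM timeline syncobj for a VA update
 * @filp: DRM file pointer
 * @bo_va: bo_va that was just updated, may be NULL for clear operations
 * @operation: AMDGPU_VA_OP_* that was performed
 * @point: timeline point to signal, 0 for a binary syncobj
 * @fence: fence returned by the page table update
 * @syncobj: syncobj to signal, may be NULL
 * @chain: preallocated chain node used when adding a timeline point
 *
 * Picks the fence that marks completion of the VA operation (the last VM
 * update for maps, the page table clearing fence for unmaps) and attaches
 * it to the user provided syncobj, either as replacement fence or as a new
 * timeline point.
 */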
static void
amdgpu_gem_update_bo_mapping(struct drm_file *filp,
			     struct amdgpu_bo_va *bo_va,
			     uint32_t operation,
			     uint64_t point,
			     struct dma_fence *fence,
			     struct drm_syncobj *syncobj,
			     struct dma_fence_chain *chain)
{
	struct amdgpu_bo *bo = bo_va ? bo_va->base.bo : NULL;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct dma_fence *last_update;

	if (!syncobj)
		return;

	/* Find the last update fence */
	switch (operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_REPLACE:
		if (bo && (bo->tbo.base.resv == vm->root.bo->tbo.base.resv))
			last_update = vm->last_update;
		else
			last_update = bo_va->last_pt_update;
		break;
	case AMDGPU_VA_OP_UNMAP:
	case AMDGPU_VA_OP_CLEAR:
		last_update = fence;
		break;
	default:
		return;
	}

	/* Add fence to timeline */
	if (!point)
		drm_syncobj_replace_fence(syncobj, last_update);
	else
		drm_syncobj_add_point(syncobj, chain, last_update, point);
}

static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
	struct drm_device *ddev = bo->base.dev;
	vm_fault_t ret;
	int idx;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	if (drm_dev_enter(ddev, &idx)) {
		ret = amdgpu_bo_fault_reserve_notify(bo);
		if (ret) {
			drm_dev_exit(idx);
			goto unlock;
		}

		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
					       TTM_BO_VM_NUM_PREFAULT);

		drm_dev_exit(idx);
	} else {
		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
	}
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

unlock:
	dma_resv_unlock(bo->base.resv);
	return ret;
}

static const struct vm_operations_struct amdgpu_gem_vm_ops = {
	.fault = amdgpu_gem_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};

static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *aobj = gem_to_amdgpu_bo(gobj);

	amdgpu_hmm_unregister(aobj);
	ttm_bo_put(&aobj->tbo);
}

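/**
 * amdgpu_gem_object_create - allocate a BO and wrap it in a GEM object
 * @adev: amdgpu device
 * @size: requested size in bytes
 * @alignment: requested byte alignment
 * @initial_domain: initial placement domain(s)
 * @flags: AMDGPU_GEM_CREATE_* allocation flags
 * @type: TTM buffer object type
 * @resv: optional reservation object to share with a VM, may be NULL
 * @obj: returned GEM object
 * @xcp_id_plus1: XCP partition id plus one, 0 means no preference
 *
 * VRAM backing of BOs created here is always wiped on release.
 *
 * Returns 0 on success or a negative error code.
 */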
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, enum ttm_bo_type type,
			     struct dma_resv *resv,
			     struct drm_gem_object **obj, int8_t xcp_id_plus1)
{
	struct amdgpu_bo *bo;
	struct amdgpu_bo_user *ubo;
	struct amdgpu_bo_param bp;
	int r;

	memset(&bp, 0, sizeof(bp));
	*obj = NULL;
	flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;

	bp.size = size;
	bp.byte_align = alignment;
	bp.type = type;
	bp.resv = resv;
	bp.preferred_domain = initial_domain;
	bp.flags = flags;
	bp.domain = initial_domain;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
	bp.xcp_id_plus1 = xcp_id_plus1;

	r = amdgpu_bo_create_user(adev, &bp, &ubo);
	if (r)
		return r;

	bo = &ubo->bo;
	*obj = &bo->tbo.base;

	return 0;
}

void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev_to_drm(adev);
	struct drm_file *file;

	mutex_lock(&ddev->filelist_mutex);

	list_for_each_entry(file, &ddev->filelist, lhead) {
		struct drm_gem_object *gobj;
		int handle;

		WARN_ONCE(1, "Still active user space clients!\n");
		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, handle) {
			WARN_ONCE(1, "And also active allocations!\n");
			drm_gem_object_put(gobj);
		}
		idr_destroy(&file->object_idr);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&ddev->filelist_mutex);
}

/*
 * Called from drm_gem_handle_create(), which is used by both the GEM create
 * and the GEM open ioctls.
 */
static int amdgpu_gem_object_open(struct drm_gem_object *obj,
				  struct drm_file *file_priv)
{
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	struct mm_struct *mm;
	int r;

	mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
	if (mm && mm != current->mm)
		return -EPERM;

	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
	    !amdgpu_vm_is_bo_always_valid(vm, abo))
		return -EPERM;

	r = amdgpu_bo_reserve(abo, false);
	if (r)
		return r;

	amdgpu_vm_bo_update_shared(abo);
	bo_va = amdgpu_vm_bo_find(vm, abo);
	if (!bo_va)
		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
	else
		++bo_va->ref_count;

	/* attach gfx eviction fence */
	r = amdgpu_eviction_fence_attach(&fpriv->evf_mgr, abo);
	if (r) {
		DRM_DEBUG_DRIVER("Failed to attach eviction fence to BO\n");
		amdgpu_bo_unreserve(abo);
		return r;
	}

	amdgpu_bo_unreserve(abo);

	/* Validate and add eviction fence to DMABuf imports with dynamic
	 * attachment in compute VMs. Re-validation will be done by
	 * amdgpu_vm_validate. Fences are on the reservation shared with the
	 * export, which is currently required to be validated and fenced
	 * already by amdgpu_amdkfd_gpuvm_restore_process_bos.
	 *
	 * Nested locking below for the case that a GEM object is opened in
	 * kfd_mem_export_dmabuf. Since the lock below is only taken for imports,
	 * but not for export, this is a different lock class that cannot lead to
	 * circular lock dependencies.
	 */
	if (!vm->is_compute_context || !vm->process_info)
		return 0;
	if (!drm_gem_is_imported(obj) ||
	    !dma_buf_is_dynamic(obj->import_attach->dmabuf))
		return 0;
	mutex_lock_nested(&vm->process_info->lock, 1);
	if (!WARN_ON(!vm->process_info->eviction_fence)) {
		r = amdgpu_amdkfd_bo_validate_and_fence(abo, AMDGPU_GEM_DOMAIN_GTT,
							&vm->process_info->eviction_fence->base);
		if (r) {
			struct amdgpu_task_info *ti = amdgpu_vm_get_task_info_vm(vm);

			dev_warn(adev->dev, "validate_and_fence failed: %d\n", r);
			if (ti) {
				dev_warn(adev->dev, "pid %d\n", ti->task.pid);
				amdgpu_vm_put_task_info(ti);
			}
		}
	}
	mutex_unlock(&vm->process_info->lock);

	return r;
}

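/**
 * amdgpu_gem_object_close - drop the per-VM bo_va when a GEM handle is closed
 * @obj: GEM object being closed
 * @file_priv: DRM file of the closing client
 *
 * Drops the bo_va reference taken in amdgpu_gem_object_open() and, when the
 * last reference goes away, removes the mapping, clears the freed page table
 * entries and fences the BO with the resulting update fence.
 */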
static void amdgpu_gem_object_close(struct drm_gem_object *obj,
				    struct drm_file *file_priv)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;

	struct dma_fence *fence = NULL;
	struct amdgpu_bo_va *bo_va;
	struct drm_exec exec;
	long r;

	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_prepare_obj(&exec, &bo->tbo.base, 1);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;
	}

	if (!amdgpu_vm_is_bo_always_valid(vm, bo))
		amdgpu_eviction_fence_detach(&fpriv->evf_mgr, bo);

	bo_va = amdgpu_vm_bo_find(vm, bo);
	if (!bo_va || --bo_va->ref_count)
		goto out_unlock;

	amdgpu_vm_bo_del(adev, bo_va);
	amdgpu_vm_bo_update_shared(bo);
	if (!amdgpu_vm_ready(vm))
		goto out_unlock;

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (unlikely(r < 0))
		dev_err(adev->dev, "failed to clear page tables on GEM object close (%ld)\n", r);
	if (r || !fence)
		goto out_unlock;

	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

out_unlock:
	if (r)
		dev_err(adev->dev, "leaking bo va (%ld)\n", r);
	drm_exec_fini(&exec);
}

static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;
	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	/* Workaround for Thunk bug creating PROT_NONE,MAP_PRIVATE mappings
	 * for debugger access to invisible VRAM. Should have used MAP_SHARED
	 * instead. Clearing VM_MAYWRITE prevents the mapping from ever
	 * becoming writable and makes is_cow_mapping(vm_flags) false.
	 */
	if (is_cow_mapping(vma->vm_flags) &&
	    !(vma->vm_flags & VM_ACCESS_FLAGS))
		vm_flags_clear(vma, VM_MAYWRITE);

	return drm_gem_ttm_mmap(obj, vma);
}

const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
	.free = amdgpu_gem_object_free,
	.open = amdgpu_gem_object_open,
	.close = amdgpu_gem_object_close,
	.export = amdgpu_gem_prime_export,
	.vmap = drm_gem_ttm_vmap,
	.vunmap = drm_gem_ttm_vunmap,
	.mmap = amdgpu_gem_object_mmap,
	.vm_ops = &amdgpu_gem_vm_ops,
};

/*
 * GEM ioctls.
 */
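/**
 * amdgpu_gem_create_ioctl - create a GEM object from userspace parameters
 * @dev: drm device
 * @data: union drm_amdgpu_gem_create in/out arguments
 * @filp: drm file the handle is created for
 *
 * Validates the requested flags and domains, creates the BO (optionally
 * sharing the VM reservation object for per-VM BOs) and returns a GEM handle
 * in args->out.handle. On allocation failure it retries without CPU access
 * and then falls back from VRAM to GTT placement.
 */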
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	union drm_amdgpu_gem_create *args = data;
	uint64_t flags = args->in.domain_flags;
	uint64_t size = args->in.bo_size;
	struct dma_resv *resv = NULL;
	struct drm_gem_object *gobj;
	uint32_t handle, initial_domain;
	int r;

	/* reject invalid gem flags */
	if (flags & ~AMDGPU_GEM_CREATE_SETTABLE_MASK)
		return -EINVAL;

	/* reject invalid gem domains */
	if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
		return -EINVAL;

	if (!amdgpu_is_tmz(adev) && (flags & AMDGPU_GEM_CREATE_ENCRYPTED)) {
		DRM_NOTE_ONCE("Cannot allocate secure buffer since TMZ is disabled\n");
		return -EINVAL;
	}

	/* always clear VRAM */
	flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;

	/* create a gem object to contain this object in */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
			/* if gds bo is created from user space, it must be
			 * passed to bo list
			 */
			DRM_ERROR("GDS bo cannot be per-vm-bo\n");
			return -EINVAL;
		}
		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	}

	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		r = amdgpu_bo_reserve(vm->root.bo, false);
		if (r)
			return r;

		resv = vm->root.bo->tbo.base.resv;
	}

	initial_domain = (u32)(0xffffffff & args->in.domains);
retry:
	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     initial_domain, flags, ttm_bo_type_device,
				     resv, &gobj, fpriv->xcp_id + 1);
	if (r && r != -ERESTARTSYS) {
		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
			flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
			goto retry;
		}

		if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
			initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
			goto retry;
		}
		DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
			  size, initial_domain, args->in.alignment, r);
	}

	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		if (!r) {
			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);

			abo->parent = amdgpu_bo_ref(vm->root.bo);
		}
		amdgpu_bo_unreserve(vm->root.bo);
	}
	if (r)
		return r;

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r)
		return r;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	return 0;
}

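/**
 * amdgpu_gem_userptr_ioctl - create a GEM object backed by user memory
 * @dev: drm device
 * @data: struct drm_amdgpu_gem_userptr in/out arguments
 * @filp: drm file the handle is created for
 *
 * Wraps an existing user address range in a GTT BO. Writable ranges require
 * an MMU notifier registration (AMDGPU_GEM_USERPTR_REGISTER), and
 * AMDGPU_GEM_USERPTR_VALIDATE makes the pages resident immediately.
 */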
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_amdgpu_gem_userptr *args = data;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct drm_gem_object *gobj;
	struct hmm_range *range;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	args->addr = untagged_addr(args->addr);

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
	    !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
		/* if we want to write to it we must install an MMU notifier */
		return -EACCES;
	}

	/* create a gem object to contain this object in */
	r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
				     0, ttm_bo_type_device, NULL, &gobj, fpriv->xcp_id + 1);
	if (r)
		return r;

	bo = gem_to_amdgpu_bo(gobj);
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	r = amdgpu_ttm_tt_set_userptr(&bo->tbo, args->addr, args->flags);
	if (r)
		goto release_object;

	r = amdgpu_hmm_register(bo, args->addr);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
						 &range);
		if (r)
			goto release_object;

		r = amdgpu_bo_reserve(bo, true);
		if (r)
			goto user_pages_done;

		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		amdgpu_bo_unreserve(bo);
		if (r)
			goto user_pages_done;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	if (r)
		goto user_pages_done;

	args->handle = handle;

user_pages_done:
	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);

release_object:
	drm_gem_object_put(gobj);

	return r;
}

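/**
 * amdgpu_mode_dumb_mmap - look up the fake mmap offset for a GEM handle
 * @filp: drm file the handle belongs to
 * @dev: drm device
 * @handle: GEM handle to map
 * @offset_p: returned offset to pass to mmap(2)
 *
 * Userptr BOs and BOs created with AMDGPU_GEM_CREATE_NO_CPU_ACCESS cannot be
 * mapped and return -EPERM.
 */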
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (!gobj)
		return -ENOENT;

	robj = gem_to_amdgpu_bo(gobj);
	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_put(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_put(gobj);
	return 0;
}

int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;

	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;

	/* clamp timeout if it's too large */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
	/* clamp timeout to avoid unsigned -> signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}

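/**
 * amdgpu_gem_wait_idle_ioctl - wait for a GEM object to become idle
 * @dev: drm device
 * @data: union drm_amdgpu_gem_wait_idle in/out arguments
 * @filp: drm file the handle belongs to
 *
 * Waits on all readers and writers of the BO's reservation object up to the
 * absolute timeout in args->in.timeout. args->out.status is 0 when the BO
 * signaled in time and 1 when the wait timed out.
 */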
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, handle);
	if (!gobj)
		return -ENOENT;

	robj = gem_to_amdgpu_bo(gobj);
	ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ,
				    true, timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
	 * ret < 0 means interrupted before timeout
	 */
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_put(gobj);
	return r;
}

int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		if (args->data.data_size_bytes > sizeof(args->data.data)) {
			r = -EINVAL;
			goto unreserve;
		}
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

unreserve:
	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_put(gobj);
	return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm to update
 * @bo_va: bo_va to update
 * @operation: map, unmap or clear
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 *
 * Returns the resulting fence if freed BO(s) got cleared from the PT,
 * otherwise a stub fence in case of error.
 */
static struct dma_fence *
amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
			struct amdgpu_vm *vm,
			struct amdgpu_bo_va *bo_va,
			uint32_t operation)
{
	struct dma_fence *fence = dma_fence_get_stub();
	int r;

	if (!amdgpu_vm_ready(vm))
		return fence;

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (r)
		goto error;

	if (operation == AMDGPU_VA_OP_MAP ||
	    operation == AMDGPU_VA_OP_REPLACE) {
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			goto error;
	}

	r = amdgpu_vm_update_pdes(adev, vm, false);

error:
	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);

	return fence;
}

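/**
 * amdgpu_gem_va_ioctl - map or unmap a GEM object in the process GPU VM
 * @dev: drm device
 * @data: struct drm_amdgpu_gem_va in/out arguments
 * @filp: drm file of the VM owner
 *
 * Handles AMDGPU_VA_OP_MAP, UNMAP, CLEAR and REPLACE. Optionally waits for
 * user supplied input fence syncobjs before touching the page tables and
 * signals the VM timeline syncobj once the update completes.
 */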
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK |
		AMDGPU_VM_PAGE_NOALLOC;
	const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_PRT;

	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *abo;
	struct amdgpu_bo_va *bo_va;
	struct drm_syncobj *timeline_syncobj = NULL;
	struct dma_fence_chain *timeline_chain = NULL;
	struct dma_fence *fence;
	struct drm_exec exec;
	uint64_t vm_size;
	int r = 0;

	if (args->va_address < AMDGPU_VA_RESERVED_BOTTOM) {
		dev_dbg(dev->dev,
			"va_address 0x%llx is in reserved area 0x%llx\n",
			args->va_address, AMDGPU_VA_RESERVED_BOTTOM);
		return -EINVAL;
	}

	if (args->va_address >= AMDGPU_GMC_HOLE_START &&
	    args->va_address < AMDGPU_GMC_HOLE_END) {
		dev_dbg(dev->dev,
			"va_address 0x%llx is in VA hole 0x%llx-0x%llx\n",
			args->va_address, AMDGPU_GMC_HOLE_START,
			AMDGPU_GMC_HOLE_END);
		return -EINVAL;
	}

	args->va_address &= AMDGPU_GMC_HOLE_MASK;

	vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
	vm_size -= AMDGPU_VA_RESERVED_TOP;
	if (args->va_address + args->map_size > vm_size) {
		dev_dbg(dev->dev,
			"va_address 0x%llx is in top reserved area 0x%llx\n",
			args->va_address + args->map_size, vm_size);
		return -EINVAL;
	}

	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
		dev_dbg(dev->dev, "invalid flags combination 0x%08X\n",
			args->flags);
		return -EINVAL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
	case AMDGPU_VA_OP_CLEAR:
	case AMDGPU_VA_OP_REPLACE:
		break;
	default:
		dev_dbg(dev->dev, "unsupported operation %d\n",
			args->operation);
		return -EINVAL;
	}

	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
		gobj = drm_gem_object_lookup(filp, args->handle);
		if (gobj == NULL)
			return -ENOENT;
		abo = gem_to_amdgpu_bo(gobj);
	} else {
		gobj = NULL;
		abo = NULL;
	}

	r = amdgpu_gem_add_input_fence(filp,
				       args->input_fence_syncobj_handles,
				       args->num_syncobj_handles);
	if (r)
		goto error_put_gobj;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
		      DRM_EXEC_IGNORE_DUPLICATES, 0);
	drm_exec_until_all_locked(&exec) {
		if (gobj) {
			r = drm_exec_lock_obj(&exec, gobj);
			drm_exec_retry_on_contention(&exec);
			if (unlikely(r))
				goto error;
		}

		r = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 2);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error;
	}

	if (abo) {
		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
		if (!bo_va) {
			r = -ENOENT;
			goto error;
		}
	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
		bo_va = fpriv->prt_va;
	} else {
		bo_va = NULL;
	}

	r = amdgpu_gem_update_timeline_node(filp,
					    args->vm_timeline_syncobj_out,
					    args->vm_timeline_point,
					    &timeline_syncobj,
					    &timeline_chain);
	if (r)
		goto error;

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     args->flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;

	case AMDGPU_VA_OP_CLEAR:
		r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
						args->va_address,
						args->map_size);
		break;
	case AMDGPU_VA_OP_REPLACE:
		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
					     args->offset_in_bo, args->map_size,
					     args->flags);
		break;
	default:
		break;
	}
	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !adev->debug_vm) {
		fence = amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
						args->operation);

		if (timeline_syncobj)
			amdgpu_gem_update_bo_mapping(filp, bo_va,
						     args->operation,
						     args->vm_timeline_point,
						     fence, timeline_syncobj,
						     timeline_chain);
		else
			dma_fence_put(fence);
	}

error:
	drm_exec_fini(&exec);
error_put_gobj:
	drm_gem_object_put(gobj);
	return r;
}

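/**
 * amdgpu_gem_op_ioctl - query or modify GEM object attributes
 * @dev: drm device
 * @data: struct drm_amdgpu_gem_op in/out arguments
 * @filp: drm file the handle belongs to
 *
 * Supports AMDGPU_GEM_OP_GET_GEM_CREATE_INFO, AMDGPU_GEM_OP_SET_PLACEMENT and
 * AMDGPU_GEM_OP_GET_MAPPING_INFO. For GET_MAPPING_INFO, userspace sizes the
 * entry array via args->num_entries and must retry with a larger array when
 * the returned count exceeds it.
 */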
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_vm_bo_base *base;
	struct amdgpu_bo *robj;
	struct drm_exec exec;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	int r;

	if (args->padding)
		return -EINVAL;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (!gobj)
		return -ENOENT;

	robj = gem_to_amdgpu_bo(gobj);

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
		      DRM_EXEC_IGNORE_DUPLICATES, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec, gobj);
		drm_exec_retry_on_contention(&exec);
		if (r)
			goto out_exec;

		if (args->op == AMDGPU_GEM_OP_GET_MAPPING_INFO) {
			r = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 0);
			drm_exec_retry_on_contention(&exec);
			if (r)
				goto out_exec;
		}
	}

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = u64_to_user_ptr(args->value);

		info.bo_size = robj->tbo.base.size;
		info.alignment = robj->tbo.page_alignment << PAGE_SHIFT;
		info.domains = robj->preferred_domains;
		info.domain_flags = robj->flags;
		drm_exec_fini(&exec);
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (drm_gem_is_imported(&robj->tbo.base) &&
		    args->value & AMDGPU_GEM_DOMAIN_VRAM) {
			r = -EINVAL;
			goto out_exec;
		}
		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
			r = -EPERM;
			goto out_exec;
		}
		for (base = robj->vm_bo; base; base = base->next)
			if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
						  amdgpu_ttm_adev(base->vm->root.bo->tbo.bdev))) {
				r = -EINVAL;
				goto out_exec;
			}

		robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
							 AMDGPU_GEM_DOMAIN_GTT |
							 AMDGPU_GEM_DOMAIN_CPU);
		robj->allowed_domains = robj->preferred_domains;
		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
			amdgpu_vm_bo_invalidate(robj, true);
		drm_exec_fini(&exec);
		break;
	case AMDGPU_GEM_OP_GET_MAPPING_INFO: {
		struct amdgpu_bo_va *bo_va = amdgpu_vm_bo_find(&fpriv->vm, robj);
		struct drm_amdgpu_gem_vm_entry *vm_entries;
		struct amdgpu_bo_va_mapping *mapping;
		int num_mappings = 0;
		/*
		 * num_entries is set as an input to the size of the user-allocated array of
		 * drm_amdgpu_gem_vm_entry stored at args->value.
		 * num_entries is sent back as output as the number of mappings the bo has.
		 * If that number is larger than the size of the array, the ioctl must
		 * be retried.
		 */
		vm_entries = kvcalloc(args->num_entries, sizeof(*vm_entries), GFP_KERNEL);
		if (!vm_entries) {
			r = -ENOMEM;
			goto out_exec;
		}

		amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping) {
			if (num_mappings < args->num_entries) {
				vm_entries[num_mappings].addr = mapping->start * AMDGPU_GPU_PAGE_SIZE;
				vm_entries[num_mappings].size = (mapping->last - mapping->start + 1) * AMDGPU_GPU_PAGE_SIZE;
				vm_entries[num_mappings].offset = mapping->offset;
				vm_entries[num_mappings].flags = mapping->flags;
			}
			num_mappings += 1;
		}

		amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping) {
			if (num_mappings < args->num_entries) {
				vm_entries[num_mappings].addr = mapping->start * AMDGPU_GPU_PAGE_SIZE;
				vm_entries[num_mappings].size = (mapping->last - mapping->start + 1) * AMDGPU_GPU_PAGE_SIZE;
				vm_entries[num_mappings].offset = mapping->offset;
				vm_entries[num_mappings].flags = mapping->flags;
			}
			num_mappings += 1;
		}

		drm_exec_fini(&exec);

		if (num_mappings > 0 && num_mappings <= args->num_entries)
			if (copy_to_user(u64_to_user_ptr(args->value), vm_entries, num_mappings * sizeof(*vm_entries)))
				r = -EFAULT;

		args->num_entries = num_mappings;

		kvfree(vm_entries);
		break;
	}
	default:
		drm_exec_fini(&exec);
		r = -EINVAL;
	}

	drm_gem_object_put(gobj);
	return r;
out_exec:
	drm_exec_fini(&exec);
	drm_gem_object_put(gobj);
	return r;
}

/**
 * amdgpu_gem_list_handles_ioctl - get information about a process' buffer objects
 *
 * @dev: drm device pointer
 * @data: drm_amdgpu_gem_list_handles
 * @filp: drm file pointer
 *
 * num_entries is set as an input to the size of the entries array.
 * num_entries is sent back as output as the number of bos in the process.
 * If that number is larger than the size of the array, the ioctl must
 * be retried.
 *
 * Returns:
 * 0 for success, -errno for errors.
 */
int amdgpu_gem_list_handles_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *filp)
{
	struct drm_amdgpu_gem_list_handles *args = data;
	struct drm_amdgpu_gem_list_handles_entry *bo_entries;
	struct drm_gem_object *gobj;
	int id, ret = 0;
	int bo_index = 0;
	int num_bos = 0;

	spin_lock(&filp->table_lock);
	idr_for_each_entry(&filp->object_idr, gobj, id)
		num_bos += 1;
	spin_unlock(&filp->table_lock);

	if (args->num_entries < num_bos) {
		args->num_entries = num_bos;
		return 0;
	}

	if (num_bos == 0) {
		args->num_entries = 0;
		return 0;
	}

	bo_entries = kvcalloc(num_bos, sizeof(*bo_entries), GFP_KERNEL);
	if (!bo_entries)
		return -ENOMEM;

	spin_lock(&filp->table_lock);
	idr_for_each_entry(&filp->object_idr, gobj, id) {
		struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
		struct drm_amdgpu_gem_list_handles_entry *bo_entry;

		if (bo_index >= num_bos) {
			ret = -EAGAIN;
			break;
		}

		bo_entry = &bo_entries[bo_index];

		bo_entry->size = amdgpu_bo_size(bo);
		bo_entry->alloc_flags = bo->flags & AMDGPU_GEM_CREATE_SETTABLE_MASK;
		bo_entry->preferred_domains = bo->preferred_domains;
		bo_entry->gem_handle = id;
		bo_entry->alignment = bo->tbo.page_alignment;

		if (bo->tbo.base.import_attach)
			bo_entry->flags |= AMDGPU_GEM_LIST_HANDLES_FLAG_IS_IMPORT;

		bo_index += 1;
	}
	spin_unlock(&filp->table_lock);

	args->num_entries = bo_index;

	if (!ret)
		if (copy_to_user(u64_to_user_ptr(args->entries), bo_entries, num_bos * sizeof(*bo_entries)))
			ret = -EFAULT;

	kvfree(bo_entries);

	return ret;
}

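/*
 * amdgpu_gem_align_pitch - align a scanout pitch to hardware requirements
 *
 * Pads the pitch (in pixels) to a 256/128/64 pixel boundary depending on the
 * bytes per pixel and returns the resulting pitch in bytes.
 */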
static int amdgpu_gem_align_pitch(struct amdgpu_device *adev,
				  int width,
				  int cpp,
				  bool tiled)
{
	int aligned = width;
	int pitch_mask = 0;

	switch (cpp) {
	case 1:
		pitch_mask = 255;
		break;
	case 2:
		pitch_mask = 127;
		break;
	case 3:
	case 4:
		pitch_mask = 63;
		break;
	}

	aligned += pitch_mask;
	aligned &= ~pitch_mask;
	return aligned * cpp;
}

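/**
 * amdgpu_mode_dumb_create - create a dumb buffer for scanout
 * @file_priv: drm file requesting the buffer
 * @dev: drm device
 * @args: width, height and bpp in; handle, pitch and size out
 *
 * Allocates a CPU-accessible, contiguous buffer in a scanout-capable domain
 * and returns a GEM handle for it.
 */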
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct drm_gem_object *gobj;
	uint32_t handle;
	u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		    AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		    AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	u32 domain;
	int r;

	/*
	 * The buffer returned from this function should be cleared, but
	 * it can only be done if the ring is enabled or we'll fail to
	 * create the buffer.
	 */
	if (adev->mman.buffer_funcs_enabled)
		flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;

	args->pitch = amdgpu_gem_align_pitch(adev, args->width,
					     DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);
	domain = amdgpu_bo_get_preferred_domain(adev,
				amdgpu_display_supported_domains(adev, flags));
	r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
				     ttm_bo_type_device, NULL, &gobj, fpriv->xcp_id + 1);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r)
		return r;

	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_gem_info_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = m->private;
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_file *file;
	int r;

	r = mutex_lock_interruptible(&dev->filelist_mutex);
	if (r)
		return r;

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct task_struct *task;
		struct drm_gem_object *gobj;
		struct pid *pid;
		int id;

		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		pid = rcu_dereference(file->pid);
		task = pid_task(pid, PIDTYPE_TGID);
		seq_printf(m, "pid %8d command %s:\n", pid_nr(pid),
			   task ? task->comm : "<unknown>");
		rcu_read_unlock();

		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, id) {
			struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);

			amdgpu_bo_print_info(id, bo, m);
		}
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&dev->filelist_mutex);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_gem_info);

#endif

void amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	debugfs_create_file("amdgpu_gem_info", 0444, root, adev,
			    &amdgpu_debugfs_gem_info_fops);
#endif
}