Lines Matching +full:protect +full:- +full:exec
32 #include <linux/dma-buf.h>
49 struct ttm_buffer_object *bo = vmf->vma->vm_private_data; in amdgpu_gem_fault()
50 struct drm_device *ddev = bo->base.dev; in amdgpu_gem_fault()
65 ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, in amdgpu_gem_fault()
70 ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot); in amdgpu_gem_fault()
72 if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) in amdgpu_gem_fault()
76 dma_resv_unlock(bo->base.resv); in amdgpu_gem_fault()
92 ttm_bo_put(&aobj->tbo); in amdgpu_gem_object_free()
124 bo = &ubo->bo; in amdgpu_gem_object_create()
125 *obj = &bo->tbo.base; in amdgpu_gem_object_create()
135 mutex_lock(&ddev->filelist_mutex); in amdgpu_gem_force_release()
137 list_for_each_entry(file, &ddev->filelist, lhead) { in amdgpu_gem_force_release()
142 spin_lock(&file->table_lock); in amdgpu_gem_force_release()
143 idr_for_each_entry(&file->object_idr, gobj, handle) { in amdgpu_gem_force_release()
147 idr_destroy(&file->object_idr); in amdgpu_gem_force_release()
148 spin_unlock(&file->table_lock); in amdgpu_gem_force_release()
151 mutex_unlock(&ddev->filelist_mutex); in amdgpu_gem_force_release()
162 struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev); in amdgpu_gem_object_open()
163 struct amdgpu_fpriv *fpriv = file_priv->driver_priv; in amdgpu_gem_object_open()
164 struct amdgpu_vm *vm = &fpriv->vm; in amdgpu_gem_object_open()
169 mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm); in amdgpu_gem_object_open()
170 if (mm && mm != current->mm) in amdgpu_gem_object_open()
171 return -EPERM; in amdgpu_gem_object_open()
173 if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID && in amdgpu_gem_object_open()
175 return -EPERM; in amdgpu_gem_object_open()
186 ++bo_va->ref_count; in amdgpu_gem_object_open()
190 * attachment in compute VMs. Re-validation will be done by in amdgpu_gem_object_open()
200 if (!vm->is_compute_context || !vm->process_info) in amdgpu_gem_object_open()
202 if (!obj->import_attach || in amdgpu_gem_object_open()
203 !dma_buf_is_dynamic(obj->import_attach->dmabuf)) in amdgpu_gem_object_open()
205 mutex_lock_nested(&vm->process_info->lock, 1); in amdgpu_gem_object_open()
206 if (!WARN_ON(!vm->process_info->eviction_fence)) { in amdgpu_gem_object_open()
208 &vm->process_info->eviction_fence->base); in amdgpu_gem_object_open()
212 dev_warn(adev->dev, "validate_and_fence failed: %d\n", r); in amdgpu_gem_object_open()
214 dev_warn(adev->dev, "pid %d\n", ti->pid); in amdgpu_gem_object_open()
219 mutex_unlock(&vm->process_info->lock); in amdgpu_gem_object_open()
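In the compute-VM branch above, a dynamically imported dma-buf has the KFD process eviction fence attached to its reservation object (under the process_info lock), so that a later eviction of the imported buffer goes through the same user-queue eviction/restore machinery as natively allocated KFD buffers; the dev_warn() calls only report which process the validate-and-fence step failed for.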
228 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); in amdgpu_gem_object_close()
229 struct amdgpu_fpriv *fpriv = file_priv->driver_priv; in amdgpu_gem_object_close()
230 struct amdgpu_vm *vm = &fpriv->vm; in amdgpu_gem_object_close()
234 struct drm_exec exec; in amdgpu_gem_object_close() local
237 drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0); in amdgpu_gem_object_close()
238 drm_exec_until_all_locked(&exec) { in amdgpu_gem_object_close()
239 r = drm_exec_prepare_obj(&exec, &bo->tbo.base, 1); in amdgpu_gem_object_close()
240 drm_exec_retry_on_contention(&exec); in amdgpu_gem_object_close()
244 r = amdgpu_vm_lock_pd(vm, &exec, 0); in amdgpu_gem_object_close()
245 drm_exec_retry_on_contention(&exec); in amdgpu_gem_object_close()
251 if (!bo_va || --bo_va->ref_count) in amdgpu_gem_object_close()
261 dev_err(adev->dev, "failed to clear page " in amdgpu_gem_object_close()
271 dev_err(adev->dev, "leaking bo va (%ld)\n", r); in amdgpu_gem_object_close()
272 drm_exec_fini(&exec); in amdgpu_gem_object_close()
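The drm_exec calls above follow the common DRM locking pattern: everything inside drm_exec_until_all_locked() may be re-run when a reservation lock is contended, so each prepare/lock call is immediately followed by drm_exec_retry_on_contention(). A minimal sketch of that pattern, assuming a bo and vm like the ones in amdgpu_gem_object_close():

	struct drm_exec exec;
	int r;

	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
	drm_exec_until_all_locked(&exec) {
		/* reserve the BO itself, with one shared fence slot */
		r = drm_exec_prepare_obj(&exec, &bo->tbo.base, 1);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;

		/* also lock the VM page directory */
		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;
	}
	/* ... update mappings while both reservations are held ... */
out_unlock:
	drm_exec_fini(&exec);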
279 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) in amdgpu_gem_object_mmap()
280 return -EPERM; in amdgpu_gem_object_mmap()
281 if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) in amdgpu_gem_object_mmap()
282 return -EPERM; in amdgpu_gem_object_mmap()
289 if (is_cow_mapping(vma->vm_flags) && in amdgpu_gem_object_mmap()
290 !(vma->vm_flags & VM_ACCESS_FLAGS)) in amdgpu_gem_object_mmap()
314 struct amdgpu_fpriv *fpriv = filp->driver_priv; in amdgpu_gem_create_ioctl()
315 struct amdgpu_vm *vm = &fpriv->vm; in amdgpu_gem_create_ioctl()
317 uint64_t flags = args->in.domain_flags; in amdgpu_gem_create_ioctl()
318 uint64_t size = args->in.bo_size; in amdgpu_gem_create_ioctl()
325 if (args->in.domains & AMDGPU_GEM_DOMAIN_DOORBELL) in amdgpu_gem_create_ioctl()
326 return -EINVAL; in amdgpu_gem_create_ioctl()
338 return -EINVAL; in amdgpu_gem_create_ioctl()
341 if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK) in amdgpu_gem_create_ioctl()
342 return -EINVAL; in amdgpu_gem_create_ioctl()
346 return -EINVAL; in amdgpu_gem_create_ioctl()
353 if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS | in amdgpu_gem_create_ioctl()
359 DRM_ERROR("GDS bo cannot be per-vm-bo\n"); in amdgpu_gem_create_ioctl()
360 return -EINVAL; in amdgpu_gem_create_ioctl()
366 r = amdgpu_bo_reserve(vm->root.bo, false); in amdgpu_gem_create_ioctl()
370 resv = vm->root.bo->tbo.base.resv; in amdgpu_gem_create_ioctl()
373 initial_domain = (u32)(0xffffffff & args->in.domains); in amdgpu_gem_create_ioctl()
375 r = amdgpu_gem_object_create(adev, size, args->in.alignment, in amdgpu_gem_create_ioctl()
377 flags, ttm_bo_type_device, resv, &gobj, fpriv->xcp_id + 1); in amdgpu_gem_create_ioctl()
378 if (r && r != -ERESTARTSYS) { in amdgpu_gem_create_ioctl()
389 size, initial_domain, args->in.alignment, r); in amdgpu_gem_create_ioctl()
396 abo->parent = amdgpu_bo_ref(vm->root.bo); in amdgpu_gem_create_ioctl()
398 amdgpu_bo_unreserve(vm->root.bo); in amdgpu_gem_create_ioctl()
404 /* drop reference from allocate - handle holds it now */ in amdgpu_gem_create_ioctl()
410 args->out.handle = handle; in amdgpu_gem_create_ioctl()
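The "drop reference from allocate" comment refers to the usual GEM handle pattern: drm_gem_handle_create() takes its own reference on the object, so the allocation reference is dropped immediately and only the userspace handle keeps the BO alive. A minimal sketch of that sequence, assuming the gobj/handle variables used above:

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r)
		return r;

	args->out.handle = handle;
	return 0;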
420 struct amdgpu_fpriv *fpriv = filp->driver_priv; in amdgpu_gem_userptr_ioctl()
427 args->addr = untagged_addr(args->addr); in amdgpu_gem_userptr_ioctl()
429 if (offset_in_page(args->addr | args->size)) in amdgpu_gem_userptr_ioctl()
430 return -EINVAL; in amdgpu_gem_userptr_ioctl()
433 if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY | in amdgpu_gem_userptr_ioctl()
436 return -EINVAL; in amdgpu_gem_userptr_ioctl()
438 if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) && in amdgpu_gem_userptr_ioctl()
439 !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) { in amdgpu_gem_userptr_ioctl()
442 return -EACCES; in amdgpu_gem_userptr_ioctl()
446 r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU, in amdgpu_gem_userptr_ioctl()
447 0, ttm_bo_type_device, NULL, &gobj, fpriv->xcp_id + 1); in amdgpu_gem_userptr_ioctl()
452 bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT; in amdgpu_gem_userptr_ioctl()
453 bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT; in amdgpu_gem_userptr_ioctl()
454 r = amdgpu_ttm_tt_set_userptr(&bo->tbo, args->addr, args->flags); in amdgpu_gem_userptr_ioctl()
458 r = amdgpu_hmm_register(bo, args->addr); in amdgpu_gem_userptr_ioctl()
462 if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) { in amdgpu_gem_userptr_ioctl()
463 r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, in amdgpu_gem_userptr_ioctl()
473 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); in amdgpu_gem_userptr_ioctl()
483 args->handle = handle; in amdgpu_gem_userptr_ioctl()
486 if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) in amdgpu_gem_userptr_ioctl()
487 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range); in amdgpu_gem_userptr_ioctl()
504 return -ENOENT; in amdgpu_mode_dumb_mmap()
507 if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) || in amdgpu_mode_dumb_mmap()
508 (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) { in amdgpu_mode_dumb_mmap()
510 return -EPERM; in amdgpu_mode_dumb_mmap()
521 uint32_t handle = args->in.handle; in amdgpu_gem_mmap_ioctl()
524 return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr); in amdgpu_gem_mmap_ioctl()
528 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
548 /* clamp timeout to avoid unsigned -> signed overflow */ in amdgpu_gem_timeout()
550 return MAX_SCHEDULE_TIMEOUT - 1; in amdgpu_gem_timeout()
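amdgpu_gem_timeout() converts the absolute nanosecond timeout passed by userspace into a relative jiffies value for the scheduler. A minimal sketch of that conversion, assuming the standard ktime helpers: a "negative" value means wait forever, an already expired deadline becomes 0, and the result is clamped below MAX_SCHEDULE_TIMEOUT so the unsigned jiffies count cannot overflow when interpreted as signed:

	unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
	{
		unsigned long timeout_jiffies;
		ktime_t timeout;

		/* a "negative" value is treated as an infinite wait */
		if (((int64_t)timeout_ns) < 0)
			return MAX_SCHEDULE_TIMEOUT;

		/* absolute deadline -> time remaining from now */
		timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
		if (ktime_to_ns(timeout) < 0)
			return 0;

		timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
		/* clamp timeout to avoid unsigned -> signed overflow */
		if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
			return MAX_SCHEDULE_TIMEOUT - 1;

		return timeout_jiffies;
	}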
561 uint32_t handle = args->in.handle; in amdgpu_gem_wait_idle_ioctl()
562 unsigned long timeout = amdgpu_gem_timeout(args->in.timeout); in amdgpu_gem_wait_idle_ioctl()
568 return -ENOENT; in amdgpu_gem_wait_idle_ioctl()
571 ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ, in amdgpu_gem_wait_idle_ioctl()
580 args->out.status = (ret == 0); in amdgpu_gem_wait_idle_ioctl()
594 int r = -1; in amdgpu_gem_metadata_ioctl()
596 DRM_DEBUG("%d\n", args->handle); in amdgpu_gem_metadata_ioctl()
597 gobj = drm_gem_object_lookup(filp, args->handle); in amdgpu_gem_metadata_ioctl()
599 return -ENOENT; in amdgpu_gem_metadata_ioctl()
606 if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) { in amdgpu_gem_metadata_ioctl()
607 amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info); in amdgpu_gem_metadata_ioctl()
608 r = amdgpu_bo_get_metadata(robj, args->data.data, in amdgpu_gem_metadata_ioctl()
609 sizeof(args->data.data), in amdgpu_gem_metadata_ioctl()
610 &args->data.data_size_bytes, in amdgpu_gem_metadata_ioctl()
611 &args->data.flags); in amdgpu_gem_metadata_ioctl()
612 } else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) { in amdgpu_gem_metadata_ioctl()
613 if (args->data.data_size_bytes > sizeof(args->data.data)) { in amdgpu_gem_metadata_ioctl()
614 r = -EINVAL; in amdgpu_gem_metadata_ioctl()
617 r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info); in amdgpu_gem_metadata_ioctl()
619 r = amdgpu_bo_set_metadata(robj, args->data.data, in amdgpu_gem_metadata_ioctl()
620 args->data.data_size_bytes, in amdgpu_gem_metadata_ioctl()
621 args->data.flags); in amdgpu_gem_metadata_ioctl()
632 * amdgpu_gem_va_update_vm - update the bo_va in its VM in amdgpu_gem_va_update_vm()
666 if (r && r != -ERESTARTSYS) in amdgpu_gem_va_update_vm()
671 * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
693 if (adev->gmc.gmc_funcs->map_mtype) in amdgpu_gem_va_map_flags()
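amdgpu_gem_va_map_flags() translates the AMDGPU_VM_PAGE_* flags from the VA ioctl into PTE bits; when the GMC provides a map_mtype hook, the memory-type bits are translated by that hardware-specific callback as well. A partial sketch covering only the common permission flags (the in-tree helper also handles PRT and related flags):

	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;

	if (adev->gmc.gmc_funcs->map_mtype)
		pte_flag |= amdgpu_gmc_map_mtype(adev,
						 flags & AMDGPU_VM_MTYPE_MASK);

	return pte_flag;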
713 struct amdgpu_fpriv *fpriv = filp->driver_priv; in amdgpu_gem_va_ioctl()
716 struct drm_exec exec; in amdgpu_gem_va_ioctl() local
721 if (args->va_address < AMDGPU_VA_RESERVED_BOTTOM) { in amdgpu_gem_va_ioctl()
722 dev_dbg(dev->dev, in amdgpu_gem_va_ioctl()
724 args->va_address, AMDGPU_VA_RESERVED_BOTTOM); in amdgpu_gem_va_ioctl()
725 return -EINVAL; in amdgpu_gem_va_ioctl()
728 if (args->va_address >= AMDGPU_GMC_HOLE_START && in amdgpu_gem_va_ioctl()
729 args->va_address < AMDGPU_GMC_HOLE_END) { in amdgpu_gem_va_ioctl()
730 dev_dbg(dev->dev, in amdgpu_gem_va_ioctl()
731 "va_address 0x%llx is in VA hole 0x%llx-0x%llx\n", in amdgpu_gem_va_ioctl()
732 args->va_address, AMDGPU_GMC_HOLE_START, in amdgpu_gem_va_ioctl()
734 return -EINVAL; in amdgpu_gem_va_ioctl()
737 args->va_address &= AMDGPU_GMC_HOLE_MASK; in amdgpu_gem_va_ioctl()
739 vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE; in amdgpu_gem_va_ioctl()
740 vm_size -= AMDGPU_VA_RESERVED_TOP; in amdgpu_gem_va_ioctl()
741 if (args->va_address + args->map_size > vm_size) { in amdgpu_gem_va_ioctl()
742 dev_dbg(dev->dev, in amdgpu_gem_va_ioctl()
744 args->va_address + args->map_size, vm_size); in amdgpu_gem_va_ioctl()
745 return -EINVAL; in amdgpu_gem_va_ioctl()
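These checks carve out the valid GPU VA range: addresses below AMDGPU_VA_RESERVED_BOTTOM or beyond the managed size are rejected, and so is the hole between AMDGPU_GMC_HOLE_START and AMDGPU_GMC_HOLE_END, i.e. the non-canonical middle of the sign-extended virtual address space on chips that use it; masking with AMDGPU_GMC_HOLE_MASK then strips the sign-extension bits before the address is used internally.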
748 if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) { in amdgpu_gem_va_ioctl()
749 dev_dbg(dev->dev, "invalid flags combination 0x%08X\n", in amdgpu_gem_va_ioctl()
750 args->flags); in amdgpu_gem_va_ioctl()
751 return -EINVAL; in amdgpu_gem_va_ioctl()
754 switch (args->operation) { in amdgpu_gem_va_ioctl()
761 dev_dbg(dev->dev, "unsupported operation %d\n", in amdgpu_gem_va_ioctl()
762 args->operation); in amdgpu_gem_va_ioctl()
763 return -EINVAL; in amdgpu_gem_va_ioctl()
766 if ((args->operation != AMDGPU_VA_OP_CLEAR) && in amdgpu_gem_va_ioctl()
767 !(args->flags & AMDGPU_VM_PAGE_PRT)) { in amdgpu_gem_va_ioctl()
768 gobj = drm_gem_object_lookup(filp, args->handle); in amdgpu_gem_va_ioctl()
770 return -ENOENT; in amdgpu_gem_va_ioctl()
777 drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT | in amdgpu_gem_va_ioctl()
779 drm_exec_until_all_locked(&exec) { in amdgpu_gem_va_ioctl()
781 r = drm_exec_lock_obj(&exec, gobj); in amdgpu_gem_va_ioctl()
782 drm_exec_retry_on_contention(&exec); in amdgpu_gem_va_ioctl()
787 r = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 2); in amdgpu_gem_va_ioctl()
788 drm_exec_retry_on_contention(&exec); in amdgpu_gem_va_ioctl()
794 bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo); in amdgpu_gem_va_ioctl()
796 r = -ENOENT; in amdgpu_gem_va_ioctl()
799 } else if (args->operation != AMDGPU_VA_OP_CLEAR) { in amdgpu_gem_va_ioctl()
800 bo_va = fpriv->prt_va; in amdgpu_gem_va_ioctl()
805 switch (args->operation) { in amdgpu_gem_va_ioctl()
807 va_flags = amdgpu_gem_va_map_flags(adev, args->flags); in amdgpu_gem_va_ioctl()
808 r = amdgpu_vm_bo_map(adev, bo_va, args->va_address, in amdgpu_gem_va_ioctl()
809 args->offset_in_bo, args->map_size, in amdgpu_gem_va_ioctl()
813 r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address); in amdgpu_gem_va_ioctl()
817 r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm, in amdgpu_gem_va_ioctl()
818 args->va_address, in amdgpu_gem_va_ioctl()
819 args->map_size); in amdgpu_gem_va_ioctl()
822 va_flags = amdgpu_gem_va_map_flags(adev, args->flags); in amdgpu_gem_va_ioctl()
823 r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address, in amdgpu_gem_va_ioctl()
824 args->offset_in_bo, args->map_size, in amdgpu_gem_va_ioctl()
830 if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !adev->debug_vm) in amdgpu_gem_va_ioctl()
831 amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va, in amdgpu_gem_va_ioctl()
832 args->operation); in amdgpu_gem_va_ioctl()
835 drm_exec_fini(&exec); in amdgpu_gem_va_ioctl()
849 gobj = drm_gem_object_lookup(filp, args->handle); in amdgpu_gem_op_ioctl()
851 return -ENOENT; in amdgpu_gem_op_ioctl()
859 switch (args->op) { in amdgpu_gem_op_ioctl()
862 void __user *out = u64_to_user_ptr(args->value); in amdgpu_gem_op_ioctl()
864 info.bo_size = robj->tbo.base.size; in amdgpu_gem_op_ioctl()
865 info.alignment = robj->tbo.page_alignment << PAGE_SHIFT; in amdgpu_gem_op_ioctl()
866 info.domains = robj->preferred_domains; in amdgpu_gem_op_ioctl()
867 info.domain_flags = robj->flags; in amdgpu_gem_op_ioctl()
870 r = -EFAULT; in amdgpu_gem_op_ioctl()
874 if (robj->tbo.base.import_attach && in amdgpu_gem_op_ioctl()
875 args->value & AMDGPU_GEM_DOMAIN_VRAM) { in amdgpu_gem_op_ioctl()
876 r = -EINVAL; in amdgpu_gem_op_ioctl()
880 if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) { in amdgpu_gem_op_ioctl()
881 r = -EPERM; in amdgpu_gem_op_ioctl()
885 for (base = robj->vm_bo; base; base = base->next) in amdgpu_gem_op_ioctl()
886 if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev), in amdgpu_gem_op_ioctl()
887 amdgpu_ttm_adev(base->vm->root.bo->tbo.bdev))) { in amdgpu_gem_op_ioctl()
888 r = -EINVAL; in amdgpu_gem_op_ioctl()
894 robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM | in amdgpu_gem_op_ioctl()
897 robj->allowed_domains = robj->preferred_domains; in amdgpu_gem_op_ioctl()
898 if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) in amdgpu_gem_op_ioctl()
899 robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT; in amdgpu_gem_op_ioctl()
901 if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) in amdgpu_gem_op_ioctl()
908 r = -EINVAL; in amdgpu_gem_op_ioctl()
947 struct amdgpu_fpriv *fpriv = file_priv->driver_priv; in amdgpu_mode_dumb_create()
961 if (adev->mman.buffer_funcs_enabled) in amdgpu_mode_dumb_create()
964 args->pitch = amdgpu_gem_align_pitch(adev, args->width, in amdgpu_mode_dumb_create()
965 DIV_ROUND_UP(args->bpp, 8), 0); in amdgpu_mode_dumb_create()
966 args->size = (u64)args->pitch * args->height; in amdgpu_mode_dumb_create()
967 args->size = ALIGN(args->size, PAGE_SIZE); in amdgpu_mode_dumb_create()
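As a worked example (assuming a 4 KiB page size): a 1920x1080 dumb buffer at 32 bpp gives cpp = 4 and a pitch of 1920 * 4 = 7680 bytes (1920 is already a multiple of the 64-pixel alignment used for 4-byte formats), so the size is 7680 * 1080 = 8294400 bytes, which happens to be page-aligned already.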
970 r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags, in amdgpu_mode_dumb_create()
971 ttm_bo_type_device, NULL, &gobj, fpriv->xcp_id + 1); in amdgpu_mode_dumb_create()
973 return -ENOMEM; in amdgpu_mode_dumb_create()
976 /* drop reference from allocate - handle holds it now */ in amdgpu_mode_dumb_create()
981 args->handle = handle; in amdgpu_mode_dumb_create()
988 struct amdgpu_device *adev = m->private; in amdgpu_debugfs_gem_info_show()
993 r = mutex_lock_interruptible(&dev->filelist_mutex); in amdgpu_debugfs_gem_info_show()
997 list_for_each_entry(file, &dev->filelist, lhead) { in amdgpu_debugfs_gem_info_show()
1004 * Although we have a valid reference on file->pid, that does in amdgpu_debugfs_gem_info_show()
1007 * Therefore, we need to protect this ->comm access using RCU. in amdgpu_debugfs_gem_info_show()
1010 pid = rcu_dereference(file->pid); in amdgpu_debugfs_gem_info_show()
1013 task ? task->comm : "<unknown>"); in amdgpu_debugfs_gem_info_show()
1016 spin_lock(&file->table_lock); in amdgpu_debugfs_gem_info_show()
1017 idr_for_each_entry(&file->object_idr, gobj, id) { in amdgpu_debugfs_gem_info_show()
1022 spin_unlock(&file->table_lock); in amdgpu_debugfs_gem_info_show()
1025 mutex_unlock(&dev->filelist_mutex); in amdgpu_debugfs_gem_info_show()
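The RCU comment above exists because file->pid pins only the struct pid, not the task itself, so the task and its ->comm may go away at any moment; the lookup therefore has to happen under rcu_read_lock(). A minimal sketch of that access pattern, assuming the seq_file m used by the show callback:

	struct task_struct *task;
	struct pid *pid;

	rcu_read_lock();
	pid = rcu_dereference(file->pid);
	task = pid_task(pid, PIDTYPE_TGID);
	seq_printf(m, "pid %8d command %s:\n", pid_nr(pid),
		   task ? task->comm : "<unknown>");
	rcu_read_unlock();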
1036 struct drm_minor *minor = adev_to_drm(adev)->primary; in amdgpu_debugfs_gem_init()
1037 struct dentry *root = minor->debugfs_root; in amdgpu_debugfs_gem_init()