Lines matching +full:dont +full:- +full:validate in drivers/gpu/drm/radeon/radeon_gem.c (Linux kernel). Matches are grouped below by the function that contains them; the line numbers in each heading refer to the source file, and unmatched context between hits is elided.
File scope (line 30):

        #include <linux/iosys-map.h>
In radeon_gem_fault() (lines 49-72):

        struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
        struct radeon_device *rdev = radeon_get_rdev(bo->bdev);
        down_read(&rdev->pm.mclk_lock);
        ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
                                       TTM_BO_VM_NUM_PREFAULT);
        if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
        dma_resv_unlock(bo->base.resv);
        up_read(&rdev->pm.mclk_lock);
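The matched lines above trace the standard TTM fault-handler shape: reserve the BO against the fault, populate the PTEs with ttm_bo_vm_fault_reserved(), then drop the reservation unless TTM already dropped it on a retry. A minimal sketch of that pattern, with radeon's extra pm.mclk_lock handling omitted; the function name is hypothetical and the header path is the one used by recent kernels:

        #include <drm/ttm/ttm_bo.h>

        static vm_fault_t my_gem_fault(struct vm_fault *vmf)
        {
                struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
                vm_fault_t ret;

                /* Takes the dma-resv lock; may return VM_FAULT_RETRY on contention. */
                ret = ttm_bo_vm_reserve(bo, vmf);
                if (ret)
                        return ret;

                ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
                                               TTM_BO_VM_NUM_PREFAULT);
                /* On VM_FAULT_RETRY without RETRY_NOWAIT, TTM has already dropped
                 * the reservation for us, so it must not be unlocked again. */
                if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
                        return ret;

                dma_resv_unlock(bo->base.resv);
                return ret;
        }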
In radeon_gem_object_free() (line 89):

        ttm_bo_put(&robj->tbo);
In radeon_gem_object_create() (lines 111-137):

        max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
        return -ENOMEM;
        if (r != -ERESTARTSYS) {
        *obj = &robj->tbo.base;
        robj->pid = task_pid_nr(current);
        mutex_lock(&rdev->gem.mutex);
        list_add_tail(&robj->list, &rdev->gem.objects);
        mutex_unlock(&rdev->gem.mutex);
In radeon_gem_set_domain() (lines 151-176):

        /* work out where to validate the buffer to */
        r = dma_resv_wait_timeout(robj->tbo.base.resv,
                                  DMA_RESV_USAGE_READ, true, 30 * HZ);
                r = -EBUSY;
        if (r < 0 && r != -EINTR) {
        if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) {
                /* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */
                return -EINVAL;
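dma_resv_wait_timeout() follows the schedule_timeout() convention: it returns the remaining jiffies (> 0) on success, 0 on timeout, and a negative error if interrupted, which is why the code above converts 0 into -EBUSY and then filters out the interrupt case. A sketch of that calling convention; the helper name is hypothetical:

        #include <linux/dma-resv.h>

        /* Wait up to 30s for all fences in the READ usage class, mapping the
         * schedule_timeout()-style return value onto a plain errno. */
        static int wait_bo_idle(struct dma_resv *resv)
        {
                long lret;

                lret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_READ,
                                             true /* interruptible */, 30 * HZ);
                if (lret == 0)
                        return -EBUSY;          /* timed out, fences still pending */
                return lret < 0 ? lret : 0;     /* < 0: error, e.g. interrupted */
        }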
In radeon_gem_init() (line 183):

        INIT_LIST_HEAD(&rdev->gem.objects);
In radeon_gem_object_open() (lines 199-219):

        struct radeon_device *rdev = rbo->rdev;
        struct radeon_fpriv *fpriv = file_priv->driver_priv;
        struct radeon_vm *vm = &fpriv->vm;
        if ((rdev->family < CHIP_CAYMAN) ||
            (!rdev->accel_working)) {
        ++bo_va->ref_count;
In radeon_gem_object_close() (lines 230-249):

        struct radeon_device *rdev = rbo->rdev;
        struct radeon_fpriv *fpriv = file_priv->driver_priv;
        struct radeon_vm *vm = &fpriv->vm;
        if ((rdev->family < CHIP_CAYMAN) ||
            (!rdev->accel_working)) {
        dev_err(rdev->dev, "leaking bo va because "
                "we fail to reserve bo (%d)\n", r);
        if (--bo_va->ref_count == 0) {
In radeon_gem_handle_lockup() (lines 258-261):

        if (r == -EDEADLK) {
                r = -EAGAIN;
In radeon_gem_object_mmap() (lines 269-272):

        struct radeon_device *rdev = radeon_get_rdev(bo->tbo.bdev);
        if (radeon_ttm_tt_has_userptr(rdev, bo->tbo.ttm))
                return -EPERM;
In radeon_gem_info_ioctl() (lines 297-307):

        struct radeon_device *rdev = dev->dev_private;
        man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);
        args->vram_size = (u64)man->size << PAGE_SHIFT;
        args->vram_visible = rdev->mc.visible_vram_size;
        args->vram_visible -= rdev->vram_pin_size;
        args->gart_size = rdev->mc.gtt_size;
        args->gart_size -= rdev->gart_pin_size;
In radeon_gem_create_ioctl() (lines 315-341):

        struct radeon_device *rdev = dev->dev_private;
        down_read(&rdev->exclusive_lock);
        args->size = roundup(args->size, PAGE_SIZE);
        r = radeon_gem_object_create(rdev, args->size, args->alignment,
                                     args->initial_domain, args->flags,
                                     false, &gobj);
        up_read(&rdev->exclusive_lock);
        /* drop reference from allocate - handle holds it now */
        up_read(&rdev->exclusive_lock);
        args->handle = handle;
        up_read(&rdev->exclusive_lock);
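From userspace, this path is reached through DRM_IOCTL_RADEON_GEM_CREATE in the radeon UAPI. A minimal sketch; the device path and the libdrm-installed header location are assumptions:

        #include <fcntl.h>
        #include <stdint.h>
        #include <stdio.h>
        #include <unistd.h>
        #include <sys/ioctl.h>
        #include <libdrm/radeon_drm.h>

        int main(void)
        {
                int fd = open("/dev/dri/renderD128", O_RDWR);   /* path is an assumption */
                struct drm_radeon_gem_create req = {
                        .size = 1 << 20,        /* kernel rounds this up to PAGE_SIZE anyway */
                        .alignment = 4096,
                        .initial_domain = RADEON_GEM_DOMAIN_GTT,
                };

                if (fd < 0 || ioctl(fd, DRM_IOCTL_RADEON_GEM_CREATE, &req)) {
                        perror("GEM create");
                        return 1;
                }
                printf("new GEM handle: %u\n", req.handle);
                close(fd);
                return 0;
        }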
In radeon_gem_userptr_ioctl() (lines 349-430):

        struct radeon_device *rdev = dev->dev_private;
        args->addr = untagged_addr(args->addr);
        if (offset_in_page(args->addr | args->size))
                return -EINVAL;
        if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
                return -EINVAL;
        if (args->flags & RADEON_GEM_USERPTR_READONLY) {
                if (rdev->family < CHIP_R600)
                        return -EINVAL;
        } else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
                   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {
                return -EACCES;
        down_read(&rdev->exclusive_lock);
        r = radeon_gem_object_create(rdev, args->size, 0,
                                     RADEON_GEM_DOMAIN_CPU, 0,
                                     false, &gobj);
        r = radeon_ttm_tt_set_userptr(rdev, bo->tbo.ttm, args->addr, args->flags);
        if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
                r = radeon_mn_register(bo, args->addr);
        if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
                mmap_read_lock(current->mm);
                mmap_read_unlock(current->mm);
                r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                mmap_read_unlock(current->mm);
        /* drop reference from allocate - handle holds it now */
        args->handle = handle;
        up_read(&rdev->exclusive_lock);
        up_read(&rdev->exclusive_lock);
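The flag policy above means an unprivileged caller must pass both ANONONLY and REGISTER (or hold CAP_SYS_ADMIN), the address and size must be page aligned, and READONLY requires an R600-class GPU or newer. A hedged userspace sketch of the unprivileged combination; the helper name is hypothetical:

        #include <stdint.h>
        #include <sys/ioctl.h>
        #include <libdrm/radeon_drm.h>

        /* Wrap a page-aligned, page-multiple anonymous buffer as a GEM object. */
        static int wrap_userptr(int fd, void *ptr, uint64_t size, uint32_t *handle)
        {
                struct drm_radeon_gem_userptr args = {
                        .addr = (uintptr_t)ptr,
                        .size = size,
                        /* ANONONLY + REGISTER is the combination the kernel
                         * accepts without CAP_SYS_ADMIN, per the checks above. */
                        .flags = RADEON_GEM_USERPTR_ANONONLY |
                                 RADEON_GEM_USERPTR_REGISTER,
                };
                int r = ioctl(fd, DRM_IOCTL_RADEON_GEM_USERPTR, &args);

                if (!r)
                        *handle = args.handle;
                return r;
        }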
In radeon_gem_set_domain_ioctl() (lines 439-460):

        /* transition the BO to a domain -
         * just validate the BO into a certain domain */
        struct radeon_device *rdev = dev->dev_private;
        /* for now if someone requests domain CPU -
         * just make sure the buffer is finished with */
        down_read(&rdev->exclusive_lock);
        gobj = drm_gem_object_lookup(filp, args->handle);
        up_read(&rdev->exclusive_lock);
        return -ENOENT;
        r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
        up_read(&rdev->exclusive_lock);
In radeon_mode_dumb_mmap() (lines 474-479):

        return -ENOENT;
        if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm)) {
                return -EPERM;
In radeon_gem_mmap_ioctl() (line 491):

        return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
In radeon_gem_busy_ioctl() (lines 503-516):

        gobj = drm_gem_object_lookup(filp, args->handle);
        return -ENOENT;
        r = dma_resv_test_signaled(robj->tbo.base.resv, DMA_RESV_USAGE_READ);
        r = -EBUSY;
        cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
        args->domain = radeon_mem_type_to_domain(cur_placement);
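dma_resv_test_signaled() is the non-blocking counterpart of the wait: it returns true only when every fence in the given usage class has signaled, so the busy ioctl maps false onto -EBUSY. A tiny sketch; the helper name is hypothetical:

        #include <linux/dma-resv.h>

        /* Non-blocking idle check, as the busy ioctl does. */
        static int bo_busy(struct dma_resv *resv)
        {
                /* true only when every READ-usage fence has signaled */
                return dma_resv_test_signaled(resv, DMA_RESV_USAGE_READ) ? 0 : -EBUSY;
        }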
In radeon_gem_wait_idle_ioctl() (lines 524-549):

        struct radeon_device *rdev = dev->dev_private;
        gobj = drm_gem_object_lookup(filp, args->handle);
        return -ENOENT;
        ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ,
                                    true, 30 * HZ);
        r = -EBUSY;
        cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
        if (rdev->asic->mmio_hdp_flush &&
            radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
                robj->rdev->asic->mmio_hdp_flush(rdev);
In radeon_gem_set_tiling_ioctl() (lines 563-568):

        DRM_DEBUG("%d \n", args->handle);
        gobj = drm_gem_object_lookup(filp, args->handle);
        return -ENOENT;
        r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
In radeon_gem_get_tiling_ioctl() (lines 582-589):

        gobj = drm_gem_object_lookup(filp, args->handle);
        return -ENOENT;
        radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
In radeon_gem_va_update_vm() (kernel-doc and lines 597-659):

        * radeon_gem_va_update_vm - update the bo_va in its VM
        vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
        r = drm_exec_prepare_obj(&exec, &entry->robj->tbo.base, 1);
        r = drm_exec_prepare_obj(&exec, &bo_va->bo->tbo.base, 1);
        domain = radeon_mem_type_to_domain(entry->robj->tbo.resource->mem_type);
        mutex_lock(&bo_va->vm->mutex);
        r = radeon_vm_clear_freed(rdev, bo_va->vm);
        if (bo_va->it.start && bo_va->bo)
                r = radeon_vm_bo_update(rdev, bo_va, bo_va->bo->tbo.resource);
        mutex_unlock(&bo_va->vm->mutex);
        if (r && r != -ERESTARTSYS)
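The drm_exec_prepare_obj() calls above sit inside the standard lock-all-or-back-off loop: drm_exec_until_all_locked() re-runs its body whenever drm_exec_retry_on_contention() observes a ww-mutex backoff, so every object ends up locked without deadlock. A sketch of that loop; the helper name is hypothetical, and the three-argument drm_exec_init() (flags plus preallocation count) is an assumption that holds on kernels where the count parameter exists:

        #include <drm/drm_exec.h>

        /* Lock one GEM object the drm_exec way. */
        static int lock_one(struct drm_gem_object *obj)
        {
                struct drm_exec exec;
                int r = 0;

                drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
                drm_exec_until_all_locked(&exec) {
                        /* lock the object and reserve one fence slot */
                        r = drm_exec_prepare_obj(&exec, obj, 1);
                        drm_exec_retry_on_contention(&exec);    /* restart on backoff */
                        if (r)
                                break;
                }
                /* ... use the locked object, add fences ... */
                drm_exec_fini(&exec);
                return r;
        }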
In radeon_gem_va_ioctl() (lines 668-762):

        struct radeon_device *rdev = dev->dev_private;
        struct radeon_fpriv *fpriv = filp->driver_priv;
        if (!rdev->vm_manager.enabled) {
                args->operation = RADEON_VA_RESULT_ERROR;
                return -ENOTTY;
        /* !! DONT REMOVE !!
        if (args->vm_id) {
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        if (args->offset < RADEON_VA_RESERVED_SIZE) {
                dev_err(dev->dev,
                        "offset 0x%lX is in reserved area 0x%X\n",
                        (unsigned long)args->offset,
                        RADEON_VA_RESERVED_SIZE);
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        if ((args->flags & invalid_flags)) {
                dev_err(dev->dev, "invalid flags 0x%08X vs 0x%08X\n",
                        args->flags, invalid_flags);
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        switch (args->operation) {
        dev_err(dev->dev, "unsupported operation %d\n",
                args->operation);
        args->operation = RADEON_VA_RESULT_ERROR;
        return -EINVAL;
        gobj = drm_gem_object_lookup(filp, args->handle);
        args->operation = RADEON_VA_RESULT_ERROR;
        return -ENOENT;
        args->operation = RADEON_VA_RESULT_ERROR;
        bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
        args->operation = RADEON_VA_RESULT_ERROR;
        return -ENOENT;
        switch (args->operation) {
        if (bo_va->it.start) {
                args->operation = RADEON_VA_RESULT_VA_EXIST;
                args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
        r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
        args->operation = RADEON_VA_RESULT_OK;
        args->operation = RADEON_VA_RESULT_ERROR;
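From userspace the VA ioctl takes a handle, an operation, and a GPU address, and the kernel reports the outcome in args->operation as well as in the return code, which is why every error path above also stores RADEON_VA_RESULT_ERROR. A hedged sketch of a map request; the helper name is hypothetical and the caller must pick a gpu_addr above the kernel's reserved area:

        #include <stdint.h>
        #include <sys/ioctl.h>
        #include <libdrm/radeon_drm.h>

        static int map_bo_va(int fd, uint32_t handle, uint64_t gpu_addr)
        {
                struct drm_radeon_gem_va va = {
                        .handle = handle,
                        .operation = RADEON_VA_MAP,
                        .vm_id = 0,     /* anything else is rejected, see above */
                        .flags = RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_WRITEABLE,
                        .offset = gpu_addr,
                };

                if (ioctl(fd, DRM_IOCTL_RADEON_GEM_VA, &va))
                        return -1;
                /* RADEON_VA_RESULT_VA_EXIST means the BO is already mapped;
                 * the existing address comes back in va.offset. */
                return va.operation == RADEON_VA_RESULT_OK ? 0 : -1;
        }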
In radeon_gem_op_ioctl() (lines 777-801):

        gobj = drm_gem_object_lookup(filp, args->handle);
        return -ENOENT;
        r = -EPERM;
        if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm))
        switch (args->op) {
        args->value = robj->initial_domain;
        robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
                                              RADEON_GEM_DOMAIN_GTT |
                                              RADEON_GEM_DOMAIN_CPU);
        r = -EINVAL;
In radeon_mode_dumb_create() (lines 838-860):

        struct radeon_device *rdev = dev->dev_private;
        args->pitch = radeon_align_pitch(rdev, args->width,
                                         DIV_ROUND_UP(args->bpp, 8), 0);
        args->size = (u64)args->pitch * args->height;
        args->size = ALIGN(args->size, PAGE_SIZE);
        r = radeon_gem_object_create(rdev, args->size, 0,
                                     RADEON_GEM_DOMAIN_VRAM, 0,
                                     false, &gobj);
        return -ENOMEM;
        /* drop reference from allocate - handle holds it now */
        args->handle = handle;
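The size computation above is pitch times height, rounded up to a whole page. A worked example, assuming a hypothetical 1920x1080, 32 bpp buffer, 4096-byte pages, and that radeon_align_pitch() adds no padding at this width:

        /* cpp   = DIV_ROUND_UP(32, 8)   = 4 bytes per pixel
         * pitch = 1920 * 4              = 7680 bytes per row (if unpadded)
         * size  = 7680 * 1080           = 8294400 bytes
         * ALIGN(8294400, 4096)          = 8294400 (already page aligned)
         */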
In radeon_debugfs_gem_info_show() (lines 867-894):

        struct radeon_device *rdev = m->private;
        mutex_lock(&rdev->gem.mutex);
        list_for_each_entry(rbo, &rdev->gem.objects, list) {
        domain = radeon_mem_type_to_domain(rbo->tbo.resource->mem_type);
        placement, (unsigned long)rbo->pid);
        mutex_unlock(&rdev->gem.mutex);
In radeon_gem_debugfs_init() (line 904):

        struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root;