1 // SPDX-License-Identifier: MIT
40 if (adev->userq_funcs[i]) in amdgpu_userq_get_supported_ip_mask()
47 int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr, in amdgpu_userq_input_va_validate() argument
58 r = amdgpu_bo_reserve(vm->root.bo, false); in amdgpu_userq_input_va_validate()
62 va_map = amdgpu_vm_bo_lookup_mapping(vm, user_addr); in amdgpu_userq_input_va_validate()
64 r = -EINVAL; in amdgpu_userq_input_va_validate()
67 /* Only validate whether the userq resides within the VM mapping range */ in amdgpu_userq_input_va_validate()
68 if (user_addr >= va_map->start && in amdgpu_userq_input_va_validate()
69 va_map->last - user_addr + 1 >= size) { in amdgpu_userq_input_va_validate()
70 amdgpu_bo_unreserve(vm->root.bo); in amdgpu_userq_input_va_validate()
74 r = -EINVAL; in amdgpu_userq_input_va_validate()
76 amdgpu_bo_unreserve(vm->root.bo); in amdgpu_userq_input_va_validate()
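The containment test above accepts a queue address only when the looked-up mapping starts at or before it and still has at least `size` pages left from that address onward. A minimal standalone sketch of the same check, assuming (as for struct amdgpu_bo_va_mapping) that start, last and the queried range are all in GPU-page units; range_is_mapped() is a hypothetical helper, not driver code:

#include <linux/types.h>

/* Hypothetical helper illustrating the containment test used above:
 * the queried range must begin inside the mapping and must not run
 * past the mapping's last page.
 */
static bool range_is_mapped(u64 map_start, u64 map_last,
			    u64 range_start, u64 range_pages)
{
	if (range_start < map_start)
		return false;
	/* pages remaining in the mapping from range_start onward */
	return map_last - range_start + 1 >= range_pages;
}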
84 struct amdgpu_device *adev = uq_mgr->adev; in amdgpu_userq_preempt_helper()
86 adev->userq_funcs[queue->queue_type]; in amdgpu_userq_preempt_helper()
89 if (queue->state == AMDGPU_USERQ_STATE_MAPPED) { in amdgpu_userq_preempt_helper()
90 r = userq_funcs->preempt(uq_mgr, queue); in amdgpu_userq_preempt_helper()
92 queue->state = AMDGPU_USERQ_STATE_HUNG; in amdgpu_userq_preempt_helper()
94 queue->state = AMDGPU_USERQ_STATE_PREEMPTED; in amdgpu_userq_preempt_helper()
105 struct amdgpu_device *adev = uq_mgr->adev; in amdgpu_userq_restore_helper()
107 adev->userq_funcs[queue->queue_type]; in amdgpu_userq_restore_helper()
110 if (queue->state == AMDGPU_USERQ_STATE_PREEMPTED) { in amdgpu_userq_restore_helper()
111 r = userq_funcs->restore(uq_mgr, queue); in amdgpu_userq_restore_helper()
113 queue->state = AMDGPU_USERQ_STATE_HUNG; in amdgpu_userq_restore_helper()
115 queue->state = AMDGPU_USERQ_STATE_MAPPED; in amdgpu_userq_restore_helper()
126 struct amdgpu_device *adev = uq_mgr->adev; in amdgpu_userq_unmap_helper()
128 adev->userq_funcs[queue->queue_type]; in amdgpu_userq_unmap_helper()
131 if ((queue->state == AMDGPU_USERQ_STATE_MAPPED) || in amdgpu_userq_unmap_helper()
132 (queue->state == AMDGPU_USERQ_STATE_PREEMPTED)) { in amdgpu_userq_unmap_helper()
133 r = userq_funcs->unmap(uq_mgr, queue); in amdgpu_userq_unmap_helper()
135 queue->state = AMDGPU_USERQ_STATE_HUNG; in amdgpu_userq_unmap_helper()
137 queue->state = AMDGPU_USERQ_STATE_UNMAPPED; in amdgpu_userq_unmap_helper()
146 struct amdgpu_device *adev = uq_mgr->adev; in amdgpu_userq_map_helper()
148 adev->userq_funcs[queue->queue_type]; in amdgpu_userq_map_helper()
151 if (queue->state == AMDGPU_USERQ_STATE_UNMAPPED) { in amdgpu_userq_map_helper()
152 r = userq_funcs->map(uq_mgr, queue); in amdgpu_userq_map_helper()
154 queue->state = AMDGPU_USERQ_STATE_HUNG; in amdgpu_userq_map_helper()
156 queue->state = AMDGPU_USERQ_STATE_MAPPED; in amdgpu_userq_map_helper()
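Taken together, the preempt/restore/unmap/map helpers above drive a small per-queue state machine, with any failure of the underlying userq_funcs call parking the queue in the HUNG state. A comment-form summary (the state names come from the code above; the table itself is editorial):

/*
 * Per-queue state transitions driven by the helpers above
 * (a failed call moves the queue to AMDGPU_USERQ_STATE_HUNG):
 *
 *   preempt_helper:  MAPPED               -> PREEMPTED
 *   restore_helper:  PREEMPTED            -> MAPPED
 *   unmap_helper:    MAPPED or PREEMPTED  -> UNMAPPED
 *   map_helper:      UNMAPPED             -> MAPPED
 */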
166 struct dma_fence *f = queue->last_fence; in amdgpu_userq_wait_for_last_fence()
172 drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n", in amdgpu_userq_wait_for_last_fence()
173 f->context, f->seqno); in amdgpu_userq_wait_for_last_fence()
182 struct amdgpu_device *adev = uq_mgr->adev; in amdgpu_userq_cleanup()
183 const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type]; in amdgpu_userq_cleanup()
185 uq_funcs->mqd_destroy(uq_mgr, queue); in amdgpu_userq_cleanup()
187 idr_remove(&uq_mgr->userq_idr, queue_id); in amdgpu_userq_cleanup()
194 return idr_find(&uq_mgr->userq_idr, qid); in amdgpu_userq_find()
205 flush_delayed_work(&uq_mgr->resume_work); in amdgpu_userq_ensure_ev_fence()
207 mutex_lock(&uq_mgr->userq_mutex); in amdgpu_userq_ensure_ev_fence()
208 spin_lock(&evf_mgr->ev_fence_lock); in amdgpu_userq_ensure_ev_fence()
209 ev_fence = evf_mgr->ev_fence; in amdgpu_userq_ensure_ev_fence()
210 spin_unlock(&evf_mgr->ev_fence_lock); in amdgpu_userq_ensure_ev_fence()
211 if (!ev_fence || dma_fence_is_signaled(&ev_fence->base)) { in amdgpu_userq_ensure_ev_fence()
212 mutex_unlock(&uq_mgr->userq_mutex); in amdgpu_userq_ensure_ev_fence()
217 schedule_delayed_work(&uq_mgr->resume_work, 0); in amdgpu_userq_ensure_ev_fence()
226 struct amdgpu_device *adev = uq_mgr->adev; in amdgpu_userq_create_object()
240 r = amdgpu_bo_create(adev, &bp, &userq_obj->obj); in amdgpu_userq_create_object()
242 drm_file_err(uq_mgr->file, "Failed to allocate BO for userqueue (%d)", r); in amdgpu_userq_create_object()
246 r = amdgpu_bo_reserve(userq_obj->obj, true); in amdgpu_userq_create_object()
248 drm_file_err(uq_mgr->file, "Failed to reserve BO to map (%d)", r); in amdgpu_userq_create_object()
252 r = amdgpu_ttm_alloc_gart(&(userq_obj->obj)->tbo); in amdgpu_userq_create_object()
254 drm_file_err(uq_mgr->file, "Failed to alloc GART for userqueue object (%d)", r); in amdgpu_userq_create_object()
258 r = amdgpu_bo_kmap(userq_obj->obj, &userq_obj->cpu_ptr); in amdgpu_userq_create_object()
260 drm_file_err(uq_mgr->file, "Failed to map BO for userqueue (%d)", r); in amdgpu_userq_create_object()
264 userq_obj->gpu_addr = amdgpu_bo_gpu_offset(userq_obj->obj); in amdgpu_userq_create_object()
265 amdgpu_bo_unreserve(userq_obj->obj); in amdgpu_userq_create_object()
266 memset(userq_obj->cpu_ptr, 0, size); in amdgpu_userq_create_object()
270 amdgpu_bo_unreserve(userq_obj->obj); in amdgpu_userq_create_object()
273 amdgpu_bo_unref(&userq_obj->obj); in amdgpu_userq_create_object()
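amdgpu_userq_create_object() above performs the create, reserve, GART bind, kmap and unreserve steps by hand so each one can be unwound on error. A hedged sketch of the equivalent allocation through the driver's combined helper, where the GTT placement and PAGE_SIZE alignment are illustrative assumptions only and the matching teardown is amdgpu_bo_free_kernel():

#include "amdgpu.h"	/* amdgpu_bo_create_kernel(); struct amdgpu_userq_obj assumed from amdgpu_userq.h */

/* Sketch only: one-call allocation of a kernel-owned, CPU-mapped BO. */
static int userq_obj_create_sketch(struct amdgpu_device *adev,
				   struct amdgpu_userq_obj *obj, int size)
{
	int r;

	r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,	/* assumed placement */
				    &obj->obj, &obj->gpu_addr,
				    &obj->cpu_ptr);
	if (r)
		return r;

	memset(obj->cpu_ptr, 0, size);
	return 0;
	/* teardown: amdgpu_bo_free_kernel(&obj->obj, &obj->gpu_addr, &obj->cpu_ptr); */
}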
280 amdgpu_bo_kunmap(userq_obj->obj); in amdgpu_userq_destroy_object()
281 amdgpu_bo_unref(&userq_obj->obj); in amdgpu_userq_destroy_object()
291 struct amdgpu_userq_obj *db_obj = db_info->db_obj; in amdgpu_userq_get_doorbell_index()
294 gobj = drm_gem_object_lookup(filp, db_info->doorbell_handle); in amdgpu_userq_get_doorbell_index()
296 drm_file_err(uq_mgr->file, "Can't find GEM object for doorbell\n"); in amdgpu_userq_get_doorbell_index()
297 return -EINVAL; in amdgpu_userq_get_doorbell_index()
300 db_obj->obj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); in amdgpu_userq_get_doorbell_index()
303 r = amdgpu_bo_reserve(db_obj->obj, true); in amdgpu_userq_get_doorbell_index()
305 drm_file_err(uq_mgr->file, "[Usermode queues] Failed to reserve doorbell object\n"); in amdgpu_userq_get_doorbell_index()
310 r = amdgpu_bo_pin(db_obj->obj, AMDGPU_GEM_DOMAIN_DOORBELL); in amdgpu_userq_get_doorbell_index()
312 drm_file_err(uq_mgr->file, "[Usermode queues] Failed to pin doorbell object\n"); in amdgpu_userq_get_doorbell_index()
316 switch (db_info->queue_type) { in amdgpu_userq_get_doorbell_index()
325 db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VCN0_1 << 1; in amdgpu_userq_get_doorbell_index()
330 db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VPE << 1; in amdgpu_userq_get_doorbell_index()
334 drm_file_err(uq_mgr->file, "[Usermode queues] IP %d not supported\n", in amdgpu_userq_get_doorbell_index()
335 db_info->queue_type); in amdgpu_userq_get_doorbell_index()
336 r = -EINVAL; in amdgpu_userq_get_doorbell_index()
340 index = amdgpu_doorbell_index_on_bar(uq_mgr->adev, db_obj->obj, in amdgpu_userq_get_doorbell_index()
341 db_info->doorbell_offset, db_size); in amdgpu_userq_get_doorbell_index()
342 drm_dbg_driver(adev_to_drm(uq_mgr->adev), in amdgpu_userq_get_doorbell_index()
344 amdgpu_bo_unreserve(db_obj->obj); in amdgpu_userq_get_doorbell_index()
348 amdgpu_bo_unpin(db_obj->obj); in amdgpu_userq_get_doorbell_index()
350 amdgpu_bo_unreserve(db_obj->obj); in amdgpu_userq_get_doorbell_index()
352 amdgpu_bo_unref(&db_obj->obj); in amdgpu_userq_get_doorbell_index()
359 struct amdgpu_fpriv *fpriv = filp->driver_priv; in amdgpu_userq_destroy()
360 struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr; in amdgpu_userq_destroy()
361 struct amdgpu_device *adev = uq_mgr->adev; in amdgpu_userq_destroy()
365 cancel_delayed_work_sync(&uq_mgr->resume_work); in amdgpu_userq_destroy()
366 mutex_lock(&uq_mgr->userq_mutex); in amdgpu_userq_destroy()
370 drm_dbg_driver(adev_to_drm(uq_mgr->adev), "Invalid queue id to destroy\n"); in amdgpu_userq_destroy()
371 mutex_unlock(&uq_mgr->userq_mutex); in amdgpu_userq_destroy()
372 return -EINVAL; in amdgpu_userq_destroy()
375 r = amdgpu_bo_reserve(queue->db_obj.obj, true); in amdgpu_userq_destroy()
377 amdgpu_bo_unpin(queue->db_obj.obj); in amdgpu_userq_destroy()
378 amdgpu_bo_unreserve(queue->db_obj.obj); in amdgpu_userq_destroy()
380 amdgpu_bo_unref(&queue->db_obj.obj); in amdgpu_userq_destroy()
383 debugfs_remove_recursive(queue->debugfs_queue); in amdgpu_userq_destroy()
388 drm_warn(adev_to_drm(uq_mgr->adev), "trying to destroy a userq still mapped to HW\n"); in amdgpu_userq_destroy()
389 queue->state = AMDGPU_USERQ_STATE_HUNG; in amdgpu_userq_destroy()
392 mutex_unlock(&uq_mgr->userq_mutex); in amdgpu_userq_destroy()
394 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); in amdgpu_userq_destroy()
395 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); in amdgpu_userq_destroy()
412 return -EACCES; in amdgpu_userq_priority_permit()
418 struct amdgpu_usermode_queue *queue = m->private; in amdgpu_mqd_info_read()
422 if (!queue || !queue->mqd.obj) in amdgpu_mqd_info_read()
423 return -EINVAL; in amdgpu_mqd_info_read()
425 bo = amdgpu_bo_ref(queue->mqd.obj); in amdgpu_mqd_info_read()
429 return -EINVAL; in amdgpu_mqd_info_read()
432 seq_printf(m, "queue_type: %d\n", queue->queue_type); in amdgpu_mqd_info_read()
433 seq_printf(m, "mqd_gpu_address: 0x%llx\n", amdgpu_bo_gpu_offset(queue->mqd.obj)); in amdgpu_mqd_info_read()
443 return single_open(file, amdgpu_mqd_info_read, inode->i_private); in amdgpu_mqd_info_open()
458 struct amdgpu_fpriv *fpriv = filp->driver_priv; in amdgpu_userq_create()
459 struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr; in amdgpu_userq_create()
460 struct amdgpu_device *adev = uq_mgr->adev; in amdgpu_userq_create()
469 (args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >> in amdgpu_userq_create()
476 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); in amdgpu_userq_create()
478 drm_file_err(uq_mgr->file, "pm_runtime_get_sync() failed for userqueue create\n"); in amdgpu_userq_create()
479 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); in amdgpu_userq_create()
490 mutex_lock(&adev->userq_mutex); in amdgpu_userq_create()
491 amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr); in amdgpu_userq_create()
493 uq_funcs = adev->userq_funcs[args->in.ip_type]; in amdgpu_userq_create()
495 drm_file_err(uq_mgr->file, "Usermode queue is not supported for this IP (%u)\n", in amdgpu_userq_create()
496 args->in.ip_type); in amdgpu_userq_create()
497 r = -EINVAL; in amdgpu_userq_create()
503 drm_file_err(uq_mgr->file, "Failed to allocate memory for queue\n"); in amdgpu_userq_create()
504 r = -ENOMEM; in amdgpu_userq_create()
509 if (amdgpu_userq_input_va_validate(&fpriv->vm, args->in.queue_va, args->in.queue_size) || in amdgpu_userq_create()
510 amdgpu_userq_input_va_validate(&fpriv->vm, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) || in amdgpu_userq_create()
511 amdgpu_userq_input_va_validate(&fpriv->vm, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) { in amdgpu_userq_create()
512 r = -EINVAL; in amdgpu_userq_create()
516 queue->doorbell_handle = args->in.doorbell_handle; in amdgpu_userq_create()
517 queue->queue_type = args->in.ip_type; in amdgpu_userq_create()
518 queue->vm = &fpriv->vm; in amdgpu_userq_create()
519 queue->priority = priority; in amdgpu_userq_create()
521 db_info.queue_type = queue->queue_type; in amdgpu_userq_create()
522 db_info.doorbell_handle = queue->doorbell_handle; in amdgpu_userq_create()
523 db_info.db_obj = &queue->db_obj; in amdgpu_userq_create()
524 db_info.doorbell_offset = args->in.doorbell_offset; in amdgpu_userq_create()
528 if (index == (uint64_t)-EINVAL) { in amdgpu_userq_create()
529 drm_file_err(uq_mgr->file, "Failed to get doorbell for queue\n"); in amdgpu_userq_create()
531 r = -EINVAL; in amdgpu_userq_create()
535 queue->doorbell_index = index; in amdgpu_userq_create()
536 xa_init_flags(&queue->fence_drv_xa, XA_FLAGS_ALLOC); in amdgpu_userq_create()
539 drm_file_err(uq_mgr->file, "Failed to alloc fence driver\n"); in amdgpu_userq_create()
543 r = uq_funcs->mqd_create(uq_mgr, &args->in, queue); in amdgpu_userq_create()
545 drm_file_err(uq_mgr->file, "Failed to create Queue\n"); in amdgpu_userq_create()
552 qid = idr_alloc(&uq_mgr->userq_idr, queue, 1, AMDGPU_MAX_USERQ_COUNT, GFP_KERNEL); in amdgpu_userq_create()
554 drm_file_err(uq_mgr->file, "Failed to allocate a queue id\n"); in amdgpu_userq_create()
556 uq_funcs->mqd_destroy(uq_mgr, queue); in amdgpu_userq_create()
558 r = -ENOMEM; in amdgpu_userq_create()
562 /* don't map the queue if scheduling is halted */ in amdgpu_userq_create()
563 if (adev->userq_halt_for_enforce_isolation && in amdgpu_userq_create()
564 ((queue->queue_type == AMDGPU_HW_IP_GFX) || in amdgpu_userq_create()
565 (queue->queue_type == AMDGPU_HW_IP_COMPUTE))) in amdgpu_userq_create()
572 drm_file_err(uq_mgr->file, "Failed to map Queue\n"); in amdgpu_userq_create()
573 idr_remove(&uq_mgr->userq_idr, qid); in amdgpu_userq_create()
575 uq_funcs->mqd_destroy(uq_mgr, queue); in amdgpu_userq_create()
581 queue_name = kasprintf(GFP_KERNEL, "queue-%d", qid); in amdgpu_userq_create()
583 r = -ENOMEM; in amdgpu_userq_create()
589 queue->debugfs_queue = debugfs_create_dir(queue_name, filp->debugfs_client); in amdgpu_userq_create()
590 debugfs_create_file("mqd_info", 0444, queue->debugfs_queue, queue, &amdgpu_mqd_info_fops); in amdgpu_userq_create()
594 args->out.queue_id = qid; in amdgpu_userq_create()
597 mutex_unlock(&uq_mgr->userq_mutex); in amdgpu_userq_create()
598 mutex_unlock(&adev->userq_mutex); in amdgpu_userq_create()
609 switch (args->in.op) { in amdgpu_userq_input_args_validate()
611 if (args->in.flags & ~(AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK | in amdgpu_userq_input_args_validate()
613 return -EINVAL; in amdgpu_userq_input_args_validate()
615 if (args->in.ip_type != AMDGPU_HW_IP_GFX && in amdgpu_userq_input_args_validate()
616 args->in.ip_type != AMDGPU_HW_IP_DMA && in amdgpu_userq_input_args_validate()
617 args->in.ip_type != AMDGPU_HW_IP_COMPUTE) { in amdgpu_userq_input_args_validate()
619 args->in.ip_type); in amdgpu_userq_input_args_validate()
620 return -EINVAL; in amdgpu_userq_input_args_validate()
623 if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) && in amdgpu_userq_input_args_validate()
624 (args->in.ip_type != AMDGPU_HW_IP_GFX) && in amdgpu_userq_input_args_validate()
625 (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) && in amdgpu_userq_input_args_validate()
628 return -EINVAL; in amdgpu_userq_input_args_validate()
631 if (args->in.queue_va == AMDGPU_BO_INVALID_OFFSET || in amdgpu_userq_input_args_validate()
632 args->in.queue_va == 0 || in amdgpu_userq_input_args_validate()
633 args->in.queue_size == 0) { in amdgpu_userq_input_args_validate()
635 return -EINVAL; in amdgpu_userq_input_args_validate()
637 if (!args->in.wptr_va || !args->in.rptr_va) { in amdgpu_userq_input_args_validate()
639 return -EINVAL; in amdgpu_userq_input_args_validate()
643 if (args->in.ip_type || in amdgpu_userq_input_args_validate()
644 args->in.doorbell_handle || in amdgpu_userq_input_args_validate()
645 args->in.doorbell_offset || in amdgpu_userq_input_args_validate()
646 args->in.flags || in amdgpu_userq_input_args_validate()
647 args->in.queue_va || in amdgpu_userq_input_args_validate()
648 args->in.queue_size || in amdgpu_userq_input_args_validate()
649 args->in.rptr_va || in amdgpu_userq_input_args_validate()
650 args->in.wptr_va || in amdgpu_userq_input_args_validate()
651 args->in.mqd || in amdgpu_userq_input_args_validate()
652 args->in.mqd_size) in amdgpu_userq_input_args_validate()
653 return -EINVAL; in amdgpu_userq_input_args_validate()
656 return -EINVAL; in amdgpu_userq_input_args_validate()
669 return -EINVAL; in amdgpu_userq_ioctl()
671 switch (args->in.op) { in amdgpu_userq_ioctl()
679 r = amdgpu_userq_destroy(filp, args->in.queue_id); in amdgpu_userq_ioctl()
685 drm_dbg_driver(dev, "Invalid user queue op specified: %d\n", args->in.op); in amdgpu_userq_ioctl()
686 return -EINVAL; in amdgpu_userq_ioctl()
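For context, the create and destroy paths above are reached from user space through the AMDGPU userq ioctl dispatched by amdgpu_userq_ioctl(). A hedged sketch of a minimal caller, assuming the uapi names union drm_amdgpu_userq, DRM_IOCTL_AMDGPU_USERQ and AMDGPU_USERQ_OP_CREATE/AMDGPU_USERQ_OP_FREE from the kernel's amdgpu_drm.h, and that the doorbell GEM handle and the queue/rptr/wptr GPU virtual addresses have already been created and mapped into this file's VM; any per-IP metadata (args.in.mqd / mqd_size) is omitted here:

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/amdgpu_drm.h>	/* kernel uapi header; names below assumed from it */

/* Hypothetical helper: create a GFX user queue and return its id. */
static int create_gfx_userq(int fd, uint32_t db_handle, uint64_t db_offset,
			    uint64_t queue_va, uint64_t queue_size,
			    uint64_t rptr_va, uint64_t wptr_va,
			    uint32_t *queue_id)
{
	union drm_amdgpu_userq args;

	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_USERQ_OP_CREATE;
	args.in.ip_type = AMDGPU_HW_IP_GFX;
	args.in.doorbell_handle = db_handle;
	args.in.doorbell_offset = db_offset;
	args.in.queue_va = queue_va;
	args.in.queue_size = queue_size;
	args.in.rptr_va = rptr_va;
	args.in.wptr_va = wptr_va;

	if (drmIoctl(fd, DRM_IOCTL_AMDGPU_USERQ, &args))
		return -errno;

	*queue_id = args.out.queue_id;
	return 0;
}
/* Destruction mirrors this with args.in.op = AMDGPU_USERQ_OP_FREE and args.in.queue_id set. */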
700 idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) { in amdgpu_userq_restore_all()
707 drm_file_err(uq_mgr->file, "Failed to map all the queues\n"); in amdgpu_userq_restore_all()
715 amdgpu_bo_placement_from_domain(bo, bo->allowed_domains); in amdgpu_userq_validate_vm()
716 return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); in amdgpu_userq_validate_vm()
722 struct amdgpu_vm *vm) in amdgpu_userq_bo_validate() argument
729 spin_lock(&vm->status_lock); in amdgpu_userq_bo_validate()
730 while (!list_empty(&vm->invalidated)) { in amdgpu_userq_bo_validate()
731 bo_va = list_first_entry(&vm->invalidated, in amdgpu_userq_bo_validate()
734 spin_unlock(&vm->status_lock); in amdgpu_userq_bo_validate()
736 bo = bo_va->base.bo; in amdgpu_userq_bo_validate()
737 ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 2); in amdgpu_userq_bo_validate()
741 amdgpu_bo_placement_from_domain(bo, bo->allowed_domains); in amdgpu_userq_bo_validate()
742 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); in amdgpu_userq_bo_validate()
751 spin_lock(&vm->status_lock); in amdgpu_userq_bo_validate()
753 spin_unlock(&vm->status_lock); in amdgpu_userq_bo_validate()
758 /* Make sure the whole VM is ready to be used */
763 struct amdgpu_device *adev = uq_mgr->adev; in amdgpu_userq_vm_validate()
764 struct amdgpu_vm *vm = &fpriv->vm; in amdgpu_userq_vm_validate() local
771 ret = amdgpu_vm_lock_pd(vm, &exec, 1); in amdgpu_userq_vm_validate()
776 ret = amdgpu_vm_lock_done_list(vm, &exec, 1); in amdgpu_userq_vm_validate()
781 /* This validates PDs, PTs and per VM BOs */ in amdgpu_userq_vm_validate()
782 ret = amdgpu_vm_validate(adev, vm, NULL, in amdgpu_userq_vm_validate()
789 ret = amdgpu_userq_bo_validate(adev, &exec, vm); in amdgpu_userq_vm_validate()
795 ret = amdgpu_vm_handle_moved(adev, vm, NULL); in amdgpu_userq_vm_validate()
799 ret = amdgpu_vm_update_pdes(adev, vm, false); in amdgpu_userq_vm_validate()
804 * We need to wait for all VM updates to finish before restarting the in amdgpu_userq_vm_validate()
808 list_for_each_entry(bo_va, &vm->done, base.vm_status) in amdgpu_userq_vm_validate()
809 dma_fence_wait(bo_va->last_pt_update, false); in amdgpu_userq_vm_validate()
810 dma_fence_wait(vm->last_update, false); in amdgpu_userq_vm_validate()
812 ret = amdgpu_eviction_fence_replace_fence(&fpriv->evf_mgr, &exec); in amdgpu_userq_vm_validate()
814 drm_file_err(uq_mgr->file, "Failed to replace eviction fence\n"); in amdgpu_userq_vm_validate()
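amdgpu_userq_vm_validate() runs inside a drm_exec locking loop whose retry boilerplate is elided from this listing. A minimal sketch of that generic pattern, using only the core drm_exec API plus amdgpu_vm_lock_pd() as seen above; everything beyond locking the page directory is left as a placeholder:

#include <drm/drm_exec.h>
#include "amdgpu.h"	/* struct amdgpu_vm, amdgpu_vm_lock_pd() */

/* Sketch: every lock taken inside drm_exec_until_all_locked() is
 * retried from scratch when drm_exec_retry_on_contention() detects
 * contention, so the body must be safe to re-run.
 */
static int lock_vm_for_validation_sketch(struct amdgpu_vm *vm)
{
	struct drm_exec exec;
	int ret = 0;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
	drm_exec_until_all_locked(&exec) {
		ret = amdgpu_vm_lock_pd(vm, &exec, 1);	/* reserve one extra fence slot */
		drm_exec_retry_on_contention(&exec);
		if (ret)
			goto out;
		/* ... lock and validate the remaining BOs here ... */
	}
out:
	drm_exec_fini(&exec);
	return ret;
}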
827 flush_delayed_work(&fpriv->evf_mgr.suspend_work); in amdgpu_userq_restore_worker()
829 mutex_lock(&uq_mgr->userq_mutex); in amdgpu_userq_restore_worker()
833 drm_file_err(uq_mgr->file, "Failed to validate BOs to restore\n"); in amdgpu_userq_restore_worker()
839 drm_file_err(uq_mgr->file, "Failed to restore all queues\n"); in amdgpu_userq_restore_worker()
844 mutex_unlock(&uq_mgr->userq_mutex); in amdgpu_userq_restore_worker()
855 idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) { in amdgpu_userq_evict_all()
862 drm_file_err(uq_mgr->file, "Couldn't unmap all the queues\n"); in amdgpu_userq_evict_all()
872 idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) { in amdgpu_userq_wait_for_signal()
873 struct dma_fence *f = queue->last_fence; in amdgpu_userq_wait_for_signal()
879 drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n", in amdgpu_userq_wait_for_signal()
880 f->context, f->seqno); in amdgpu_userq_wait_for_signal()
881 return -ETIMEDOUT; in amdgpu_userq_wait_for_signal()
894 struct amdgpu_eviction_fence_mgr *evf_mgr = &fpriv->evf_mgr; in amdgpu_userq_evict()
899 drm_file_err(uq_mgr->file, "Not evicting userqueue, timed out waiting for work\n"); in amdgpu_userq_evict()
905 drm_file_err(uq_mgr->file, "Failed to evict userqueue\n"); in amdgpu_userq_evict()
912 if (evf_mgr->fd_closing) { in amdgpu_userq_evict()
913 cancel_delayed_work_sync(&uq_mgr->resume_work); in amdgpu_userq_evict()
918 schedule_delayed_work(&uq_mgr->resume_work, 0); in amdgpu_userq_evict()
924 mutex_init(&userq_mgr->userq_mutex); in amdgpu_userq_mgr_init()
925 idr_init_base(&userq_mgr->userq_idr, 1); in amdgpu_userq_mgr_init()
926 userq_mgr->adev = adev; in amdgpu_userq_mgr_init()
927 userq_mgr->file = file_priv; in amdgpu_userq_mgr_init()
929 mutex_lock(&adev->userq_mutex); in amdgpu_userq_mgr_init()
930 list_add(&userq_mgr->list, &adev->userq_mgr_list); in amdgpu_userq_mgr_init()
931 mutex_unlock(&adev->userq_mutex); in amdgpu_userq_mgr_init()
933 INIT_DELAYED_WORK(&userq_mgr->resume_work, amdgpu_userq_restore_worker); in amdgpu_userq_mgr_init()
939 struct amdgpu_device *adev = userq_mgr->adev; in amdgpu_userq_mgr_fini()
944 cancel_delayed_work_sync(&userq_mgr->resume_work); in amdgpu_userq_mgr_fini()
946 mutex_lock(&adev->userq_mutex); in amdgpu_userq_mgr_fini()
947 mutex_lock(&userq_mgr->userq_mutex); in amdgpu_userq_mgr_fini()
948 idr_for_each_entry(&userq_mgr->userq_idr, queue, queue_id) { in amdgpu_userq_mgr_fini()
954 list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { in amdgpu_userq_mgr_fini()
956 list_del(&uqm->list); in amdgpu_userq_mgr_fini()
960 idr_destroy(&userq_mgr->userq_idr); in amdgpu_userq_mgr_fini()
961 mutex_unlock(&userq_mgr->userq_mutex); in amdgpu_userq_mgr_fini()
962 mutex_unlock(&adev->userq_mutex); in amdgpu_userq_mgr_fini()
963 mutex_destroy(&userq_mgr->userq_mutex); in amdgpu_userq_mgr_fini()
977 mutex_lock(&adev->userq_mutex); in amdgpu_userq_suspend()
978 list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { in amdgpu_userq_suspend()
979 cancel_delayed_work_sync(&uqm->resume_work); in amdgpu_userq_suspend()
980 mutex_lock(&uqm->userq_mutex); in amdgpu_userq_suspend()
981 idr_for_each_entry(&uqm->userq_idr, queue, queue_id) { in amdgpu_userq_suspend()
982 if (adev->in_s0ix) in amdgpu_userq_suspend()
989 mutex_unlock(&uqm->userq_mutex); in amdgpu_userq_suspend()
991 mutex_unlock(&adev->userq_mutex); in amdgpu_userq_suspend()
1006 mutex_lock(&adev->userq_mutex); in amdgpu_userq_resume()
1007 list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { in amdgpu_userq_resume()
1008 mutex_lock(&uqm->userq_mutex); in amdgpu_userq_resume()
1009 idr_for_each_entry(&uqm->userq_idr, queue, queue_id) { in amdgpu_userq_resume()
1010 if (adev->in_s0ix) in amdgpu_userq_resume()
1017 mutex_unlock(&uqm->userq_mutex); in amdgpu_userq_resume()
1019 mutex_unlock(&adev->userq_mutex); in amdgpu_userq_resume()
1036 mutex_lock(&adev->userq_mutex); in amdgpu_userq_stop_sched_for_enforce_isolation()
1037 if (adev->userq_halt_for_enforce_isolation) in amdgpu_userq_stop_sched_for_enforce_isolation()
1038 dev_warn(adev->dev, "userq scheduling already stopped!\n"); in amdgpu_userq_stop_sched_for_enforce_isolation()
1039 adev->userq_halt_for_enforce_isolation = true; in amdgpu_userq_stop_sched_for_enforce_isolation()
1040 list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { in amdgpu_userq_stop_sched_for_enforce_isolation()
1041 cancel_delayed_work_sync(&uqm->resume_work); in amdgpu_userq_stop_sched_for_enforce_isolation()
1042 mutex_lock(&uqm->userq_mutex); in amdgpu_userq_stop_sched_for_enforce_isolation()
1043 idr_for_each_entry(&uqm->userq_idr, queue, queue_id) { in amdgpu_userq_stop_sched_for_enforce_isolation()
1044 if (((queue->queue_type == AMDGPU_HW_IP_GFX) || in amdgpu_userq_stop_sched_for_enforce_isolation()
1045 (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) && in amdgpu_userq_stop_sched_for_enforce_isolation()
1046 (queue->xcp_id == idx)) { in amdgpu_userq_stop_sched_for_enforce_isolation()
1052 mutex_unlock(&uqm->userq_mutex); in amdgpu_userq_stop_sched_for_enforce_isolation()
1054 mutex_unlock(&adev->userq_mutex); in amdgpu_userq_stop_sched_for_enforce_isolation()
1071 mutex_lock(&adev->userq_mutex); in amdgpu_userq_start_sched_for_enforce_isolation()
1072 if (!adev->userq_halt_for_enforce_isolation) in amdgpu_userq_start_sched_for_enforce_isolation()
1073 dev_warn(adev->dev, "userq scheduling already started!\n"); in amdgpu_userq_start_sched_for_enforce_isolation()
1074 adev->userq_halt_for_enforce_isolation = false; in amdgpu_userq_start_sched_for_enforce_isolation()
1075 list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { in amdgpu_userq_start_sched_for_enforce_isolation()
1076 mutex_lock(&uqm->userq_mutex); in amdgpu_userq_start_sched_for_enforce_isolation()
1077 idr_for_each_entry(&uqm->userq_idr, queue, queue_id) { in amdgpu_userq_start_sched_for_enforce_isolation()
1078 if (((queue->queue_type == AMDGPU_HW_IP_GFX) || in amdgpu_userq_start_sched_for_enforce_isolation()
1079 (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) && in amdgpu_userq_start_sched_for_enforce_isolation()
1080 (queue->xcp_id == idx)) { in amdgpu_userq_start_sched_for_enforce_isolation()
1086 mutex_unlock(&uqm->userq_mutex); in amdgpu_userq_start_sched_for_enforce_isolation()
1088 mutex_unlock(&adev->userq_mutex); in amdgpu_userq_start_sched_for_enforce_isolation()