Lines Matching +full:protect +full:- +full:exec
1 // SPDX-License-Identifier: MIT
41 if (q->vm) in __xe_exec_queue_free()
42 xe_vm_put(q->vm); in __xe_exec_queue_free()
44 if (q->xef) in __xe_exec_queue_free()
45 xe_file_put(q->xef); in __xe_exec_queue_free()
57 struct xe_gt *gt = hwe->gt; in __xe_exec_queue_alloc()
65 return ERR_PTR(-ENOMEM); in __xe_exec_queue_alloc()
67 kref_init(&q->refcount); in __xe_exec_queue_alloc()
68 q->flags = flags; in __xe_exec_queue_alloc()
69 q->hwe = hwe; in __xe_exec_queue_alloc()
70 q->gt = gt; in __xe_exec_queue_alloc()
71 q->class = hwe->class; in __xe_exec_queue_alloc()
72 q->width = width; in __xe_exec_queue_alloc()
73 q->msix_vec = XE_IRQ_DEFAULT_MSIX; in __xe_exec_queue_alloc()
74 q->logical_mask = logical_mask; in __xe_exec_queue_alloc()
75 q->fence_irq = &gt->fence_irq[hwe->class]; in __xe_exec_queue_alloc()
76 q->ring_ops = gt->ring_ops[hwe->class]; in __xe_exec_queue_alloc()
77 q->ops = gt->exec_queue_ops; in __xe_exec_queue_alloc()
78 INIT_LIST_HEAD(&q->lr.link); in __xe_exec_queue_alloc()
79 INIT_LIST_HEAD(&q->multi_gt_link); in __xe_exec_queue_alloc()
80 INIT_LIST_HEAD(&q->hw_engine_group_link); in __xe_exec_queue_alloc()
82 q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us; in __xe_exec_queue_alloc()
83 q->sched_props.preempt_timeout_us = in __xe_exec_queue_alloc()
84 hwe->eclass->sched_props.preempt_timeout_us; in __xe_exec_queue_alloc()
85 q->sched_props.job_timeout_ms = in __xe_exec_queue_alloc()
86 hwe->eclass->sched_props.job_timeout_ms; in __xe_exec_queue_alloc()
87 if (q->flags & EXEC_QUEUE_FLAG_KERNEL && in __xe_exec_queue_alloc()
88 q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY) in __xe_exec_queue_alloc()
89 q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL; in __xe_exec_queue_alloc()
91 q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL; in __xe_exec_queue_alloc()
94 q->vm = xe_vm_get(vm); in __xe_exec_queue_alloc()
98 * may set q->usm, must come before xe_lrc_create(), in __xe_exec_queue_alloc()
99 * may overwrite q->sched_props, must come before q->ops->init() in __xe_exec_queue_alloc()
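For context, a hedged reconstruction of the call that the two comment lines above guard; the search listing elides the surrounding lines, so this body is inferred from the visible fragments rather than quoted:

	if (extensions) {
		/*
		 * Extensions may set q->usm and rewrite q->sched_props, so
		 * they are parsed before xe_lrc_create() and q->ops->init()
		 * consume those fields.
		 */
		err = exec_queue_user_extensions(xe, q, extensions, 0);
		if (err) {
			__xe_exec_queue_free(q);
			return ERR_PTR(err);
		}
	}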
113 struct xe_vm *vm = q->vm; in __xe_exec_queue_init()
122 for (i = 0; i < q->width; ++i) { in __xe_exec_queue_init()
123 q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K, q->msix_vec); in __xe_exec_queue_init()
124 if (IS_ERR(q->lrc[i])) { in __xe_exec_queue_init()
125 err = PTR_ERR(q->lrc[i]); in __xe_exec_queue_init()
133 err = q->ops->init(q); in __xe_exec_queue_init()
143 for (i = i - 1; i >= 0; --i) in __xe_exec_queue_init()
144 xe_lrc_put(q->lrc[i]); in __xe_exec_queue_init()
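The two lines above are the usual kernel unwind idiom: on a failure at index i, only the i LRCs already created are released. A generic sketch with hypothetical obj[]/create()/destroy() names:

	for (i = 0; i < n; ++i) {
		obj[i] = create(i);
		if (IS_ERR(obj[i])) {
			err = PTR_ERR(obj[i]);
			goto err_unwind;	/* slot i holds no resource */
		}
	}
	return 0;

err_unwind:
	for (i = i - 1; i >= 0; --i)	/* undo completed iterations only */
		destroy(obj[i]);
	return err;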
185 if (hwe->class == class) { in xe_exec_queue_create_class()
186 logical_mask |= BIT(hwe->logical_instance); in xe_exec_queue_create_class()
193 return ERR_PTR(-ENODEV); in xe_exec_queue_create_class()
199 * xe_exec_queue_create_bind() - Create bind exec queue.
201 * @tile: tile which bind exec queue belongs to.
202 * @flags: exec queue creation flags
203 * @extensions: exec queue creation extensions
205 * Normalize bind exec queue creation. Bind exec queue is tied to migration VM
209 * resolve faults depend on user binds). On non-faulting devices any copy engine
212 * Returns exec queue on success, ERR_PTR on failure
218 struct xe_gt *gt = tile->primary_gt; in xe_exec_queue_create_bind()
222 migrate_vm = xe_migrate_get_vm(tile->migrate); in xe_exec_queue_create_bind()
223 if (xe->info.has_usm) { in xe_exec_queue_create_bind()
226 gt->usm.reserved_bcs_instance, in xe_exec_queue_create_bind()
231 return ERR_PTR(-EINVAL); in xe_exec_queue_create_bind()
235 BIT(hwe->logical_instance), 1, hwe, in xe_exec_queue_create_bind()
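A hedged caller sketch for the helper documented above, assuming the signature implied by the kernel-doc (the xe device plus @tile, @flags, @extensions); EXEC_QUEUE_FLAG_VM appears elsewhere in this file as the bind/VM engine flag:

	struct xe_exec_queue *q;

	q = xe_exec_queue_create_bind(xe, tile, EXEC_QUEUE_FLAG_VM, 0);
	if (IS_ERR(q))
		return ERR_CAST(q);	/* e.g. -EINVAL when no suitable copy engine exists */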
254 if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) { in xe_exec_queue_destroy()
255 list_for_each_entry_safe(eq, next, &q->multi_gt_list, in xe_exec_queue_destroy()
260 q->ops->fini(q); in xe_exec_queue_destroy()
272 if (q->xef && atomic_dec_and_test(&q->xef->exec_queue.pending_removal)) in xe_exec_queue_fini()
273 wake_up_var(&q->xef->exec_queue.pending_removal); in xe_exec_queue_fini()
275 for (i = 0; i < q->width; ++i) in xe_exec_queue_fini()
276 xe_lrc_put(q->lrc[i]); in xe_exec_queue_fini()
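The atomic_dec_and_test()/wake_up_var() pair above lets teardown paths sleep until the file's last queue removal completes; the matching waiter is outside this excerpt, but it would be the standard wait_var_event() counterpart:

	/* Sketch of the waiter side paired with the wake_up_var() above. */
	wait_var_event(&xef->exec_queue.pending_removal,
		       !atomic_read(&xef->exec_queue.pending_removal));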
283 switch (q->class) { in xe_exec_queue_assign_name()
285 snprintf(q->name, sizeof(q->name), "rcs%d", instance); in xe_exec_queue_assign_name()
288 snprintf(q->name, sizeof(q->name), "vcs%d", instance); in xe_exec_queue_assign_name()
291 snprintf(q->name, sizeof(q->name), "vecs%d", instance); in xe_exec_queue_assign_name()
294 snprintf(q->name, sizeof(q->name), "bcs%d", instance); in xe_exec_queue_assign_name()
297 snprintf(q->name, sizeof(q->name), "ccs%d", instance); in xe_exec_queue_assign_name()
300 snprintf(q->name, sizeof(q->name), "gsccs%d", instance); in xe_exec_queue_assign_name()
303 XE_WARN_ON(q->class); in xe_exec_queue_assign_name()
311 mutex_lock(&xef->exec_queue.lock); in xe_exec_queue_lookup()
312 q = xa_load(&xef->exec_queue.xa, id); in xe_exec_queue_lookup()
315 mutex_unlock(&xef->exec_queue.lock); in xe_exec_queue_lookup()
331 return -EINVAL; in exec_queue_set_priority()
334 return -EPERM; in exec_queue_set_priority()
336 q->sched_props.priority = value; in exec_queue_set_priority()
356 *min = eclass->sched_props.job_timeout_min; in xe_exec_queue_get_prop_minmax()
357 *max = eclass->sched_props.job_timeout_max; in xe_exec_queue_get_prop_minmax()
360 *min = eclass->sched_props.timeslice_min; in xe_exec_queue_get_prop_minmax()
361 *max = eclass->sched_props.timeslice_max; in xe_exec_queue_get_prop_minmax()
364 *min = eclass->sched_props.preempt_timeout_min; in xe_exec_queue_get_prop_minmax()
365 *max = eclass->sched_props.preempt_timeout_max; in xe_exec_queue_get_prop_minmax()
397 xe_exec_queue_get_prop_minmax(q->hwe->eclass, in exec_queue_set_timeslice()
402 return -EINVAL; in exec_queue_set_timeslice()
404 q->sched_props.timeslice_us = value; in exec_queue_set_timeslice()
428 return -EFAULT; in exec_queue_user_ext_set_property()
435 return -EINVAL; in exec_queue_user_ext_set_property()
439 return -EINVAL; in exec_queue_user_ext_set_property()
462 return -E2BIG; in exec_queue_user_extensions()
466 return -EFAULT; in exec_queue_user_extensions()
471 return -EINVAL; in exec_queue_user_extensions()
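The fragments above are the kernel side of the property-extension chain. A hedged userspace sketch of setting the timeslice at creation time, using uapi names from include/uapi/drm/xe_drm.h:

	struct drm_xe_ext_set_property ext = {
		.base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
		.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE,
		.value = 1000,	/* µs; validated against the class min/max above */
	};

	/* Chained via drm_xe_exec_queue_create.extensions = (uintptr_t)&ext; */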
545 u64_to_user_ptr(args->instances); in xe_exec_queue_create_ioctl()
556 if (XE_IOCTL_DBG(xe, args->flags) || in xe_exec_queue_create_ioctl()
557 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_exec_queue_create_ioctl()
558 return -EINVAL; in xe_exec_queue_create_ioctl()
560 len = args->width * args->num_placements; in xe_exec_queue_create_ioctl()
562 return -EINVAL; in xe_exec_queue_create_ioctl()
568 return -EFAULT; in xe_exec_queue_create_ioctl()
570 if (XE_IOCTL_DBG(xe, eci[0].gt_id >= xe->info.gt_count)) in xe_exec_queue_create_ioctl()
571 return -EINVAL; in xe_exec_queue_create_ioctl()
574 if (XE_IOCTL_DBG(xe, args->width != 1) || in xe_exec_queue_create_ioctl()
575 XE_IOCTL_DBG(xe, args->num_placements != 1) || in xe_exec_queue_create_ioctl()
577 return -EINVAL; in xe_exec_queue_create_ioctl()
587 args->extensions); in xe_exec_queue_create_ioctl()
597 list_add_tail(&new->multi_gt_list, in xe_exec_queue_create_ioctl()
598 &q->multi_gt_link); in xe_exec_queue_create_ioctl()
603 args->width, in xe_exec_queue_create_ioctl()
604 args->num_placements); in xe_exec_queue_create_ioctl()
606 return -EINVAL; in xe_exec_queue_create_ioctl()
610 return -EINVAL; in xe_exec_queue_create_ioctl()
612 vm = xe_vm_lookup(xef, args->vm_id); in xe_exec_queue_create_ioctl()
614 return -ENOENT; in xe_exec_queue_create_ioctl()
616 err = down_read_interruptible(&vm->lock); in xe_exec_queue_create_ioctl()
623 up_read(&vm->lock); in xe_exec_queue_create_ioctl()
625 return -ENOENT; in xe_exec_queue_create_ioctl()
629 args->width, hwe, 0, in xe_exec_queue_create_ioctl()
630 args->extensions); in xe_exec_queue_create_ioctl()
631 up_read(&vm->lock); in xe_exec_queue_create_ioctl()
637 q->lr.context = dma_fence_context_alloc(1); in xe_exec_queue_create_ioctl()
644 if (q->vm && q->hwe->hw_engine_group) { in xe_exec_queue_create_ioctl()
645 err = xe_hw_engine_group_add_exec_queue(q->hwe->hw_engine_group, q); in xe_exec_queue_create_ioctl()
651 q->xef = xe_file_get(xef); in xe_exec_queue_create_ioctl()
654 err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL); in xe_exec_queue_create_ioctl()
658 args->exec_queue_id = id; in xe_exec_queue_create_ioctl()
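A hedged userspace sketch of the ioctl implemented above; struct and macro names come from include/uapi/drm/xe_drm.h, the error handling is illustrative:

	struct drm_xe_engine_class_instance eci = {
		.engine_class = DRM_XE_ENGINE_CLASS_RENDER,
		.engine_instance = 0,
		.gt_id = 0,
	};
	struct drm_xe_exec_queue_create create = {
		.width = 1,		/* instances holds width * num_placements entries */
		.num_placements = 1,
		.vm_id = vm_id,
		.instances = (uintptr_t)&eci,
	};

	if (ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create))
		return -errno;
	/* create.exec_queue_id now identifies the new queue. */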
678 if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_exec_queue_get_property_ioctl()
679 return -EINVAL; in xe_exec_queue_get_property_ioctl()
681 q = xe_exec_queue_lookup(xef, args->exec_queue_id); in xe_exec_queue_get_property_ioctl()
683 return -ENOENT; in xe_exec_queue_get_property_ioctl()
685 switch (args->property) { in xe_exec_queue_get_property_ioctl()
687 args->value = q->ops->reset_status(q); in xe_exec_queue_get_property_ioctl()
691 ret = -EINVAL; in xe_exec_queue_get_property_ioctl()
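And the matching hedged userspace sketch for the query above; BAN is the only property visible in this fragment's switch:

	struct drm_xe_exec_queue_get_property prop = {
		.exec_queue_id = exec_queue_id,
		.property = DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN,
	};

	if (ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY, &prop))
		return -errno;
	/* prop.value is non-zero if the queue was banned after a reset. */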
700 * xe_exec_queue_is_lr() - Whether an exec_queue is long-running
703 * Return: True if the exec_queue is long-running, false otherwise.
707 return q->vm && xe_vm_in_lr_mode(q->vm) && in xe_exec_queue_is_lr()
708 !(q->flags & EXEC_QUEUE_FLAG_VM); in xe_exec_queue_is_lr()
713 return q->lrc[0]->fence_ctx.next_seqno - xe_lrc_seqno(q->lrc[0]) - 1; in xe_exec_queue_num_job_inflight()
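Worked example of the arithmetic above: with next_seqno == 10 and a last completed hardware seqno of 6, jobs 7, 8 and 9 are still in flight, and 10 - 6 - 1 == 3.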
717 * xe_exec_queue_ring_full() - Whether an exec_queue's ring is full
724 struct xe_lrc *lrc = q->lrc[0]; in xe_exec_queue_ring_full()
725 s32 max_job = lrc->ring.size / MAX_JOB_SIZE_BYTES; in xe_exec_queue_ring_full()
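A hedged caller sketch: a submission path would compare the in-flight count against max_job and back-pressure userspace; the exact errno policy shown here is illustrative:

	if (xe_exec_queue_ring_full(q))
		return -EWOULDBLOCK;	/* let userspace retry the exec later */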
731 * xe_exec_queue_is_idle() - Whether an exec_queue is idle.
734 * FIXME: Need to determine what to use as the short-lived
737 * snapshot in time. The timeline lock must protect the
739 * Typically vm->resv, but user-created timeline locks use the migrate vm
740 * and never grab the migrate vm->resv, so we have a race there.
749 for (i = 0; i < q->width; ++i) { in xe_exec_queue_is_idle()
750 if (xe_lrc_seqno(q->lrc[i]) != in xe_exec_queue_is_idle()
751 q->lrc[i]->fence_ctx.next_seqno - 1) in xe_exec_queue_is_idle()
758 return xe_lrc_seqno(q->lrc[0]) == in xe_exec_queue_is_idle()
759 q->lrc[0]->fence_ctx.next_seqno - 1; in xe_exec_queue_is_idle()
763 * xe_exec_queue_update_run_ticks() - Update run time in ticks for this exec queue
765 * @q: The exec queue
767 * Update the timestamp saved by HW for this exec queue and save run ticks
772 struct xe_device *xe = gt_to_xe(q->gt); in xe_exec_queue_update_run_ticks()
781 if (!q->xef) in xe_exec_queue_update_run_ticks()
785 if (!drm_dev_enter(&xe->drm, &idx)) in xe_exec_queue_update_run_ticks()
790 * width - this may introduce errors if that premise is not true and in xe_exec_queue_update_run_ticks()
795 lrc = q->lrc[0]; in xe_exec_queue_update_run_ticks()
797 q->xef->run_ticks[q->class] += (new_ts - old_ts) * q->width; in xe_exec_queue_update_run_ticks()
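Worked example of the line above: a width-2 parallel queue whose first LRC timestamp advanced by 100 ticks accounts 100 * 2 == 200 ticks to q->xef->run_ticks[q->class], on the stated premise that all LRCs of a parallel queue are scheduled together.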
803 * xe_exec_queue_kill - permanently stop all execution from an exec queue
804 * @q: The exec queue
806 * This function permanently stops all activity on an exec queue. If the queue
815 list_for_each_entry_safe(eq, next, &eq->multi_gt_list, in xe_exec_queue_kill()
817 q->ops->kill(eq); in xe_exec_queue_kill()
818 xe_vm_remove_compute_exec_queue(q->vm, eq); in xe_exec_queue_kill()
821 q->ops->kill(q); in xe_exec_queue_kill()
822 xe_vm_remove_compute_exec_queue(q->vm, q); in xe_exec_queue_kill()
833 if (XE_IOCTL_DBG(xe, args->pad) || in xe_exec_queue_destroy_ioctl()
834 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_exec_queue_destroy_ioctl()
835 return -EINVAL; in xe_exec_queue_destroy_ioctl()
837 mutex_lock(&xef->exec_queue.lock); in xe_exec_queue_destroy_ioctl()
838 q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id); in xe_exec_queue_destroy_ioctl()
840 atomic_inc(&xef->exec_queue.pending_removal); in xe_exec_queue_destroy_ioctl()
841 mutex_unlock(&xef->exec_queue.lock); in xe_exec_queue_destroy_ioctl()
844 return -ENOENT; in xe_exec_queue_destroy_ioctl()
846 if (q->vm && q->hwe->hw_engine_group) in xe_exec_queue_destroy_ioctl()
847 xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q); in xe_exec_queue_destroy_ioctl()
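A hedged sketch of how the ioctl presumably finishes after this fragment, pairing with the pending_removal accounting seen in xe_exec_queue_fini(); the elided lines are an assumption, not a quote:

	xe_exec_queue_kill(q);
	xe_exec_queue_put(q);

	return 0;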
860 if (q->flags & EXEC_QUEUE_FLAG_VM) { in xe_exec_queue_last_fence_lockdep_assert()
861 lockdep_assert_held(&vm->lock); in xe_exec_queue_last_fence_lockdep_assert()
864 lockdep_assert_held(&q->hwe->hw_engine_group->mode_sem); in xe_exec_queue_last_fence_lockdep_assert()
869 * xe_exec_queue_last_fence_put() - Drop ref to last fence
870 * @q: The exec queue
871 * @vm: The VM the engine does a bind or exec for
881 * xe_exec_queue_last_fence_put_unlocked() - Drop ref to last fence unlocked
882 * @q: The exec queue
888 if (q->last_fence) { in xe_exec_queue_last_fence_put_unlocked()
889 dma_fence_put(q->last_fence); in xe_exec_queue_last_fence_put_unlocked()
890 q->last_fence = NULL; in xe_exec_queue_last_fence_put_unlocked()
895 * xe_exec_queue_last_fence_get() - Get last fence
896 * @q: The exec queue
897 * @vm: The VM the engine does a bind or exec for
910 if (q->last_fence && in xe_exec_queue_last_fence_get()
911 test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags)) in xe_exec_queue_last_fence_get()
914 fence = q->last_fence ? q->last_fence : dma_fence_get_stub(); in xe_exec_queue_last_fence_get()
920 * xe_exec_queue_last_fence_get_for_resume() - Get last fence
921 * @q: The exec queue
922 * @vm: The VM the engine does a bind or exec for
925 * resuming the hw engine group's long-running exec queue, when the group
935 lockdep_assert_held_write(&q->hwe->hw_engine_group->mode_sem); in xe_exec_queue_last_fence_get_for_resume()
937 if (q->last_fence && in xe_exec_queue_last_fence_get_for_resume()
938 test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags)) in xe_exec_queue_last_fence_get_for_resume()
941 fence = q->last_fence ? q->last_fence : dma_fence_get_stub(); in xe_exec_queue_last_fence_get_for_resume()
947 * xe_exec_queue_last_fence_set() - Set last fence
948 * @q: The exec queue
949 * @vm: The VM the engine does a bind or exec for
961 q->last_fence = dma_fence_get(fence); in xe_exec_queue_last_fence_set()
965 * xe_exec_queue_last_fence_test_dep - Test last fence dependency of queue
966 * @q: The exec queue
967 * @vm: The VM the engine does a bind or exec for
970 * -ETIME if there exists an unsignalled last fence dependency, zero otherwise.
979 err = test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) ? in xe_exec_queue_last_fence_test_dep()
980 0 : -ETIME; in xe_exec_queue_last_fence_test_dep()
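Finally, a hedged usage sketch tying the last-fence helpers together for a bind path; the locking rules asserted by xe_exec_queue_last_fence_lockdep_assert() apply to the caller:

	struct dma_fence *fence;
	long err;

	fence = xe_exec_queue_last_fence_get(q, vm);	/* takes a reference */
	err = dma_fence_wait(fence, true);		/* 0 or negative errno */
	dma_fence_put(fence);
	if (err)
		return err;

The non-blocking alternative is xe_exec_queue_last_fence_test_dep(), which returns -ETIME while the last fence is unsignalled instead of sleeping.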