Lines matching +full:0 +full:xe

Identifier-search results: each hit below lists the file line number, the matched source line (every match contains both tokens "0" and "xe"), and the enclosing function. All hits come from the Xe DRM driver's exec-queue code, drivers/gpu/drm/xe/xe_exec_queue.c.
31 XE_EXEC_QUEUE_JOB_TIMEOUT = 0,
37 static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
53 static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe, in __xe_exec_queue_alloc() argument
105 err = exec_queue_user_extensions(xe, q, extensions, 0); in __xe_exec_queue_alloc()
118 u32 flags = 0; in __xe_exec_queue_init()
134 for (i = 0; i < q->width; ++i) { in __xe_exec_queue_init()
146 return 0; in __xe_exec_queue_init()
149 for (i = i - 1; i >= 0; --i) in __xe_exec_queue_init()
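The __xe_exec_queue_init() hits above show the driver's init/unwind idiom: each of the queue's `width` logical ring contexts is initialized in a forward loop, and on failure the index is walked back down so only the entries that were actually set up get torn down. A minimal sketch of the same pattern; struct thing and thing_init()/thing_fini() are hypothetical stand-ins for the LRC setup and teardown calls:

struct thing;
int thing_init(struct thing *t);
void thing_fini(struct thing *t);

static int init_all(struct thing *things, int width)
{
        int i, err;

        for (i = 0; i < width; ++i) {
                err = thing_init(&things[i]);
                if (err)
                        goto err_unwind;
        }
        return 0;

err_unwind:
        /* Undo only the entries that were successfully initialized. */
        for (i = i - 1; i >= 0; --i)
                thing_fini(&things[i]);
        return err;
}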
154 struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm, in xe_exec_queue_create() argument
163 …xe_assert(xe, !vm || (!!(vm->flags & XE_VM_FLAG_GSC) == !!(hwe->engine_id == XE_HW_ENGINE_GSCCS0))… in xe_exec_queue_create()
165 q = __xe_exec_queue_alloc(xe, vm, logical_mask, width, hwe, flags, in xe_exec_queue_create()
182 err = xe_pxp_exec_queue_add(xe->pxp, q); in xe_exec_queue_create()
195 struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt, in xe_exec_queue_create_class() argument
202 u32 logical_mask = 0; in xe_exec_queue_create_class()
218 return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags, extensions); in xe_exec_queue_create_class()
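xe_exec_queue_create_class() builds a logical mask covering every usable engine of the requested class on the GT, then creates a width-1 queue on the first such engine (hwe0), leaving the scheduler free to place jobs anywhere in the mask. A hedged sketch of the mask-building loop, assuming it skips reserved engines the same way calc_validate_logical_mask() does below; for_each_hw_engine(), xe_hw_engine_is_reserved() and ->logical_instance are taken from this driver, but the exact loop body is an assumption:

        struct xe_hw_engine *hwe, *hwe0 = NULL;
        enum xe_hw_engine_id id;
        u32 logical_mask = 0;

        for_each_hw_engine(hwe, gt, id) {
                if (xe_hw_engine_is_reserved(hwe))
                        continue;
                if (hwe->class == class) {
                        logical_mask |= BIT(hwe->logical_instance);
                        if (!hwe0)
                                hwe0 = hwe;     /* representative engine */
                }
        }

        if (!logical_mask)
                return ERR_PTR(-ENODEV);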
223 * @xe: Xe device.
237 struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe, in xe_exec_queue_create_bind() argument
246 if (xe->info.has_usm) { in xe_exec_queue_create_bind()
257 q = xe_exec_queue_create(xe, migrate_vm, in xe_exec_queue_create_bind()
261 q = xe_exec_queue_create_class(xe, gt, migrate_vm, in xe_exec_queue_create_bind()
301 for (i = 0; i < q->width; ++i) in xe_exec_queue_fini()
347 xe_exec_queue_device_get_max_priority(struct xe_device *xe) in xe_exec_queue_device_get_max_priority() argument
353 static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q, in exec_queue_set_priority() argument
356 if (XE_IOCTL_DBG(xe, value > XE_EXEC_QUEUE_PRIORITY_HIGH)) in exec_queue_set_priority()
359 if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe))) in exec_queue_set_priority()
363 return 0; in exec_queue_set_priority()
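Priority is validated twice: once against the ABI ceiling (XE_EXEC_QUEUE_PRIORITY_HIGH) and once against the per-process maximum reported by xe_exec_queue_device_get_max_priority(). That maximum is very likely capability-gated; a hedged sketch of such a policy (capable() and CAP_SYS_NICE are standard kernel APIs, the exact cutoff is an assumption):

#include <linux/capability.h>

/* Hedged sketch: unprivileged callers assumed capped at NORMAL;
 * CAP_SYS_NICE unlocks HIGH. */
static enum xe_exec_queue_priority
device_max_priority(struct xe_device *xe)
{
        return capable(CAP_SYS_NICE) ? XE_EXEC_QUEUE_PRIORITY_HIGH :
                                       XE_EXEC_QUEUE_PRIORITY_NORMAL;
}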
418 static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q, in exec_queue_set_timeslice() argument
421 u32 min = 0, max = 0; in exec_queue_set_timeslice()
431 return 0; in exec_queue_set_timeslice()
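The timeslice setter follows the same validate-then-store shape: look up the min/max the platform allows for the property, reject out-of-range values, then record the new value in the queue's scheduling properties. A hedged sketch; get_prop_minmax() and PROP_TIMESLICE are hypothetical stand-ins for the driver's lookup helper, and sched_props.timeslice_us is assumed to be the backing field:

        u32 min = 0, max = 0;

        get_prop_minmax(q, PROP_TIMESLICE, &min, &max); /* hypothetical helper */
        if (value < min || value > max)
                return -EINVAL;

        q->sched_props.timeslice_us = value;
        return 0;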
435 exec_queue_set_pxp_type(struct xe_device *xe, struct xe_exec_queue *q, u64 value) in exec_queue_set_pxp_type() argument
438 return 0; in exec_queue_set_pxp_type()
441 if (XE_IOCTL_DBG(xe, value != DRM_XE_PXP_TYPE_HWDRM)) in exec_queue_set_pxp_type()
444 if (!xe_pxp_is_enabled(xe->pxp)) in exec_queue_set_pxp_type()
447 return xe_pxp_exec_queue_set_type(xe->pxp, q, DRM_XE_PXP_TYPE_HWDRM); in exec_queue_set_pxp_type()
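exec_queue_set_pxp_type() is an early-out validation ladder, almost fully visible in the hits above: DRM_XE_PXP_TYPE_NONE (0) means "no PXP" and succeeds immediately, any type other than HWDRM is rejected, and HWDRM is only honored when the device's PXP unit is enabled. Reassembled as a sketch (the -ENODEV for a disabled PXP unit is an inference, not confirmed by the listing):

        if (value == DRM_XE_PXP_TYPE_NONE)
                return 0;

        if (XE_IOCTL_DBG(xe, value != DRM_XE_PXP_TYPE_HWDRM))
                return -EINVAL;

        if (!xe_pxp_is_enabled(xe->pxp))
                return -ENODEV;

        return xe_pxp_exec_queue_set_type(xe->pxp, q, DRM_XE_PXP_TYPE_HWDRM);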
450 typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
460 static int exec_queue_user_ext_set_property(struct xe_device *xe, in exec_queue_user_ext_set_property() argument
470 if (XE_IOCTL_DBG(xe, err)) in exec_queue_user_ext_set_property()
473 if (XE_IOCTL_DBG(xe, ext.property >= in exec_queue_user_ext_set_property()
475 XE_IOCTL_DBG(xe, ext.pad) || in exec_queue_user_ext_set_property()
476 XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY && in exec_queue_user_ext_set_property()
485 return exec_queue_set_property_funcs[idx](xe, q, ext.value); in exec_queue_user_ext_set_property()
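Property extensions dispatch through a table of xe_exec_queue_set_property_fn pointers indexed by a userspace-supplied property id. The id is bounds-checked above and should then be laundered with array_index_nospec() before the table lookup, the standard Spectre-v1 hardening for user-controlled array indices; a sketch (the nospec step is an assumption consistent with common DRM ioctl practice):

#include <linux/nospec.h>

        u32 idx;

        if (XE_IOCTL_DBG(xe, ext.property >=
                         ARRAY_SIZE(exec_queue_set_property_funcs)))
                return -EINVAL;

        /* Clamp the user-controlled index even under speculation. */
        idx = array_index_nospec(ext.property,
                                 ARRAY_SIZE(exec_queue_set_property_funcs));

        return exec_queue_set_property_funcs[idx](xe, q, ext.value);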
488 typedef int (*xe_exec_queue_user_extension_fn)(struct xe_device *xe,
497 static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q, in exec_queue_user_extensions() argument
505 if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS)) in exec_queue_user_extensions()
509 if (XE_IOCTL_DBG(xe, err)) in exec_queue_user_extensions()
512 if (XE_IOCTL_DBG(xe, ext.pad) || in exec_queue_user_extensions()
513 XE_IOCTL_DBG(xe, ext.name >= in exec_queue_user_extensions()
519 err = exec_queue_user_extension_funcs[idx](xe, q, extensions); in exec_queue_user_extensions()
520 if (XE_IOCTL_DBG(xe, err)) in exec_queue_user_extensions()
524 return exec_queue_user_extensions(xe, q, ext.next_extension, in exec_queue_user_extensions()
527 return 0; in exec_queue_user_extensions()
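User extensions form a singly linked list in userspace memory: each node is copied in, validated (pad clear, name in range), dispatched through exec_queue_user_extension_funcs, and the walk tail-recurses on ext.next_extension with ext_number capping the chain at MAX_USER_EXTENSIONS, so a cyclic list from userspace cannot recurse forever. A sketch reassembled from the hits above; struct drm_xe_user_extension { __u64 next_extension; __u32 name; __u32 pad; } is the uapi node layout:

static int walk_extensions(struct xe_device *xe, struct xe_exec_queue *q,
                           u64 extensions, int ext_number)
{
        u64 __user *address = u64_to_user_ptr(extensions);
        struct drm_xe_user_extension ext;
        int err;
        u32 idx;

        /* Bound the walk: a cyclic user list must not recurse forever. */
        if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
                return -E2BIG;

        err = copy_from_user(&ext, address, sizeof(ext));
        if (XE_IOCTL_DBG(xe, err))
                return -EFAULT;

        if (XE_IOCTL_DBG(xe, ext.pad) ||
            XE_IOCTL_DBG(xe, ext.name >=
                         ARRAY_SIZE(exec_queue_user_extension_funcs)))
                return -EINVAL;

        idx = array_index_nospec(ext.name,
                                 ARRAY_SIZE(exec_queue_user_extension_funcs));
        err = exec_queue_user_extension_funcs[idx](xe, q, extensions);
        if (XE_IOCTL_DBG(xe, err))
                return err;

        if (ext.next_extension)
                return walk_extensions(xe, q, ext.next_extension,
                                       ++ext_number);

        return 0;
}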
530 static u32 calc_validate_logical_mask(struct xe_device *xe, in calc_validate_logical_mask() argument
538 u32 return_mask = 0, prev_mask; in calc_validate_logical_mask()
540 if (XE_IOCTL_DBG(xe, !xe_device_uc_enabled(xe) && in calc_validate_logical_mask()
542 return 0; in calc_validate_logical_mask()
544 for (i = 0; i < width; ++i) { in calc_validate_logical_mask()
545 u32 current_mask = 0; in calc_validate_logical_mask()
547 for (j = 0; j < num_placements; ++j) { in calc_validate_logical_mask()
552 hwe = xe_hw_engine_lookup(xe, eci[n]); in calc_validate_logical_mask()
553 if (XE_IOCTL_DBG(xe, !hwe)) in calc_validate_logical_mask()
554 return 0; in calc_validate_logical_mask()
556 if (XE_IOCTL_DBG(xe, xe_hw_engine_is_reserved(hwe))) in calc_validate_logical_mask()
557 return 0; in calc_validate_logical_mask()
559 if (XE_IOCTL_DBG(xe, n && eci[n].gt_id != gt_id) || in calc_validate_logical_mask()
560 XE_IOCTL_DBG(xe, n && eci[n].engine_class != class)) in calc_validate_logical_mask()
561 return 0; in calc_validate_logical_mask()
572 if (i && XE_IOCTL_DBG(xe, current_mask != prev_mask << 1)) in calc_validate_logical_mask()
573 return 0; in calc_validate_logical_mask()
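calc_validate_logical_mask() checks a width x num_placements grid of engine instances from userspace: every instance in a placement must sit on the same GT and engine class and must not be reserved, and each submission slot's mask must equal the previous slot's mask shifted left by one, i.e. a placement occupies `width` adjacent logical instances. A runnable (userspace) illustration of that adjacency invariant:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Width-2 queue, two candidate placements: slot 0 may run on
         * logical instances 0 or 2, slot 1 on instances 1 or 3. */
        uint32_t slot0 = (1u << 0) | (1u << 2);         /* 0b0101 */
        uint32_t slot1 = (1u << 1) | (1u << 3);         /* 0b1010 */

        /* Accepted: each slot is exactly one instance above the last. */
        printf("adjacent: %s\n", slot1 == slot0 << 1 ? "yes" : "no");
        return 0;
}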
584 struct xe_device *xe = to_xe_device(dev); in xe_exec_queue_create_ioctl() local
595 u32 flags = 0; in xe_exec_queue_create_ioctl()
600 if (XE_IOCTL_DBG(xe, args->flags & ~DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT) || in xe_exec_queue_create_ioctl()
601 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_exec_queue_create_ioctl()
605 if (XE_IOCTL_DBG(xe, !len || len > XE_HW_ENGINE_MAX_INSTANCE)) in xe_exec_queue_create_ioctl()
610 if (XE_IOCTL_DBG(xe, err)) in xe_exec_queue_create_ioctl()
613 if (XE_IOCTL_DBG(xe, eci[0].gt_id >= xe->info.gt_count)) in xe_exec_queue_create_ioctl()
619 if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) { in xe_exec_queue_create_ioctl()
620 if (XE_IOCTL_DBG(xe, args->width != 1) || in xe_exec_queue_create_ioctl()
621 XE_IOCTL_DBG(xe, args->num_placements != 1) || in xe_exec_queue_create_ioctl()
622 XE_IOCTL_DBG(xe, eci[0].engine_instance != 0)) in xe_exec_queue_create_ioctl()
625 for_each_tile(tile, xe, id) { in xe_exec_queue_create_ioctl()
632 new = xe_exec_queue_create_bind(xe, tile, flags, in xe_exec_queue_create_ioctl()
640 if (id == 0) in xe_exec_queue_create_ioctl()
647 logical_mask = calc_validate_logical_mask(xe, eci, in xe_exec_queue_create_ioctl()
650 if (XE_IOCTL_DBG(xe, !logical_mask)) in xe_exec_queue_create_ioctl()
653 hwe = xe_hw_engine_lookup(xe, eci[0]); in xe_exec_queue_create_ioctl()
654 if (XE_IOCTL_DBG(xe, !hwe)) in xe_exec_queue_create_ioctl()
658 if (XE_IOCTL_DBG(xe, !vm)) in xe_exec_queue_create_ioctl()
667 if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) { in xe_exec_queue_create_ioctl()
673 q = xe_exec_queue_create(xe, vm, logical_mask, in xe_exec_queue_create_ioctl()
685 if (XE_IOCTL_DBG(xe, err)) in xe_exec_queue_create_ioctl()
705 return 0; in xe_exec_queue_create_ioctl()
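From userspace, everything above is reached through DRM_IOCTL_XE_EXEC_QUEUE_CREATE. A hedged usage sketch against include/uapi/drm/xe_drm.h; it assumes fd is an open Xe render node and vm_id an already-created VM, and the field layout should be double-checked against the header:

#include <stdint.h>
#include <sys/ioctl.h>
#include <xe_drm.h>     /* include/uapi/drm/xe_drm.h */

static int create_render_queue(int fd, uint32_t vm_id, uint32_t *queue_id)
{
        struct drm_xe_engine_class_instance eci = {
                .engine_class = DRM_XE_ENGINE_CLASS_RENDER,
                .engine_instance = 0,
                .gt_id = 0,
        };
        struct drm_xe_exec_queue_create create = {
                .width = 1,                     /* no parallel submission */
                .num_placements = 1,
                .vm_id = vm_id,
                .instances = (uintptr_t)&eci,   /* width x num_placements array */
        };
        int ret = ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create);

        if (!ret)
                *queue_id = create.exec_queue_id;
        return ret;
}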
717 struct xe_device *xe = to_xe_device(dev); in xe_exec_queue_get_property_ioctl() local
723 if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_exec_queue_get_property_ioctl()
727 if (XE_IOCTL_DBG(xe, !q)) in xe_exec_queue_get_property_ioctl()
733 ret = 0; in xe_exec_queue_get_property_ioctl()
758 return q->lrc[0]->fence_ctx.next_seqno - xe_lrc_seqno(q->lrc[0]) - 1; in xe_exec_queue_num_job_inflight()
769 struct xe_lrc *lrc = q->lrc[0]; in xe_exec_queue_ring_full()
794 for (i = 0; i < q->width; ++i) { in xe_exec_queue_is_idle()
803 return xe_lrc_seqno(q->lrc[0]) == in xe_exec_queue_is_idle()
804 q->lrc[0]->fence_ctx.next_seqno - 1; in xe_exec_queue_is_idle()
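Both helpers are pure seqno arithmetic on lrc[0]: fence_ctx.next_seqno is the seqno the next job will be assigned and xe_lrc_seqno() is the last seqno the hardware has written back, so next_seqno - completed - 1 counts submitted-but-unfinished jobs, and the queue is idle exactly when that count is zero, i.e. when xe_lrc_seqno() == next_seqno - 1. Worked example: with next_seqno == 12 and a write-back of 8, 12 - 8 - 1 = 3 jobs (seqnos 9, 10, 11) are still in flight; once the write-back reaches 11 the queue reads as idle.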
817 struct xe_device *xe = gt_to_xe(q->gt); in xe_exec_queue_update_run_ticks() local
829 /* Synchronize with unbind while holding the xe file open */ in xe_exec_queue_update_run_ticks()
830 if (!drm_dev_enter(&xe->drm, &idx)) in xe_exec_queue_update_run_ticks()
840 lrc = q->lrc[0]; in xe_exec_queue_update_run_ticks()
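The drm_dev_enter()/drm_dev_exit() pair is DRM's hotunplug guard: it takes an SRCU read-side lock and fails once drm_dev_unplug() has run, so the run-ticks update is simply skipped instead of touching hardware that may already be gone. The idiom in isolation:

        int idx;

        if (!drm_dev_enter(&xe->drm, &idx))
                return;         /* device unplugged: skip the HW access */

        /* ... safe to sample LRC timestamps here ... */

        drm_dev_exit(idx);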
873 struct xe_device *xe = to_xe_device(dev); in xe_exec_queue_destroy_ioctl() local
878 if (XE_IOCTL_DBG(xe, args->pad) || in xe_exec_queue_destroy_ioctl()
879 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_exec_queue_destroy_ioctl()
888 if (XE_IOCTL_DBG(xe, !q)) in xe_exec_queue_destroy_ioctl()
899 return 0; in xe_exec_queue_destroy_ioctl()
1020 int err = 0; in xe_exec_queue_last_fence_test_dep()
1025 			0 : -ETIME; in xe_exec_queue_last_fence_test_dep()
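The `0 : -ETIME` is a non-blocking dependency test on the queue's last fence: signaled means the dependency is already satisfied, otherwise -ETIME tells the caller it would have to wait. A hedged sketch using the standard dma-fence API; dma_fence_is_signaled() is the canonical helper, though the driver may test DMA_FENCE_FLAG_SIGNALED_BIT directly:

#include <linux/dma-fence.h>

static int last_fence_test(struct dma_fence *fence)
{
        int err = 0;

        if (fence) {
                err = dma_fence_is_signaled(fence) ? 0 : -ETIME;
                dma_fence_put(fence);   /* drop the reference we were handed */
        }
        return err;
}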