Lines matching refs: xe (uses of the struct xe_device pointer in drivers/gpu/drm/xe/xe_exec_queue.c)

63 static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
85 static int alloc_dep_schedulers(struct xe_device *xe, struct xe_exec_queue *q) in alloc_dep_schedulers() argument
106 dep_scheduler = xe_dep_scheduler_create(xe, wq, q->name, in alloc_dep_schedulers()
118 static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe, in __xe_exec_queue_alloc() argument
163 err = alloc_dep_schedulers(xe, q); in __xe_exec_queue_alloc()
178 err = exec_queue_user_extensions(xe, q, extensions, 0); in __xe_exec_queue_alloc()
257 struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm, in xe_exec_queue_create() argument
266 xe_assert(xe, !vm || (!!(vm->flags & XE_VM_FLAG_GSC) == !!(hwe->engine_id == XE_HW_ENGINE_GSCCS0))); in xe_exec_queue_create()
268 q = __xe_exec_queue_alloc(xe, vm, logical_mask, width, hwe, flags, in xe_exec_queue_create()
285 err = xe_pxp_exec_queue_add(xe->pxp, q); in xe_exec_queue_create()
300 struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt, in xe_exec_queue_create_class() argument
323 return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags, extensions); in xe_exec_queue_create_class()
342 struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe, in xe_exec_queue_create_bind() argument
351 if (xe->info.has_usm) { in xe_exec_queue_create_bind()
362 q = xe_exec_queue_create(xe, migrate_vm, in xe_exec_queue_create_bind()
366 q = xe_exec_queue_create_class(xe, gt, migrate_vm, in xe_exec_queue_create_bind()
467 xe_exec_queue_device_get_max_priority(struct xe_device *xe) in xe_exec_queue_device_get_max_priority() argument
473 static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q, in exec_queue_set_priority() argument
476 if (XE_IOCTL_DBG(xe, value > XE_EXEC_QUEUE_PRIORITY_HIGH)) in exec_queue_set_priority()
479 if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe))) in exec_queue_set_priority()
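Note: the two checks above bound the requested priority twice, once against the ABI maximum (XE_EXEC_QUEUE_PRIORITY_HIGH) and once against what the calling process may actually use. A minimal sketch of the device-max helper at line 467, assuming the common kernel pattern of gating elevated priority on CAP_SYS_NICE (the authoritative policy lives in the real xe_exec_queue_device_get_max_priority()):

    static enum xe_exec_queue_priority
    xe_exec_queue_device_get_max_priority(struct xe_device *xe)
    {
            /* Sketch: privileged callers get the full range, everyone
             * else is capped at NORMAL priority. */
            return capable(CAP_SYS_NICE) ? XE_EXEC_QUEUE_PRIORITY_HIGH :
                                           XE_EXEC_QUEUE_PRIORITY_NORMAL;
    }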
538 static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q, in exec_queue_set_timeslice() argument
555 exec_queue_set_pxp_type(struct xe_device *xe, struct xe_exec_queue *q, u64 value) in exec_queue_set_pxp_type() argument
561 if (XE_IOCTL_DBG(xe, value != DRM_XE_PXP_TYPE_HWDRM)) in exec_queue_set_pxp_type()
564 if (!xe_pxp_is_enabled(xe->pxp)) in exec_queue_set_pxp_type()
567 return xe_pxp_exec_queue_set_type(xe->pxp, q, DRM_XE_PXP_TYPE_HWDRM); in exec_queue_set_pxp_type()
570 typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
580 static int exec_queue_user_ext_set_property(struct xe_device *xe, in exec_queue_user_ext_set_property() argument
590 if (XE_IOCTL_DBG(xe, err)) in exec_queue_user_ext_set_property()
593 if (XE_IOCTL_DBG(xe, ext.property >= in exec_queue_user_ext_set_property()
595 XE_IOCTL_DBG(xe, ext.pad) || in exec_queue_user_ext_set_property()
596 XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY && in exec_queue_user_ext_set_property()
605 return exec_queue_set_property_funcs[idx](xe, q, ext.value); in exec_queue_user_ext_set_property()
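Note: lines 570-605 are the classic DRM set-property dispatch, a typedef'd handler table indexed by ext.property, with every validation wrapped in XE_IOCTL_DBG() so rejected ioctls are traceable when debug logging is enabled. From userspace, a property rides along exec-queue creation as a chained extension. A minimal sketch, assuming the uapi types from include/uapi/drm/xe_drm.h (the literal priority value is illustrative):

    #include <stdint.h>
    #include <drm/xe_drm.h>

    /* One set-property extension, terminating the chain. */
    struct drm_xe_ext_set_property prio = {
            .base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
            .base.next_extension = 0,       /* end of chain */
            .property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY,
            .value = 1,                     /* illustrative priority level */
    };

    struct drm_xe_exec_queue_create create = {
            .extensions = (uintptr_t)&prio,
            /* width, num_placements, vm_id, instances as in the
             * creation example further below */
    };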
608 typedef int (*xe_exec_queue_user_extension_fn)(struct xe_device *xe,
617 static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q, in exec_queue_user_extensions() argument
625 if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS)) in exec_queue_user_extensions()
629 if (XE_IOCTL_DBG(xe, err)) in exec_queue_user_extensions()
632 if (XE_IOCTL_DBG(xe, ext.pad) || in exec_queue_user_extensions()
633 XE_IOCTL_DBG(xe, ext.name >= in exec_queue_user_extensions()
639 err = exec_queue_user_extension_funcs[idx](xe, q, extensions); in exec_queue_user_extensions()
640 if (XE_IOCTL_DBG(xe, err)) in exec_queue_user_extensions()
644 return exec_queue_user_extensions(xe, q, ext.next_extension, in exec_queue_user_extensions()
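Note: the walker at lines 617-644 is recursive. It copies one struct drm_xe_user_extension header from userspace, rejects a non-zero pad field or an unknown name, dispatches through exec_queue_user_extension_funcs[], then recurses on ext.next_extension, with MAX_USER_EXTENSIONS bounding the depth so a long or cyclic userspace chain cannot recurse indefinitely. A condensed reconstruction of that shape from the matches above (a sketch, not the verbatim source):

    static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
                                          u64 extensions, int ext_number)
    {
            u64 __user *address = u64_to_user_ptr(extensions);
            struct drm_xe_user_extension ext;
            int err;
            u32 idx;

            /* Depth bound: terminates hostile or cyclic chains. */
            if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
                    return -E2BIG;

            err = copy_from_user(&ext, address, sizeof(ext));
            if (XE_IOCTL_DBG(xe, err))
                    return -EFAULT;

            if (XE_IOCTL_DBG(xe, ext.pad) ||
                XE_IOCTL_DBG(xe, ext.name >= ARRAY_SIZE(exec_queue_user_extension_funcs)))
                    return -EINVAL;

            idx = array_index_nospec(ext.name,
                                     ARRAY_SIZE(exec_queue_user_extension_funcs));
            err = exec_queue_user_extension_funcs[idx](xe, q, extensions);
            if (XE_IOCTL_DBG(xe, err))
                    return err;

            if (ext.next_extension)
                    return exec_queue_user_extensions(xe, q, ext.next_extension,
                                                      ++ext_number);

            return 0;
    }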
650 static u32 calc_validate_logical_mask(struct xe_device *xe, in calc_validate_logical_mask() argument
660 if (XE_IOCTL_DBG(xe, !xe_device_uc_enabled(xe) && in calc_validate_logical_mask()
672 hwe = xe_hw_engine_lookup(xe, eci[n]); in calc_validate_logical_mask()
673 if (XE_IOCTL_DBG(xe, !hwe)) in calc_validate_logical_mask()
676 if (XE_IOCTL_DBG(xe, xe_hw_engine_is_reserved(hwe))) in calc_validate_logical_mask()
679 if (XE_IOCTL_DBG(xe, n && eci[n].gt_id != gt_id) || in calc_validate_logical_mask()
680 XE_IOCTL_DBG(xe, n && eci[n].engine_class != class)) in calc_validate_logical_mask()
692 if (i && XE_IOCTL_DBG(xe, current_mask != prev_mask << 1)) in calc_validate_logical_mask()
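Note: calc_validate_logical_mask() (lines 650-692) both computes the placement's logical mask and enforces the parallel-submission layout: all instances must share one GT and engine class, and for width > 1 each lane's engine bit must be exactly the previous lane's bit shifted left by one, i.e. lanes occupy adjacent logical engine instances. A hypothetical helper distilling that current_mask != prev_mask << 1 invariant (the real function additionally intersects per-lane masks across all placements):

    /* Accept only width-wide placements on adjacent logical instances:
     * instances {2, 3} pass (BIT(3) == BIT(2) << 1), {2, 4} do not. */
    static bool lanes_adjacent(const u8 *instance, int width)
    {
            for (int i = 1; i < width; i++)
                    if (BIT(instance[i]) != BIT(instance[i - 1]) << 1)
                            return false;
            return true;
    }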
704 struct xe_device *xe = to_xe_device(dev); in xe_exec_queue_create_ioctl() local
720 if (XE_IOCTL_DBG(xe, args->flags & ~DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT) || in xe_exec_queue_create_ioctl()
721 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_exec_queue_create_ioctl()
725 if (XE_IOCTL_DBG(xe, !len || len > XE_HW_ENGINE_MAX_INSTANCE)) in xe_exec_queue_create_ioctl()
730 if (XE_IOCTL_DBG(xe, err)) in xe_exec_queue_create_ioctl()
733 if (XE_IOCTL_DBG(xe, !xe_device_get_gt(xe, eci[0].gt_id))) in xe_exec_queue_create_ioctl()
740 if (XE_IOCTL_DBG(xe, args->width != 1) || in xe_exec_queue_create_ioctl()
741 XE_IOCTL_DBG(xe, args->num_placements != 1) || in xe_exec_queue_create_ioctl()
742 XE_IOCTL_DBG(xe, eci[0].engine_instance != 0)) in xe_exec_queue_create_ioctl()
745 for_each_tile(tile, xe, id) { in xe_exec_queue_create_ioctl()
752 new = xe_exec_queue_create_bind(xe, tile, flags, in xe_exec_queue_create_ioctl()
767 logical_mask = calc_validate_logical_mask(xe, eci, in xe_exec_queue_create_ioctl()
770 if (XE_IOCTL_DBG(xe, !logical_mask)) in xe_exec_queue_create_ioctl()
773 hwe = xe_hw_engine_lookup(xe, eci[0]); in xe_exec_queue_create_ioctl()
774 if (XE_IOCTL_DBG(xe, !hwe)) in xe_exec_queue_create_ioctl()
778 if (XE_IOCTL_DBG(xe, !vm)) in xe_exec_queue_create_ioctl()
787 if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) { in xe_exec_queue_create_ioctl()
793 q = xe_exec_queue_create(xe, vm, logical_mask, in xe_exec_queue_create_ioctl()
805 if (XE_IOCTL_DBG(xe, err)) in xe_exec_queue_create_ioctl()
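Note: xe_exec_queue_create_ioctl() (lines 704-805) screens the uapi before reaching the create paths above: unknown flags, non-zero reserved fields, an empty or oversized engine-instance array, a bad GT id, and, on the VM-bind path, anything but width == 1, num_placements == 1 and engine_instance == 0 are all rejected. The matching userspace side is a plain DRM ioctl; a minimal sketch, assuming an open DRM fd and an existing VM (error handling trimmed):

    #include <errno.h>
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    /* Create a single-lane exec queue on render engine 0 of GT 0. */
    static int create_render_queue(int fd, uint32_t vm_id, uint32_t *queue_id)
    {
            struct drm_xe_engine_class_instance eci = {
                    .engine_class = DRM_XE_ENGINE_CLASS_RENDER,
                    .engine_instance = 0,
                    .gt_id = 0,
            };
            struct drm_xe_exec_queue_create create = {
                    .width = 1,             /* one engine per submission */
                    .num_placements = 1,    /* one placement candidate */
                    .vm_id = vm_id,
                    .instances = (uintptr_t)&eci,
            };

            if (ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create))
                    return -errno;

            *queue_id = create.exec_queue_id;
            return 0;
    }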
837 struct xe_device *xe = to_xe_device(dev); in xe_exec_queue_get_property_ioctl() local
843 if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_exec_queue_get_property_ioctl()
847 if (XE_IOCTL_DBG(xe, !q)) in xe_exec_queue_get_property_ioctl()
933 struct xe_device *xe = gt_to_xe(q->gt); in xe_exec_queue_update_run_ticks() local
946 if (!drm_dev_enter(&xe->drm, &idx)) in xe_exec_queue_update_run_ticks()
989 struct xe_device *xe = to_xe_device(dev); in xe_exec_queue_destroy_ioctl() local
994 if (XE_IOCTL_DBG(xe, args->pad) || in xe_exec_queue_destroy_ioctl()
995 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_exec_queue_destroy_ioctl()
1004 if (XE_IOCTL_DBG(xe, !q)) in xe_exec_queue_destroy_ioctl()
1122 xe_assert(vm->xe, !dma_fence_is_container(fence)); in xe_exec_queue_last_fence_set()
1139 xe_assert(vm->xe, type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT || in xe_exec_queue_tlb_inval_last_fence_put()
1156 xe_assert(q->vm->xe, type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT || in xe_exec_queue_tlb_inval_last_fence_put_unlocked()
1180 xe_assert(vm->xe, type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT || in xe_exec_queue_tlb_inval_last_fence_get()
1182 xe_assert(vm->xe, q->flags & (EXEC_QUEUE_FLAG_VM | in xe_exec_queue_tlb_inval_last_fence_get()
1212 xe_assert(vm->xe, type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT || in xe_exec_queue_tlb_inval_last_fence_set()
1214 xe_assert(vm->xe, q->flags & (EXEC_QUEUE_FLAG_VM | in xe_exec_queue_tlb_inval_last_fence_set()
1216 xe_assert(vm->xe, !dma_fence_is_container(fence)); in xe_exec_queue_tlb_inval_last_fence_set()