Lines Matching +full:0 +full:xe
104 XE_EXEC_QUEUE_JOB_TIMEOUT = 0,
110 static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
145 for (i = 0; i < XE_EXEC_QUEUE_TLB_INVAL_COUNT; ++i) in __xe_exec_queue_free()
167 static int alloc_dep_schedulers(struct xe_device *xe, struct xe_exec_queue *q) in alloc_dep_schedulers() argument
172 for (i = 0; i < XE_EXEC_QUEUE_TLB_INVAL_COUNT; ++i) { in alloc_dep_schedulers()
188 dep_scheduler = xe_dep_scheduler_create(xe, wq, q->name, in alloc_dep_schedulers()
197 return 0; in alloc_dep_schedulers()
200 static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe, in __xe_exec_queue_alloc() argument
249 err = alloc_dep_schedulers(xe, q); in __xe_exec_queue_alloc()
264 err = exec_queue_user_extensions(xe, q, extensions); in __xe_exec_queue_alloc()
321 return q->lrc[0]; in xe_exec_queue_lrc()
330 for (i = 0; i < q->width; ++i) in __xe_exec_queue_fini()
337 u32 flags = 0; in __xe_exec_queue_init()
373 for (i = 0; i < q->width; ++i) { in __xe_exec_queue_init()
398 return 0; in __xe_exec_queue_init()
407 * @xe: Xe device
419 struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm, in xe_exec_queue_create() argument
428 xe_assert(xe, !vm || (!!(vm->flags & XE_VM_FLAG_GSC) == !!(hwe->engine_id == XE_HW_ENGINE_GSCCS0))); in xe_exec_queue_create()
430 q = __xe_exec_queue_alloc(xe, vm, logical_mask, width, hwe, flags, in xe_exec_queue_create()
447 err = xe_pxp_exec_queue_add(xe->pxp, q); in xe_exec_queue_create()
464 * @xe: Xe device
475 struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt, in xe_exec_queue_create_class() argument
482 u32 logical_mask = 0; in xe_exec_queue_create_class()
498 return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags, extensions); in xe_exec_queue_create_class()
503 * @xe: Xe device.
518 struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe, in xe_exec_queue_create_bind() argument
528 if (xe->info.has_usm) { in xe_exec_queue_create_bind()
539 q = xe_exec_queue_create(xe, migrate_vm, in xe_exec_queue_create_bind()
543 q = xe_exec_queue_create_class(xe, gt, migrate_vm, in xe_exec_queue_create_bind()
580 xe_assert(gt_to_xe(q->gt), atomic_read(&q->job_cnt) == 0); in xe_exec_queue_destroy()
664 * @xef: Xe file private data
686 * @xe: Xe device
693 xe_exec_queue_device_get_max_priority(struct xe_device *xe) in xe_exec_queue_device_get_max_priority() argument
699 static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q, in exec_queue_set_priority() argument
702 if (XE_IOCTL_DBG(xe, value > XE_EXEC_QUEUE_PRIORITY_HIGH)) in exec_queue_set_priority()
705 if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe))) in exec_queue_set_priority()
709 return 0; in exec_queue_set_priority()
764 static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q, in exec_queue_set_timeslice() argument
767 u32 min = 0, max = 0; in exec_queue_set_timeslice()
777 return 0; in exec_queue_set_timeslice()
781 exec_queue_set_pxp_type(struct xe_device *xe, struct xe_exec_queue *q, u64 value) in exec_queue_set_pxp_type() argument
784 return 0; in exec_queue_set_pxp_type()
787 if (XE_IOCTL_DBG(xe, value != DRM_XE_PXP_TYPE_HWDRM)) in exec_queue_set_pxp_type()
790 if (!xe_pxp_is_enabled(xe->pxp)) in exec_queue_set_pxp_type()
793 return xe_pxp_exec_queue_set_type(xe->pxp, q, DRM_XE_PXP_TYPE_HWDRM); in exec_queue_set_pxp_type()
796 static int exec_queue_set_hang_replay_state(struct xe_device *xe, in exec_queue_set_hang_replay_state() argument
805 if (XE_IOCTL_DBG(xe, IS_ERR(ptr))) in exec_queue_set_hang_replay_state()
810 return 0; in exec_queue_set_hang_replay_state()
813 static int xe_exec_queue_group_init(struct xe_device *xe, struct xe_exec_queue *q) in xe_exec_queue_group_init() argument
823 bo = xe_bo_create_pin_map_novm(xe, tile, SZ_4K, ttm_bo_type_kernel, in xe_exec_queue_group_init()
830 drm_err(&xe->drm, "CGP bo allocation for queue group failed: %ld\n", in xe_exec_queue_group_init()
836 xe_map_memset(xe, &bo->vmap, 0, 0, SZ_4K); in xe_exec_queue_group_init()
852 return 0; in xe_exec_queue_group_init()
860 static int xe_exec_queue_group_validate(struct xe_device *xe, struct xe_exec_queue *q, in xe_exec_queue_group_validate() argument
872 if (XE_IOCTL_DBG(xe, !primary)) in xe_exec_queue_group_validate()
875 if (XE_IOCTL_DBG(xe, !xe_exec_queue_is_multi_queue_primary(primary)) || in xe_exec_queue_group_validate()
876 XE_IOCTL_DBG(xe, q->vm != primary->vm) || in xe_exec_queue_group_validate()
877 XE_IOCTL_DBG(xe, q->logical_mask != primary->logical_mask)) { in xe_exec_queue_group_validate()
886 return 0; in xe_exec_queue_group_validate()
893 static int xe_exec_queue_group_add(struct xe_device *xe, struct xe_exec_queue *q) in xe_exec_queue_group_add() argument
899 xe_assert(xe, xe_exec_queue_is_multi_queue_secondary(q)); in xe_exec_queue_group_add()
902 err = xa_alloc(&group->xa, &pos, xe_lrc_get(q->lrc[0]), in xe_exec_queue_group_add()
904 if (XE_IOCTL_DBG(xe, err)) { in xe_exec_queue_group_add()
905 xe_lrc_put(q->lrc[0]); in xe_exec_queue_group_add()
916 return 0; in xe_exec_queue_group_add()
919 static void xe_exec_queue_group_delete(struct xe_device *xe, struct xe_exec_queue *q) in xe_exec_queue_group_delete() argument
924 xe_assert(xe, xe_exec_queue_is_multi_queue_secondary(q)); in xe_exec_queue_group_delete()
927 xe_assert(xe, lrc); in xe_exec_queue_group_delete()
931 static int exec_queue_set_multi_group(struct xe_device *xe, struct xe_exec_queue *q, in exec_queue_set_multi_group() argument
934 if (XE_IOCTL_DBG(xe, !xe_exec_queue_supports_multi_queue(q))) in exec_queue_set_multi_group()
937 if (XE_IOCTL_DBG(xe, !xe_device_uc_enabled(xe))) in exec_queue_set_multi_group()
940 if (XE_IOCTL_DBG(xe, !q->vm->xef)) in exec_queue_set_multi_group()
943 if (XE_IOCTL_DBG(xe, xe_exec_queue_is_parallel(q))) in exec_queue_set_multi_group()
946 if (XE_IOCTL_DBG(xe, xe_exec_queue_is_multi_queue(q))) in exec_queue_set_multi_group()
950 if (XE_IOCTL_DBG(xe, value & ~DRM_XE_MULTI_GROUP_CREATE)) in exec_queue_set_multi_group()
955 q->multi_queue.pos = 0; in exec_queue_set_multi_group()
956 return 0; in exec_queue_set_multi_group()
959 /* While adding secondary queues, the upper 32 bits must be 0 */ in exec_queue_set_multi_group()
960 if (XE_IOCTL_DBG(xe, value & (~0ull << 32))) in exec_queue_set_multi_group()
963 return xe_exec_queue_group_validate(xe, q, value); in exec_queue_set_multi_group()
966 static int exec_queue_set_multi_queue_priority(struct xe_device *xe, struct xe_exec_queue *q, in exec_queue_set_multi_queue_priority() argument
969 if (XE_IOCTL_DBG(xe, value > XE_MULTI_QUEUE_PRIORITY_HIGH)) in exec_queue_set_multi_queue_priority()
975 return 0; in exec_queue_set_multi_queue_priority()
984 static int exec_queue_set_state_cache_perf_fix(struct xe_device *xe, struct xe_exec_queue *q, in exec_queue_set_state_cache_perf_fix() argument
987 if (XE_IOCTL_DBG(xe, q->class != XE_ENGINE_CLASS_RENDER)) in exec_queue_set_state_cache_perf_fix()
990 q->flags |= value != 0 ? EXEC_QUEUE_FLAG_DISABLE_STATE_CACHE_PERF_FIX : 0; in exec_queue_set_state_cache_perf_fix()
992 return 0; in exec_queue_set_state_cache_perf_fix()
995 typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
1020 * Return: 0 on success, negative error code on failure
1025 struct xe_device *xe = to_xe_device(dev); in xe_exec_queue_set_property_ioctl() local
1032 if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_exec_queue_set_property_ioctl()
1035 if (XE_IOCTL_DBG(xe, args->property != in xe_exec_queue_set_property_ioctl()
1040 if (XE_IOCTL_DBG(xe, !q)) in xe_exec_queue_set_property_ioctl()
1045 ret = exec_queue_set_property_funcs[idx](xe, q, args->value); in xe_exec_queue_set_property_ioctl()
1046 if (XE_IOCTL_DBG(xe, ret)) in xe_exec_queue_set_property_ioctl()
1050 return 0; in xe_exec_queue_set_property_ioctl()
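
The indexed call at line 1045 implies a function-pointer table keyed by the DRM_XE_EXEC_QUEUE_SET_PROPERTY_* ids; the table itself falls outside the matched lines. A minimal sketch of the pattern, assuming it maps at least the priority and timeslice setters visible above (the real table may carry more entries):

static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
	/*
	 * Designated initializers keyed by the uapi property id, so the
	 * ioctl can validate args->property (line 1035) before dispatching
	 * through the table (line 1045). Entries here are assumptions
	 * taken from the setters shown in this listing.
	 */
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
};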
1070 return 0; in exec_queue_user_ext_check()
1080 return 0; in exec_queue_user_ext_check_final()
1083 static int exec_queue_user_ext_set_property(struct xe_device *xe, in exec_queue_user_ext_set_property() argument
1093 if (XE_IOCTL_DBG(xe, err)) in exec_queue_user_ext_set_property()
1096 if (XE_IOCTL_DBG(xe, ext.property >= in exec_queue_user_ext_set_property()
1098 XE_IOCTL_DBG(xe, ext.pad) || in exec_queue_user_ext_set_property()
1099 XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY && in exec_queue_user_ext_set_property()
1114 if (XE_IOCTL_DBG(xe, err)) in exec_queue_user_ext_set_property()
1117 return exec_queue_set_property_funcs[idx](xe, q, ext.value); in exec_queue_user_ext_set_property()
1120 typedef int (*xe_exec_queue_user_extension_fn)(struct xe_device *xe,
1129 static int __exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q, in __exec_queue_user_extensions() argument
1137 if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS)) in __exec_queue_user_extensions()
1141 if (XE_IOCTL_DBG(xe, err)) in __exec_queue_user_extensions()
1144 if (XE_IOCTL_DBG(xe, ext.pad) || in __exec_queue_user_extensions()
1145 XE_IOCTL_DBG(xe, ext.name >= in __exec_queue_user_extensions()
1151 err = exec_queue_user_extension_funcs[idx](xe, q, extensions, properties); in __exec_queue_user_extensions()
1152 if (XE_IOCTL_DBG(xe, err)) in __exec_queue_user_extensions()
1156 return __exec_queue_user_extensions(xe, q, ext.next_extension, in __exec_queue_user_extensions()
1159 return 0; in __exec_queue_user_extensions()
1162 static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q, in exec_queue_user_extensions() argument
1165 u64 properties = 0; in exec_queue_user_extensions()
1168 err = __exec_queue_user_extensions(xe, q, extensions, 0, &properties); in exec_queue_user_extensions()
1169 if (XE_IOCTL_DBG(xe, err)) in exec_queue_user_extensions()
1173 if (XE_IOCTL_DBG(xe, err)) in exec_queue_user_extensions()
1177 err = xe_exec_queue_group_init(xe, q); in exec_queue_user_extensions()
1178 if (XE_IOCTL_DBG(xe, err)) in exec_queue_user_extensions()
1182 return 0; in exec_queue_user_extensions()
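
Lines 1129-1156 outline a recursive walk over a userspace extension chain. Below is a reconstruction of how those fragments plausibly fit together, assuming the uapi struct drm_xe_user_extension (next_extension/name/pad) and the usual copy_from_user()/array_index_nospec() hardening; a sketch assembled from the visible fragments, not the verbatim source:

static int __exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
					u64 extensions, int ext_number, u64 *properties)
{
	u64 __user *address = u64_to_user_ptr(extensions);
	struct drm_xe_user_extension ext;
	int err;
	u32 idx;

	/* Depth limit: a self-referencing chain cannot recurse forever */
	if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
		return -E2BIG;

	err = copy_from_user(&ext, address, sizeof(ext));
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	/* pad must be zero and name must index a known extension */
	if (XE_IOCTL_DBG(xe, ext.pad) ||
	    XE_IOCTL_DBG(xe, ext.name >=
			 ARRAY_SIZE(exec_queue_user_extension_funcs)))
		return -EINVAL;

	idx = array_index_nospec(ext.name,
				 ARRAY_SIZE(exec_queue_user_extension_funcs));
	err = exec_queue_user_extension_funcs[idx](xe, q, extensions, properties);
	if (XE_IOCTL_DBG(xe, err))
		return err;

	if (ext.next_extension)
		return __exec_queue_user_extensions(xe, q, ext.next_extension,
						    ++ext_number, properties);

	return 0;
}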
1185 static u32 calc_validate_logical_mask(struct xe_device *xe, in calc_validate_logical_mask() argument
1193 u32 return_mask = 0, prev_mask; in calc_validate_logical_mask()
1195 if (XE_IOCTL_DBG(xe, !xe_device_uc_enabled(xe) && in calc_validate_logical_mask()
1197 return 0; in calc_validate_logical_mask()
1199 for (i = 0; i < width; ++i) { in calc_validate_logical_mask()
1200 u32 current_mask = 0; in calc_validate_logical_mask()
1202 for (j = 0; j < num_placements; ++j) { in calc_validate_logical_mask()
1207 hwe = xe_hw_engine_lookup(xe, eci[n]); in calc_validate_logical_mask()
1208 if (XE_IOCTL_DBG(xe, !hwe)) in calc_validate_logical_mask()
1209 return 0; in calc_validate_logical_mask()
1211 if (XE_IOCTL_DBG(xe, xe_hw_engine_is_reserved(hwe))) in calc_validate_logical_mask()
1212 return 0; in calc_validate_logical_mask()
1214 if (XE_IOCTL_DBG(xe, n && eci[n].gt_id != gt_id) || in calc_validate_logical_mask()
1215 XE_IOCTL_DBG(xe, n && eci[n].engine_class != class)) in calc_validate_logical_mask()
1216 return 0; in calc_validate_logical_mask()
1227 if (i && XE_IOCTL_DBG(xe, current_mask != prev_mask << 1)) in calc_validate_logical_mask()
1228 return 0; in calc_validate_logical_mask()
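
The shift check at line 1227 is what forces parallel placements onto consecutive logical engine instances. A worked example of the arithmetic, assuming logical_instance numbering and BIT(n) == 1 << n:

/*
 * width = 2, num_placements = 2:
 *
 *   slot 0 may run on logical instances {0, 1} -> current_mask = 0b0011
 *   slot 1 may run on logical instances {1, 2} -> current_mask = 0b0110
 *
 * 0b0110 == 0b0011 << 1, so the check at line 1227 passes: each width
 * slot's candidate set must be the previous slot's shifted up by one,
 * i.e. every placement is a run of consecutive engines. Anything else
 * (say slot 1 on {0, 2} = 0b0101) fails, the function returns 0, and
 * the caller at line 1348 turns the empty mask into an ioctl error.
 */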
1257 * Return: 0 on success with exec_queue_id filled in, negative error code on failure
1262 struct xe_device *xe = to_xe_device(dev); in xe_exec_queue_create_ioctl() local
1273 u32 flags = 0; in xe_exec_queue_create_ioctl()
1278 if (XE_IOCTL_DBG(xe, args->flags & ~DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT) || in xe_exec_queue_create_ioctl()
1279 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_exec_queue_create_ioctl()
1283 if (XE_IOCTL_DBG(xe, !len || len > XE_HW_ENGINE_MAX_INSTANCE)) in xe_exec_queue_create_ioctl()
1288 if (XE_IOCTL_DBG(xe, err)) in xe_exec_queue_create_ioctl()
1291 if (XE_IOCTL_DBG(xe, !xe_device_get_gt(xe, eci[0].gt_id))) in xe_exec_queue_create_ioctl()
1297 if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) { in xe_exec_queue_create_ioctl()
1298 if (XE_IOCTL_DBG(xe, args->width != 1) || in xe_exec_queue_create_ioctl()
1299 XE_IOCTL_DBG(xe, args->num_placements != 1) || in xe_exec_queue_create_ioctl()
1300 XE_IOCTL_DBG(xe, eci[0].engine_instance != 0)) in xe_exec_queue_create_ioctl()
1304 if (XE_IOCTL_DBG(xe, !vm)) in xe_exec_queue_create_ioctl()
1313 if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) { in xe_exec_queue_create_ioctl()
1319 for_each_tile(tile, xe, id) { in xe_exec_queue_create_ioctl()
1326 new = xe_exec_queue_create_bind(xe, tile, vm, flags, in xe_exec_queue_create_ioctl()
1336 if (id == 0) in xe_exec_queue_create_ioctl()
1345 logical_mask = calc_validate_logical_mask(xe, eci, in xe_exec_queue_create_ioctl()
1348 if (XE_IOCTL_DBG(xe, !logical_mask)) in xe_exec_queue_create_ioctl()
1351 hwe = xe_hw_engine_lookup(xe, eci[0]); in xe_exec_queue_create_ioctl()
1352 if (XE_IOCTL_DBG(xe, !hwe)) in xe_exec_queue_create_ioctl()
1356 if (XE_IOCTL_DBG(xe, args->width > 1 && in xe_exec_queue_create_ioctl()
1357 !(xe->info.multi_lrc_mask & BIT(hwe->class)))) in xe_exec_queue_create_ioctl()
1361 if (XE_IOCTL_DBG(xe, !vm)) in xe_exec_queue_create_ioctl()
1370 if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) { in xe_exec_queue_create_ioctl()
1377 if (XE_IOCTL_DBG(xe, args->width > 1 && has_sched_groups(hwe->gt))) { in xe_exec_queue_create_ioctl()
1383 q = xe_exec_queue_create(xe, vm, logical_mask, in xe_exec_queue_create_ioctl()
1392 err = xe_exec_queue_group_add(xe, q); in xe_exec_queue_create_ioctl()
1393 if (XE_IOCTL_DBG(xe, err)) in xe_exec_queue_create_ioctl()
1401 if (XE_IOCTL_DBG(xe, err)) in xe_exec_queue_create_ioctl()
1413 if (eci[0].engine_class != DRM_XE_ENGINE_CLASS_VM_BIND) in xe_exec_queue_create_ioctl()
1423 return 0; in xe_exec_queue_create_ioctl()
1432 xe_exec_queue_group_delete(xe, q); in xe_exec_queue_create_ioctl()
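
For reference, a userspace sketch of exercising this ioctl. It assumes the uapi names in include/uapi/drm/xe_drm.h (struct drm_xe_exec_queue_create, struct drm_xe_engine_class_instance, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, DRM_XE_ENGINE_CLASS_RENDER); treat the exact field set as an assumption and check the header before relying on it:

#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/xe_drm.h>	/* uapi header: structs and ioctl number */

/*
 * Sketch: create a single-LRC exec queue (width = 1, one placement) on
 * render engine instance 0 of GT 0, bound to an existing VM. fd is an
 * open render node; vm_id comes from a prior DRM_IOCTL_XE_VM_CREATE.
 */
static int create_render_queue(int fd, uint32_t vm_id, uint32_t *queue_id)
{
	struct drm_xe_engine_class_instance eci = {
		.engine_class = DRM_XE_ENGINE_CLASS_RENDER,
		.engine_instance = 0,
		.gt_id = 0,
	};
	struct drm_xe_exec_queue_create create = {
		.width = 1,		/* no parallel submission */
		.num_placements = 1,	/* a single fixed placement */
		.vm_id = vm_id,
		.instances = (uintptr_t)&eci,	/* user pointer to eci array */
	};

	if (ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create))
		return -errno;

	*queue_id = create.exec_queue_id;
	return 0;
}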
1447 * Return: 0 on success with value filled in, negative error code on failure
1452 struct xe_device *xe = to_xe_device(dev); in xe_exec_queue_get_property_ioctl() local
1458 if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_exec_queue_get_property_ioctl()
1462 if (XE_IOCTL_DBG(xe, !q)) in xe_exec_queue_get_property_ioctl()
1468 ret = 0; in xe_exec_queue_get_property_ioctl()
1510 for (i = 0; i < q->width; ++i) { in xe_exec_queue_is_idle()
1519 return xe_lrc_seqno(q->lrc[0]) == in xe_exec_queue_is_idle()
1520 q->lrc[0]->fence_ctx.next_seqno - 1; in xe_exec_queue_is_idle()
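
The comparison at lines 1519-1520 is simple seqno arithmetic; a short worked example:

/*
 * fence_ctx.next_seqno is the seqno the *next* job will be assigned,
 * so the most recently emitted job carries next_seqno - 1. The queue
 * is idle exactly when the seqno the hardware has written back
 * (xe_lrc_seqno()) has caught up to that value. E.g. after emitting
 * jobs with seqnos 1..3, next_seqno == 4; the queue reads idle once
 * the LRC seqno reaches 3.
 */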
1533 struct xe_device *xe = gt_to_xe(q->gt); in xe_exec_queue_update_run_ticks() local
1545 /* Synchronize with unbind while holding the xe file open */ in xe_exec_queue_update_run_ticks()
1546 if (!drm_dev_enter(&xe->drm, &idx)) in xe_exec_queue_update_run_ticks()
1556 lrc = q->lrc[0]; in xe_exec_queue_update_run_ticks()
1594 * Return: 0 on success, negative error code on failure
1599 struct xe_device *xe = to_xe_device(dev); in xe_exec_queue_destroy_ioctl() local
1604 if (XE_IOCTL_DBG(xe, args->pad) || in xe_exec_queue_destroy_ioctl()
1605 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_exec_queue_destroy_ioctl()
1614 if (XE_IOCTL_DBG(xe, !q)) in xe_exec_queue_destroy_ioctl()
1625 return 0; in xe_exec_queue_destroy_ioctl()
1732 xe_assert(vm->xe, !dma_fence_is_container(fence)); in xe_exec_queue_last_fence_set()
1749 xe_assert(vm->xe, type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT || in xe_exec_queue_tlb_inval_last_fence_put()
1790 xe_assert(vm->xe, type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT || in xe_exec_queue_tlb_inval_last_fence_get()
1792 xe_assert(vm->xe, q->flags & (EXEC_QUEUE_FLAG_VM | in xe_exec_queue_tlb_inval_last_fence_get()
1822 xe_assert(vm->xe, type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT || in xe_exec_queue_tlb_inval_last_fence_set()
1824 xe_assert(vm->xe, q->flags & (EXEC_QUEUE_FLAG_VM | in xe_exec_queue_tlb_inval_last_fence_set()
1826 xe_assert(vm->xe, !dma_fence_is_container(fence)); in xe_exec_queue_tlb_inval_last_fence_set()
1843 int err = 0; in xe_exec_queue_contexts_hwsp_rebase()
1845 for (i = 0; i < q->width; ++i) { in xe_exec_queue_contexts_hwsp_rebase()