xref: /linux/drivers/gpu/drm/xe/xe_exec_queue.c (revision d40981350844c2cfa437abfc80596e10ea8f1149)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_exec_queue.h"

#include <linux/nospec.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/xe_drm.h>

#include "xe_device.h"
#include "xe_gt.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_hw_fence.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_migrate.h"
#include "xe_pm.h"
#include "xe_ring_ops_types.h"
#include "xe_trace.h"
#include "xe_vm.h"

enum xe_exec_queue_sched_prop {
	XE_EXEC_QUEUE_JOB_TIMEOUT = 0,
	XE_EXEC_QUEUE_TIMESLICE = 1,
	XE_EXEC_QUEUE_PREEMPT_TIMEOUT = 2,
	XE_EXEC_QUEUE_SCHED_PROP_MAX = 3,
};

static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 extensions, int ext_number);

static void __xe_exec_queue_free(struct xe_exec_queue *q)
{
	if (q->vm)
		xe_vm_put(q->vm);
	kfree(q);
}

static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
						   struct xe_vm *vm,
						   u32 logical_mask,
						   u16 width, struct xe_hw_engine *hwe,
						   u32 flags, u64 extensions)
{
	struct xe_exec_queue *q;
	struct xe_gt *gt = hwe->gt;
	int err;

	/* only kernel queues can be permanent */
	XE_WARN_ON((flags & EXEC_QUEUE_FLAG_PERMANENT) && !(flags & EXEC_QUEUE_FLAG_KERNEL));

	q = kzalloc(struct_size(q, lrc, width), GFP_KERNEL);
	if (!q)
		return ERR_PTR(-ENOMEM);

	kref_init(&q->refcount);
	q->flags = flags;
	q->hwe = hwe;
	q->gt = gt;
	q->class = hwe->class;
	q->width = width;
	q->logical_mask = logical_mask;
	q->fence_irq = &gt->fence_irq[hwe->class];
	q->ring_ops = gt->ring_ops[hwe->class];
	q->ops = gt->exec_queue_ops;
	INIT_LIST_HEAD(&q->lr.link);
	INIT_LIST_HEAD(&q->multi_gt_link);

	q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
	q->sched_props.preempt_timeout_us =
				hwe->eclass->sched_props.preempt_timeout_us;
	q->sched_props.job_timeout_ms =
				hwe->eclass->sched_props.job_timeout_ms;
	if (q->flags & EXEC_QUEUE_FLAG_KERNEL &&
	    q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY)
		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
	else
		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;

	if (vm)
		q->vm = xe_vm_get(vm);

	if (extensions) {
		/*
		 * User extensions may set q->usm, so they must be processed
		 * before xe_lrc_create(); they may also overwrite
		 * q->sched_props, so they must be processed before
		 * q->ops->init().
		 */
		err = exec_queue_user_extensions(xe, q, extensions, 0);
		if (err) {
			__xe_exec_queue_free(q);
			return ERR_PTR(err);
		}
	}

	return q;
}

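/*
 * Create one LRC per logical ring (q->width) and hand the queue to the
 * submission backend. On failure, unwind any LRCs created so far.
 */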
static int __xe_exec_queue_init(struct xe_exec_queue *q)
{
	int i, err;

	for (i = 0; i < q->width; ++i) {
		q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K);
		if (IS_ERR(q->lrc[i])) {
			err = PTR_ERR(q->lrc[i]);
			goto err_lrc;
		}
	}

	err = q->ops->init(q);
	if (err)
		goto err_lrc;

	return 0;

err_lrc:
	for (i = i - 1; i >= 0; --i)
		xe_lrc_put(q->lrc[i]);
	return err;
}

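/**
 * xe_exec_queue_create() - Create an exec queue
 * @xe: Xe device
 * @vm: VM the queue operates on, or NULL for a VM-less kernel queue
 * @logical_mask: Mask of logical engine instances the queue may run on
 * @width: Number of submissions per exec (number of LRCs)
 * @hwe: Hardware engine the queue is placed on
 * @flags: EXEC_QUEUE_FLAG_* flags
 * @extensions: User extension chain, 0 if none
 *
 * Allocates the queue, creates its LRCs under the VM lock (if a VM is
 * given) and registers it with the submission backend.
 *
 * Return: Pointer to the new exec queue or an ERR_PTR() on failure.
 */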
struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
					   u32 logical_mask, u16 width,
					   struct xe_hw_engine *hwe, u32 flags,
					   u64 extensions)
{
	struct xe_exec_queue *q;
	int err;

	q = __xe_exec_queue_alloc(xe, vm, logical_mask, width, hwe, flags,
				  extensions);
	if (IS_ERR(q))
		return q;

	if (vm) {
		err = xe_vm_lock(vm, true);
		if (err)
			goto err_post_alloc;
	}

	err = __xe_exec_queue_init(q);
	if (vm)
		xe_vm_unlock(vm);
	if (err)
		goto err_post_alloc;

	return q;

err_post_alloc:
	__xe_exec_queue_free(q);
	return ERR_PTR(err);
}

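/**
 * xe_exec_queue_create_class() - Create an exec queue for an engine class
 * @xe: Xe device
 * @gt: GT to pick engines from
 * @vm: VM the queue operates on, may be NULL
 * @class: Engine class to place the queue on
 * @flags: EXEC_QUEUE_FLAG_* flags
 *
 * Builds a logical mask spanning every non-reserved engine of @class on
 * @gt and creates a width-1 queue on the first such engine.
 *
 * Return: Pointer to the new exec queue, or ERR_PTR(-ENODEV) if @gt has
 * no usable engine of @class.
 */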
struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
						 struct xe_vm *vm,
						 enum xe_engine_class class, u32 flags)
{
	struct xe_hw_engine *hwe, *hwe0 = NULL;
	enum xe_hw_engine_id id;
	u32 logical_mask = 0;

	for_each_hw_engine(hwe, gt, id) {
		if (xe_hw_engine_is_reserved(hwe))
			continue;

		if (hwe->class == class) {
			logical_mask |= BIT(hwe->logical_instance);
			if (!hwe0)
				hwe0 = hwe;
		}
	}

	if (!logical_mask)
		return ERR_PTR(-ENODEV);

	return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags, 0);
}

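/**
 * xe_exec_queue_destroy() - Final release of an exec queue
 * @ref: The queue's embedded kref, dropped to zero
 *
 * Called via xe_exec_queue_put(); do not call directly. Drops the last
 * fence, puts any child queues on other GTs (for multi-GT bind engines)
 * and hands the queue to the backend's fini hook for final teardown.
 */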
void xe_exec_queue_destroy(struct kref *ref)
{
	struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
	struct xe_exec_queue *eq, *next;

	xe_exec_queue_last_fence_put_unlocked(q);
	if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) {
		list_for_each_entry_safe(eq, next, &q->multi_gt_list,
					 multi_gt_link)
			xe_exec_queue_put(eq);
	}

	q->ops->fini(q);
}

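/**
 * xe_exec_queue_fini() - Free an exec queue's resources
 * @q: The exec queue
 *
 * Puts the queue's LRCs and frees the queue itself. Expected to be
 * called by the submission backend once all references are gone.
 */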
void xe_exec_queue_fini(struct xe_exec_queue *q)
{
	int i;

	for (i = 0; i < q->width; ++i)
		xe_lrc_put(q->lrc[i]);
	__xe_exec_queue_free(q);
}

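/**
 * xe_exec_queue_assign_name() - Assign a debug name to an exec queue
 * @q: The exec queue
 * @instance: Instance number appended to the class prefix
 *
 * Formats q->name as "<class><instance>", e.g. "rcs0" or "ccs1".
 */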
void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance)
{
	switch (q->class) {
	case XE_ENGINE_CLASS_RENDER:
		snprintf(q->name, sizeof(q->name), "rcs%d", instance);
		break;
	case XE_ENGINE_CLASS_VIDEO_DECODE:
		snprintf(q->name, sizeof(q->name), "vcs%d", instance);
		break;
	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
		snprintf(q->name, sizeof(q->name), "vecs%d", instance);
		break;
	case XE_ENGINE_CLASS_COPY:
		snprintf(q->name, sizeof(q->name), "bcs%d", instance);
		break;
	case XE_ENGINE_CLASS_COMPUTE:
		snprintf(q->name, sizeof(q->name), "ccs%d", instance);
		break;
	case XE_ENGINE_CLASS_OTHER:
		snprintf(q->name, sizeof(q->name), "gsccs%d", instance);
		break;
	default:
		XE_WARN_ON(q->class);
	}
}

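/**
 * xe_exec_queue_lookup() - Look up an exec queue by user-visible ID
 * @xef: Xe file the queue was created on
 * @id: Exec queue ID
 *
 * Return: The exec queue with a reference taken, or NULL if @id does not
 * exist. The caller must drop the reference with xe_exec_queue_put().
 */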
struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id)
{
	struct xe_exec_queue *q;

	mutex_lock(&xef->exec_queue.lock);
	q = xa_load(&xef->exec_queue.xa, id);
	if (q)
		xe_exec_queue_get(q);
	mutex_unlock(&xef->exec_queue.lock);

	return q;
}

enum xe_exec_queue_priority
xe_exec_queue_device_get_max_priority(struct xe_device *xe)
{
	return capable(CAP_SYS_NICE) ? XE_EXEC_QUEUE_PRIORITY_HIGH :
				       XE_EXEC_QUEUE_PRIORITY_NORMAL;
}

static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q,
				   u64 value)
{
	if (XE_IOCTL_DBG(xe, value > XE_EXEC_QUEUE_PRIORITY_HIGH))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe)))
		return -EPERM;

	q->sched_props.priority = value;
	return 0;
}

static bool xe_exec_queue_enforce_schedule_limit(void)
{
#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
	return true;
#else
	return !capable(CAP_SYS_NICE);
#endif
}

static void
xe_exec_queue_get_prop_minmax(struct xe_hw_engine_class_intf *eclass,
			      enum xe_exec_queue_sched_prop prop,
			      u32 *min, u32 *max)
{
	switch (prop) {
	case XE_EXEC_QUEUE_JOB_TIMEOUT:
		*min = eclass->sched_props.job_timeout_min;
		*max = eclass->sched_props.job_timeout_max;
		break;
	case XE_EXEC_QUEUE_TIMESLICE:
		*min = eclass->sched_props.timeslice_min;
		*max = eclass->sched_props.timeslice_max;
		break;
	case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
		*min = eclass->sched_props.preempt_timeout_min;
		*max = eclass->sched_props.preempt_timeout_max;
		break;
	default:
		break;
	}
#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
	if (capable(CAP_SYS_NICE)) {
		switch (prop) {
		case XE_EXEC_QUEUE_JOB_TIMEOUT:
			*min = XE_HW_ENGINE_JOB_TIMEOUT_MIN;
			*max = XE_HW_ENGINE_JOB_TIMEOUT_MAX;
			break;
		case XE_EXEC_QUEUE_TIMESLICE:
			*min = XE_HW_ENGINE_TIMESLICE_MIN;
			*max = XE_HW_ENGINE_TIMESLICE_MAX;
			break;
		case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
			*min = XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN;
			*max = XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX;
			break;
		default:
			break;
		}
	}
#endif
}

static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q,
				    u64 value)
{
	u32 min = 0, max = 0;

	xe_exec_queue_get_prop_minmax(q->hwe->eclass,
				      XE_EXEC_QUEUE_TIMESLICE, &min, &max);

	if (xe_exec_queue_enforce_schedule_limit() &&
	    !xe_hw_engine_timeout_in_range(value, min, max))
		return -EINVAL;

	q->sched_props.timeslice_us = value;
	return 0;
}

typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
					     struct xe_exec_queue *q,
					     u64 value);

static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
};

static int exec_queue_user_ext_set_property(struct xe_device *xe,
					    struct xe_exec_queue *q,
					    u64 extension)
{
	u64 __user *address = u64_to_user_ptr(extension);
	struct drm_xe_ext_set_property ext;
	int err;
	u32 idx;

	err = __copy_from_user(&ext, address, sizeof(ext));
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, ext.property >=
			 ARRAY_SIZE(exec_queue_set_property_funcs)) ||
	    XE_IOCTL_DBG(xe, ext.pad) ||
	    XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY &&
			 ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE))
		return -EINVAL;

	idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
	if (!exec_queue_set_property_funcs[idx])
		return -EINVAL;

	return exec_queue_set_property_funcs[idx](xe, q, ext.value);
}

typedef int (*xe_exec_queue_user_extension_fn)(struct xe_device *xe,
					       struct xe_exec_queue *q,
					       u64 extension);

static const xe_exec_queue_user_extension_fn exec_queue_user_extension_funcs[] = {
	[DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY] = exec_queue_user_ext_set_property,
};

#define MAX_USER_EXTENSIONS	16
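/*
 * Walk the user extension chain: each drm_xe_user_extension names a
 * handler and points at the next extension. Recursion depth is bounded
 * by MAX_USER_EXTENSIONS so a malicious or cyclic chain cannot recurse
 * without limit.
 */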
static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 extensions, int ext_number)
{
	u64 __user *address = u64_to_user_ptr(extensions);
	struct drm_xe_user_extension ext;
	int err;
	u32 idx;

	if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
		return -E2BIG;

	err = __copy_from_user(&ext, address, sizeof(ext));
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, ext.pad) ||
	    XE_IOCTL_DBG(xe, ext.name >=
			 ARRAY_SIZE(exec_queue_user_extension_funcs)))
		return -EINVAL;

	idx = array_index_nospec(ext.name,
				 ARRAY_SIZE(exec_queue_user_extension_funcs));
	err = exec_queue_user_extension_funcs[idx](xe, q, extensions);
	if (XE_IOCTL_DBG(xe, err))
		return err;

	if (ext.next_extension)
		return exec_queue_user_extensions(xe, q, ext.next_extension,
						  ++ext_number);

	return 0;
}

static const enum xe_engine_class user_to_xe_engine_class[] = {
	[DRM_XE_ENGINE_CLASS_RENDER] = XE_ENGINE_CLASS_RENDER,
	[DRM_XE_ENGINE_CLASS_COPY] = XE_ENGINE_CLASS_COPY,
	[DRM_XE_ENGINE_CLASS_VIDEO_DECODE] = XE_ENGINE_CLASS_VIDEO_DECODE,
	[DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE] = XE_ENGINE_CLASS_VIDEO_ENHANCE,
	[DRM_XE_ENGINE_CLASS_COMPUTE] = XE_ENGINE_CLASS_COMPUTE,
};

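/*
 * Resolve a user-supplied class/instance/GT triple to a hardware engine.
 * The class index is sanitized with array_index_nospec() since it comes
 * straight from an ioctl and is used to index a kernel array.
 */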
static struct xe_hw_engine *
find_hw_engine(struct xe_device *xe,
	       struct drm_xe_engine_class_instance eci)
{
	u32 idx;

	if (eci.engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
		return NULL;

	if (eci.gt_id >= xe->info.gt_count)
		return NULL;

	idx = array_index_nospec(eci.engine_class,
				 ARRAY_SIZE(user_to_xe_engine_class));

	return xe_gt_hw_engine(xe_device_get_gt(xe, eci.gt_id),
			       user_to_xe_engine_class[idx],
			       eci.engine_instance, true);
}

static u32 bind_exec_queue_logical_mask(struct xe_device *xe, struct xe_gt *gt,
					struct drm_xe_engine_class_instance *eci,
					u16 width, u16 num_placements)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	u32 logical_mask = 0;

	if (XE_IOCTL_DBG(xe, width != 1))
		return 0;
	if (XE_IOCTL_DBG(xe, num_placements != 1))
		return 0;
	if (XE_IOCTL_DBG(xe, eci[0].engine_instance != 0))
		return 0;

	eci[0].engine_class = DRM_XE_ENGINE_CLASS_COPY;

	for_each_hw_engine(hwe, gt, id) {
		if (xe_hw_engine_is_reserved(hwe))
			continue;

		if (hwe->class ==
		    user_to_xe_engine_class[DRM_XE_ENGINE_CLASS_COPY])
			logical_mask |= BIT(hwe->logical_instance);
	}

	return logical_mask;
}

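/*
 * Validate a width x num_placements matrix of engine instances and
 * collapse it into a logical mask. eci[] is laid out with entry
 * (width i, placement j) at index j * width + i. All entries must share
 * one GT and engine class, and for parallel submission (width > 1) the
 * mask for width slot i must be the slot i - 1 mask shifted left by one,
 * i.e. each placement must use logically contiguous engines.
 */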
static u32 calc_validate_logical_mask(struct xe_device *xe, struct xe_gt *gt,
				      struct drm_xe_engine_class_instance *eci,
				      u16 width, u16 num_placements)
{
	int len = width * num_placements;
	int i, j, n;
	u16 class;
	u16 gt_id;
	u32 return_mask = 0, prev_mask;

	if (XE_IOCTL_DBG(xe, !xe_device_uc_enabled(xe) &&
			 len > 1))
		return 0;

	for (i = 0; i < width; ++i) {
		u32 current_mask = 0;

		for (j = 0; j < num_placements; ++j) {
			struct xe_hw_engine *hwe;

			n = j * width + i;

			hwe = find_hw_engine(xe, eci[n]);
			if (XE_IOCTL_DBG(xe, !hwe))
				return 0;

			if (XE_IOCTL_DBG(xe, xe_hw_engine_is_reserved(hwe)))
				return 0;

			if (XE_IOCTL_DBG(xe, n && eci[n].gt_id != gt_id) ||
			    XE_IOCTL_DBG(xe, n && eci[n].engine_class != class))
				return 0;

			class = eci[n].engine_class;
			gt_id = eci[n].gt_id;

			if (width == 1 || !i)
				return_mask |= BIT(eci[n].engine_instance);
			current_mask |= BIT(eci[n].engine_instance);
		}

		/* Parallel submissions must be logically contiguous */
		if (i && XE_IOCTL_DBG(xe, current_mask != prev_mask << 1))
			return 0;

		prev_mask = current_mask;
	}

	return return_mask;
}

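/**
 * xe_exec_queue_create_ioctl() - DRM_IOCTL_XE_EXEC_QUEUE_CREATE handler
 * @dev: DRM device
 * @data: struct drm_xe_exec_queue_create from user space
 * @file: DRM file
 *
 * Creates an exec queue for the given placement(s). VM bind queues
 * (DRM_XE_ENGINE_CLASS_VM_BIND) get one queue per non-media GT, chained
 * via the multi-GT list; all other classes create a single queue on the
 * VM identified by vm_id.
 *
 * Return: 0 on success, negative error code on failure.
 */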
int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_create *args = data;
	struct drm_xe_engine_class_instance eci[XE_HW_ENGINE_MAX_INSTANCE];
	struct drm_xe_engine_class_instance __user *user_eci =
		u64_to_user_ptr(args->instances);
	struct xe_hw_engine *hwe;
	struct xe_vm *vm, *migrate_vm;
	struct xe_gt *gt;
	struct xe_exec_queue *q = NULL;
	u32 logical_mask;
	u32 id;
	u32 len;
	int err;

	if (XE_IOCTL_DBG(xe, args->flags) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	len = args->width * args->num_placements;
	if (XE_IOCTL_DBG(xe, !len || len > XE_HW_ENGINE_MAX_INSTANCE))
		return -EINVAL;

	err = __copy_from_user(eci, user_eci,
			       sizeof(struct drm_xe_engine_class_instance) *
			       len);
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, eci[0].gt_id >= xe->info.gt_count))
		return -EINVAL;

	if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) {
		for_each_gt(gt, xe, id) {
			struct xe_exec_queue *new;
			u32 flags;

			if (xe_gt_is_media_type(gt))
				continue;

			eci[0].gt_id = gt->info.id;
			logical_mask = bind_exec_queue_logical_mask(xe, gt, eci,
								    args->width,
								    args->num_placements);
			if (XE_IOCTL_DBG(xe, !logical_mask))
				return -EINVAL;

			hwe = find_hw_engine(xe, eci[0]);
			if (XE_IOCTL_DBG(xe, !hwe))
				return -EINVAL;

			/* The migration VM doesn't hold an RPM ref */
			xe_pm_runtime_get_noresume(xe);

			flags = EXEC_QUEUE_FLAG_VM | (id ? EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD : 0);

			migrate_vm = xe_migrate_get_vm(gt_to_tile(gt)->migrate);
			new = xe_exec_queue_create(xe, migrate_vm, logical_mask,
						   args->width, hwe, flags,
						   args->extensions);

			xe_pm_runtime_put(xe); /* now held by engine */

			xe_vm_put(migrate_vm);
			if (IS_ERR(new)) {
				err = PTR_ERR(new);
				if (q)
					goto put_exec_queue;
				return err;
			}
			if (id == 0)
				q = new;
			else
				list_add_tail(&new->multi_gt_list,
					      &q->multi_gt_link);
		}
	} else {
		gt = xe_device_get_gt(xe, eci[0].gt_id);
		logical_mask = calc_validate_logical_mask(xe, gt, eci,
							  args->width,
							  args->num_placements);
		if (XE_IOCTL_DBG(xe, !logical_mask))
			return -EINVAL;

		hwe = find_hw_engine(xe, eci[0]);
		if (XE_IOCTL_DBG(xe, !hwe))
			return -EINVAL;

		vm = xe_vm_lookup(xef, args->vm_id);
		if (XE_IOCTL_DBG(xe, !vm))
			return -ENOENT;

		err = down_read_interruptible(&vm->lock);
		if (err) {
			xe_vm_put(vm);
			return err;
		}

		if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
			up_read(&vm->lock);
			xe_vm_put(vm);
			return -ENOENT;
		}

		q = xe_exec_queue_create(xe, vm, logical_mask,
					 args->width, hwe, 0,
					 args->extensions);
		up_read(&vm->lock);
		xe_vm_put(vm);
		if (IS_ERR(q))
			return PTR_ERR(q);

		if (xe_vm_in_preempt_fence_mode(vm)) {
			q->lr.context = dma_fence_context_alloc(1);
			spin_lock_init(&q->lr.lock);

			err = xe_vm_add_compute_exec_queue(vm, q);
			if (XE_IOCTL_DBG(xe, err))
				goto put_exec_queue;
		}
	}

	mutex_lock(&xef->exec_queue.lock);
	err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
	mutex_unlock(&xef->exec_queue.lock);
	if (err)
		goto kill_exec_queue;

	args->exec_queue_id = id;

	return 0;

kill_exec_queue:
	xe_exec_queue_kill(q);
put_exec_queue:
	xe_exec_queue_put(q);
	return err;
}

int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_get_property *args = data;
	struct xe_exec_queue *q;
	int ret;

	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	q = xe_exec_queue_lookup(xef, args->exec_queue_id);
	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	switch (args->property) {
	case DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN:
		args->value = q->ops->reset_status(q);
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	xe_exec_queue_put(q);

	return ret;
}

/**
 * xe_exec_queue_is_lr() - Whether an exec_queue is long-running
 * @q: The exec_queue
 *
 * Return: True if the exec_queue is long-running, false otherwise.
 */
bool xe_exec_queue_is_lr(struct xe_exec_queue *q)
{
	return q->vm && xe_vm_in_lr_mode(q->vm) &&
		!(q->flags & EXEC_QUEUE_FLAG_VM);
}

static s32 xe_exec_queue_num_job_inflight(struct xe_exec_queue *q)
{
	return q->lrc[0]->fence_ctx.next_seqno - xe_lrc_seqno(q->lrc[0]) - 1;
}

/**
 * xe_exec_queue_ring_full() - Whether an exec_queue's ring is full
 * @q: The exec_queue
 *
 * Return: True if the exec_queue's ring is full, false otherwise.
 */
bool xe_exec_queue_ring_full(struct xe_exec_queue *q)
{
	struct xe_lrc *lrc = q->lrc[0];
	s32 max_job = lrc->ring.size / MAX_JOB_SIZE_BYTES;

	return xe_exec_queue_num_job_inflight(q) >= max_job;
}

/**
 * xe_exec_queue_is_idle() - Whether an exec_queue is idle.
 * @q: The exec_queue
 *
 * FIXME: Need to determine what to use as the short-lived
 * timeline lock for the exec_queues, so that the return value
 * of this function becomes more than just an advisory
 * snapshot in time. The timeline lock must protect the
 * seqno from racing submissions on the same exec_queue.
 * Typically this is vm->resv, but user-created timeline locks use the
 * migrate vm and never grab the migrate vm->resv, so there is a race.
 *
 * Return: True if the exec_queue is idle, false otherwise.
 */
bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
{
	if (xe_exec_queue_is_parallel(q)) {
		int i;

		for (i = 0; i < q->width; ++i) {
			if (xe_lrc_seqno(q->lrc[i]) !=
			    q->lrc[i]->fence_ctx.next_seqno - 1)
				return false;
		}

		return true;
	}

	return xe_lrc_seqno(q->lrc[0]) ==
		q->lrc[0]->fence_ctx.next_seqno - 1;
}

/**
 * xe_exec_queue_update_run_ticks() - Update run ticks for this exec queue
 * @q: The exec queue
 *
 * Update the timestamp saved by HW for this exec queue and accumulate
 * the delta since the last update into the queue's run ticks.
 */
void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
{
	struct xe_lrc *lrc;
	u32 old_ts, new_ts;

	/*
	 * Jobs that are run during driver load may use an exec_queue, but are
	 * not associated with a user xe file, so avoid accumulating busyness
	 * for kernel-specific work.
	 */
	if (!q->vm || !q->vm->xef)
		return;

	/*
	 * Only sample the first LRC. For parallel submission, all of them are
	 * scheduled together and we compensate for that below by multiplying
	 * by width - this may introduce errors if that premise is not true
	 * and they don't exit 100% aligned. On the other hand, looping through
	 * the LRCs and reading them at different times could also introduce
	 * errors.
	 */
	lrc = q->lrc[0];
	new_ts = xe_lrc_update_timestamp(lrc, &old_ts);
	q->run_ticks += (new_ts - old_ts) * q->width;
}

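/**
 * xe_exec_queue_kill() - Kill an exec queue and its multi-GT children
 * @q: The exec queue
 *
 * Asks the submission backend to kill @q and any queues chained on its
 * multi-GT list, and removes each from the VM's compute exec queue list.
 */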
void xe_exec_queue_kill(struct xe_exec_queue *q)
{
	struct xe_exec_queue *eq = q, *next;

	list_for_each_entry_safe(eq, next, &eq->multi_gt_list,
				 multi_gt_link) {
		q->ops->kill(eq);
		xe_vm_remove_compute_exec_queue(q->vm, eq);
	}

	q->ops->kill(q);
	xe_vm_remove_compute_exec_queue(q->vm, q);
}

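/**
 * xe_exec_queue_destroy_ioctl() - DRM_IOCTL_XE_EXEC_QUEUE_DESTROY handler
 * @dev: DRM device
 * @data: struct drm_xe_exec_queue_destroy from user space
 * @file: DRM file
 *
 * Removes the queue from the file's xarray, kills it and drops the
 * reference held by the ID.
 *
 * Return: 0 on success, negative error code on failure.
 */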
int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_destroy *args = data;
	struct xe_exec_queue *q;

	if (XE_IOCTL_DBG(xe, args->pad) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	mutex_lock(&xef->exec_queue.lock);
	q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id);
	mutex_unlock(&xef->exec_queue.lock);
	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	xe_exec_queue_kill(q);

	trace_xe_exec_queue_close(q);
	xe_exec_queue_put(q);

	return 0;
}

static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q,
						    struct xe_vm *vm)
{
	if (q->flags & EXEC_QUEUE_FLAG_VM)
		lockdep_assert_held(&vm->lock);
	else
		xe_vm_assert_held(vm);
}

/**
 * xe_exec_queue_last_fence_put() - Drop ref to last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 */
void xe_exec_queue_last_fence_put(struct xe_exec_queue *q, struct xe_vm *vm)
{
	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	if (q->last_fence) {
		dma_fence_put(q->last_fence);
		q->last_fence = NULL;
	}
}

/**
 * xe_exec_queue_last_fence_put_unlocked() - Drop ref to last fence unlocked
 * @q: The exec queue
 *
 * Only safe to be called from xe_exec_queue_destroy().
 */
void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q)
{
	if (q->last_fence) {
		dma_fence_put(q->last_fence);
		q->last_fence = NULL;
	}
}

/**
 * xe_exec_queue_last_fence_get() - Get last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 *
 * Get the last fence, taking a reference to it.
 *
 * Returns: The last fence if not signaled, the dma-fence stub if signaled.
 */
struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
					       struct xe_vm *vm)
{
	struct dma_fence *fence;

	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	if (q->last_fence &&
	    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
		xe_exec_queue_last_fence_put(q, vm);

	fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
	dma_fence_get(fence);
	return fence;
}

/**
 * xe_exec_queue_last_fence_set() - Set last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 * @fence: The fence
 *
 * Set the last fence for the engine. Takes a reference to @fence; when
 * closing the engine, xe_exec_queue_last_fence_put() should be called.
 */
void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm,
				  struct dma_fence *fence)
{
	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	xe_exec_queue_last_fence_put(q, vm);
	q->last_fence = dma_fence_get(fence);
}
909