xref: /linux/drivers/gpu/drm/xe/xe_exec_queue.c (revision 38f7e5450ebfc6f2e046a249a3f629ea7bec8c31)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5 
6 #include "xe_exec_queue.h"
7 
8 #include <linux/nospec.h>
9 
10 #include <drm/drm_device.h>
11 #include <drm/drm_drv.h>
12 #include <drm/drm_file.h>
13 #include <drm/drm_syncobj.h>
14 #include <uapi/drm/xe_drm.h>
15 
16 #include "xe_bo.h"
17 #include "xe_dep_scheduler.h"
18 #include "xe_device.h"
19 #include "xe_gt.h"
20 #include "xe_gt_sriov_pf.h"
21 #include "xe_gt_sriov_vf.h"
22 #include "xe_hw_engine_class_sysfs.h"
23 #include "xe_hw_engine_group.h"
24 #include "xe_irq.h"
25 #include "xe_lrc.h"
26 #include "xe_macros.h"
27 #include "xe_migrate.h"
28 #include "xe_pm.h"
29 #include "xe_trace.h"
30 #include "xe_vm.h"
31 #include "xe_pxp.h"
32 
33 /**
34  * DOC: Execution Queue
35  *
36  * An execution queue is the interface to a HW context of execution.
37  * The user creates an execution queue, submits GPU jobs through it,
38  * and finally destroys it.
39  *
40  * Execution queues can also be created by XeKMD itself for driver-internal
41  * operations such as object migration.
42  *
43  * An execution queue is associated with a specific HW engine or a group of
44  * engines (belonging to the same tile and engine class), and any GPU job
45  * submitted on the queue runs on one of these engines.
46  *
47  * An execution queue is tied to an address space (VM). It holds references
48  * to the associated VM and the underlying Logical Ring Context(s) (LRCs)
49  * until the queue is destroyed.
50  *
51  * The execution queue sits on top of the submission backend. It transparently
52  * handles whichever backend the platform uses (GuC or execlists), as well as
53  * the ring operations supported by the different engine classes.
54  */
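
/*
 * Example: a minimal userspace sketch of creating and destroying an execution
 * queue on the render engine (fd and vm_id are placeholders for an open DRM
 * fd and an existing VM id; error handling elided):
 *
 *	struct drm_xe_engine_class_instance instance = {
 *		.engine_class = DRM_XE_ENGINE_CLASS_RENDER,
 *	};
 *	struct drm_xe_exec_queue_create create = {
 *		.width = 1,
 *		.num_placements = 1,
 *		.vm_id = vm_id,
 *		.instances = (uintptr_t)&instance,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create);
 *	... submit GPU jobs with exec_queue_id = create.exec_queue_id ...
 *	struct drm_xe_exec_queue_destroy destroy = {
 *		.exec_queue_id = create.exec_queue_id,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_DESTROY, &destroy);
 */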
55 
56 /**
57  * DOC: Multi Queue Group
58  *
59  * Multi Queue Group is another mode of execution supported by the compute
60  * and blitter copy command streamers (CCS and BCS, respectively). It is
61  * an enhancement of the existing hardware architecture and leverages the
62  * same submission model. It enables support for efficient, parallel
63  * execution of multiple queues within a single shared context. The multi
64  * queue group functionality is only supported with the GuC submission backend.
65  * All the queues of a group must use the same address space (VM).
66  *
67  * The DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_GROUP execution queue property
68  * supports creating a multi queue group and adding queues to a queue group.
69  *
70  * A DRM_XE_EXEC_QUEUE_CREATE ioctl call with the above property and the
71  * value field set to DRM_XE_MULTI_GROUP_CREATE will create a new multi queue
72  * group, with the queue being created becoming the primary queue (aka q0) of
73  * the group. To add secondary queues to the group, create them with the above
74  * property and the id of the primary queue as the value. The properties of
75  * the primary queue (like priority, time slice) apply to the whole group,
76  * so these properties can't be set for secondary queues of a group.
77  *
78  * The hardware does not support removing a queue from a multi-queue group.
79  * However, queues can be dynamically added to the group. A group can have
80  * up to 64 queues. To support this, XeKMD holds references to LRCs of the
81  * queues even after the queues are destroyed by the user until the whole
82  * group is destroyed. The secondary queues hold a reference to the primary
83  * queue, thus preventing the group from being destroyed when the user
84  * destroys the primary queue. Once the primary queue is destroyed, secondary
85  * queues can't be added to the queue group and new job submissions on
86  * existing secondary queues are not allowed.
87  *
88  * The queues of a multi queue group can set their priority within the group
89  * through the DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY property.
90  * This multi queue priority can also be set dynamically through the
91  * DRM_XE_EXEC_QUEUE_SET_PROPERTY ioctl. This is the only other property
92  * supported by the secondary queues of a multi queue group, other than
93  * DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_GROUP.
94  *
95  * When GuC reports an error on any of the queues of a multi queue group,
96  * the queue cleanup mechanism is invoked for all the queues of the group
97  * as hardware cannot make progress on the multi queue context.
98  *
99  * Refer to :ref:`multi-queue-group-guc-interface` for the multi queue group
100  * GuC interface.
101  */
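
/*
 * Example: a sketch of building a multi queue group from userspace, reusing
 * the engine class instance array from the previous example but pointing it
 * at a multi queue capable CCS/BCS engine (fd, vm_id and instance are
 * placeholders; error handling elided):
 *
 *	struct drm_xe_ext_set_property ext = {
 *		.base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
 *		.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_GROUP,
 *		.value = DRM_XE_MULTI_GROUP_CREATE,
 *	};
 *	struct drm_xe_exec_queue_create create = {
 *		.extensions = (uintptr_t)&ext,
 *		.width = 1,
 *		.num_placements = 1,
 *		.vm_id = vm_id,
 *		.instances = (uintptr_t)&instance,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create);
 *
 *	ext.value = create.exec_queue_id;
 *	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create);
 *
 * The first call creates the group with the new queue as the primary (q0);
 * the second call, with the primary's id as the property value, adds a
 * secondary queue on the same VM and engine.
 */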
102 
103 enum xe_exec_queue_sched_prop {
104 	XE_EXEC_QUEUE_JOB_TIMEOUT = 0,
105 	XE_EXEC_QUEUE_TIMESLICE = 1,
106 	XE_EXEC_QUEUE_PREEMPT_TIMEOUT = 2,
107 	XE_EXEC_QUEUE_SCHED_PROP_MAX = 3,
108 };
109 
110 static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
111 				      u64 extensions);
112 
113 static void xe_exec_queue_group_cleanup(struct xe_exec_queue *q)
114 {
115 	struct xe_exec_queue_group *group = q->multi_queue.group;
116 	struct xe_lrc *lrc;
117 	unsigned long idx;
118 
119 	if (xe_exec_queue_is_multi_queue_secondary(q)) {
120 		/*
121 		 * Put pairs with get from xe_exec_queue_lookup() call
122 		 * in xe_exec_queue_group_validate().
123 		 */
124 		xe_exec_queue_put(xe_exec_queue_multi_queue_primary(q));
125 		return;
126 	}
127 
128 	if (!group)
129 		return;
130 
131 	/* Primary queue cleanup */
132 	xa_for_each(&group->xa, idx, lrc)
133 		xe_lrc_put(lrc);
134 
135 	xa_destroy(&group->xa);
136 	mutex_destroy(&group->list_lock);
137 	xe_bo_unpin_map_no_vm(group->cgp_bo);
138 	kfree(group);
139 }
140 
141 static void __xe_exec_queue_free(struct xe_exec_queue *q)
142 {
143 	int i;
144 
145 	for (i = 0; i < XE_EXEC_QUEUE_TLB_INVAL_COUNT; ++i)
146 		if (q->tlb_inval[i].dep_scheduler)
147 			xe_dep_scheduler_fini(q->tlb_inval[i].dep_scheduler);
148 
149 	if (xe_exec_queue_uses_pxp(q))
150 		xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);
151 
152 	if (xe_exec_queue_is_multi_queue(q))
153 		xe_exec_queue_group_cleanup(q);
154 
155 	if (q->vm) {
156 		xe_vm_remove_exec_queue(q->vm, q);
157 		xe_vm_put(q->vm);
158 	}
159 
160 	if (q->xef)
161 		xe_file_put(q->xef);
162 
163 	kvfree(q->replay_state);
164 	kfree(q);
165 }
166 
167 static int alloc_dep_schedulers(struct xe_device *xe, struct xe_exec_queue *q)
168 {
169 	struct xe_tile *tile = gt_to_tile(q->gt);
170 	int i;
171 
172 	for (i = 0; i < XE_EXEC_QUEUE_TLB_INVAL_COUNT; ++i) {
173 		struct xe_dep_scheduler *dep_scheduler;
174 		struct xe_gt *gt;
175 		struct workqueue_struct *wq;
176 
177 		if (i == XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT)
178 			gt = tile->primary_gt;
179 		else
180 			gt = tile->media_gt;
181 
182 		if (!gt)
183 			continue;
184 
185 		wq = gt->tlb_inval.job_wq;
186 
187 #define MAX_TLB_INVAL_JOBS	16	/* Picking a reasonable value */
188 		dep_scheduler = xe_dep_scheduler_create(xe, wq, q->name,
189 							MAX_TLB_INVAL_JOBS);
190 		if (IS_ERR(dep_scheduler))
191 			return PTR_ERR(dep_scheduler);
192 
193 		q->tlb_inval[i].dep_scheduler = dep_scheduler;
194 	}
195 #undef MAX_TLB_INVAL_JOBS
196 
197 	return 0;
198 }
199 
200 static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
201 						   struct xe_vm *vm,
202 						   u32 logical_mask,
203 						   u16 width, struct xe_hw_engine *hwe,
204 						   u32 flags, u64 extensions)
205 {
206 	struct xe_exec_queue *q;
207 	struct xe_gt *gt = hwe->gt;
208 	int err;
209 
210 	/* only kernel queues can be permanent */
211 	XE_WARN_ON((flags & EXEC_QUEUE_FLAG_PERMANENT) && !(flags & EXEC_QUEUE_FLAG_KERNEL));
212 
213 	q = kzalloc_flex(*q, lrc, width);
214 	if (!q)
215 		return ERR_PTR(-ENOMEM);
216 
217 	kref_init(&q->refcount);
218 	q->flags = flags;
219 	q->hwe = hwe;
220 	q->gt = gt;
221 	q->class = hwe->class;
222 	q->width = width;
223 	q->msix_vec = XE_IRQ_DEFAULT_MSIX;
224 	q->logical_mask = logical_mask;
225 	q->fence_irq = &gt->fence_irq[hwe->class];
226 	q->ring_ops = gt->ring_ops[hwe->class];
227 	q->ops = gt->exec_queue_ops;
228 	INIT_LIST_HEAD(&q->lr.link);
229 	INIT_LIST_HEAD(&q->vm_exec_queue_link);
230 	INIT_LIST_HEAD(&q->multi_gt_link);
231 	INIT_LIST_HEAD(&q->hw_engine_group_link);
232 	INIT_LIST_HEAD(&q->pxp.link);
233 	spin_lock_init(&q->multi_queue.lock);
234 	spin_lock_init(&q->lrc_lookup_lock);
235 	q->multi_queue.priority = XE_MULTI_QUEUE_PRIORITY_NORMAL;
236 
237 	q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
238 	q->sched_props.preempt_timeout_us =
239 				hwe->eclass->sched_props.preempt_timeout_us;
240 	q->sched_props.job_timeout_ms =
241 				hwe->eclass->sched_props.job_timeout_ms;
242 	if (q->flags & EXEC_QUEUE_FLAG_KERNEL &&
243 	    q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY)
244 		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
245 	else
246 		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;
247 
248 	if (q->flags & (EXEC_QUEUE_FLAG_MIGRATE | EXEC_QUEUE_FLAG_VM)) {
249 		err = alloc_dep_schedulers(xe, q);
250 		if (err) {
251 			__xe_exec_queue_free(q);
252 			return ERR_PTR(err);
253 		}
254 	}
255 
256 	if (vm)
257 		q->vm = xe_vm_get(vm);
258 
259 	if (extensions) {
260 		/*
261 		 * Extensions may set q->usm (must come before xe_lrc_create()) and
262 		 * may overwrite q->sched_props (must come before q->ops->init()).
263 		 */
264 		err = exec_queue_user_extensions(xe, q, extensions);
265 		if (err) {
266 			__xe_exec_queue_free(q);
267 			return ERR_PTR(err);
268 		}
269 	}
270 
271 	return q;
272 }
273 
274 static void xe_exec_queue_set_lrc(struct xe_exec_queue *q, struct xe_lrc *lrc, u16 idx)
275 {
276 	xe_assert(gt_to_xe(q->gt), idx < q->width);
277 
278 	scoped_guard(spinlock, &q->lrc_lookup_lock)
279 		q->lrc[idx] = lrc;
280 }
281 
282 /**
283  * xe_exec_queue_get_lrc() - Get the LRC from exec queue.
284  * @q: The exec queue instance.
285  * @idx: Index within multi-LRC array.
286  *
287  * Retrieves the LRC at the given index for the exec queue under the lookup
288  * lock and takes a reference; drop it with xe_lrc_put() when done.
289  *
290  * Return: Pointer to the LRC on success, NULL on lookup failure. This
291  * function never returns an error pointer.
292  */
293 struct xe_lrc *xe_exec_queue_get_lrc(struct xe_exec_queue *q, u16 idx)
294 {
295 	struct xe_lrc *lrc;
296 
297 	xe_assert(gt_to_xe(q->gt), idx < q->width);
298 
299 	scoped_guard(spinlock, &q->lrc_lookup_lock) {
300 		lrc = q->lrc[idx];
301 		if (lrc)
302 			xe_lrc_get(lrc);
303 	}
304 
305 	return lrc;
306 }
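
/*
 * A sketch of the intended calling pattern: the reference taken on success
 * must be balanced with xe_lrc_put() once the caller is done with the LRC.
 *
 *	struct xe_lrc *lrc = xe_exec_queue_get_lrc(q, 0);
 *
 *	if (lrc) {
 *		... use lrc ...
 *		xe_lrc_put(lrc);
 *	}
 */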
307 
308 /**
309  * xe_exec_queue_lrc() - Get the LRC from exec queue.
310  * @q: The exec queue instance.
311  *
312  * Retrieves the primary LRC for the exec queue. Note that this function
313  * returns only the first LRC instance, even when multiple parallel LRCs
314  * are configured. It does not take a reference, so no matching put is
315  * required after use.
316  *
317  * Return: Pointer to the first LRC of the queue.
318  */
319 struct xe_lrc *xe_exec_queue_lrc(struct xe_exec_queue *q)
320 {
321 	return q->lrc[0];
322 }
323 
324 static void __xe_exec_queue_fini(struct xe_exec_queue *q)
325 {
326 	int i;
327 
328 	q->ops->fini(q);
329 
330 	for (i = 0; i < q->width; ++i)
331 		xe_lrc_put(q->lrc[i]);
332 }
333 
334 static int __xe_exec_queue_init(struct xe_exec_queue *q, u32 exec_queue_flags)
335 {
336 	int i, err;
337 	u32 flags = 0;
338 
339 	/*
340 	 * PXP workloads executing on RCS or CCS must run in isolation (i.e. no
341 	 * other workload can use the EUs at the same time). On MTL this is done
342 	 * by setting the RUNALONE bit in the LRC, while starting on Xe2 there
343 	 * is a dedicated bit for it.
344 	 */
345 	if (xe_exec_queue_uses_pxp(q) &&
346 	    (q->class == XE_ENGINE_CLASS_RENDER || q->class == XE_ENGINE_CLASS_COMPUTE)) {
347 		if (GRAPHICS_VER(gt_to_xe(q->gt)) >= 20)
348 			flags |= XE_LRC_CREATE_PXP;
349 		else
350 			flags |= XE_LRC_CREATE_RUNALONE;
351 	}
352 
353 	if (!(exec_queue_flags & EXEC_QUEUE_FLAG_KERNEL))
354 		flags |= XE_LRC_CREATE_USER_CTX;
355 
356 	err = q->ops->init(q);
357 	if (err)
358 		return err;
359 
360 	/*
361 	 * This must occur after q->ops->init to avoid race conditions during VF
362 	 * post-migration recovery, as the fixups for the LRC GGTT addresses
363 	 * depend on the queue being present in the backend tracking structure.
364 	 *
365 	 * In addition to above, we must wait on inflight GGTT changes to avoid
366 	 * writing out stale values here. Such wait provides a solid solution
367 	 * (without a race) only if the function can detect migration instantly
368 	 * from the moment vCPU resumes execution.
369 	 */
370 	for (i = 0; i < q->width; ++i) {
371 		struct xe_lrc *__lrc = NULL;
372 		int marker;
373 
374 		do {
375 			struct xe_lrc *lrc;
376 
377 			marker = xe_gt_sriov_vf_wait_valid_ggtt(q->gt);
378 
379 			lrc = xe_lrc_create(q->hwe, q->vm, q->replay_state,
380 					    xe_lrc_ring_size(), q->msix_vec, flags);
381 			if (IS_ERR(lrc)) {
382 				err = PTR_ERR(lrc);
383 				goto err_lrc;
384 			}
385 
386 			xe_exec_queue_set_lrc(q, lrc, i);
387 
388 			if (__lrc)
389 				xe_lrc_put(__lrc);
390 			__lrc = lrc;
391 
392 		} while (marker != xe_vf_migration_fixups_complete_count(q->gt));
393 	}
394 
395 	return 0;
396 
397 err_lrc:
398 	__xe_exec_queue_fini(q);
399 	return err;
400 }
401 
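/**
 * xe_exec_queue_create() - Create an exec queue.
 * @xe: Xe device.
 * @vm: The VM the exec queue is tied to, or NULL.
 * @logical_mask: Mask of logical engine instances the queue can be placed on.
 * @width: Number of LRCs (parallel submission width).
 * @hwe: The HW engine backing the queue.
 * @flags: EXEC_QUEUE_FLAG_* creation flags.
 * @extensions: User pointer to a chain of user extensions, or 0.
 *
 * Allocates and initializes an exec queue, processes any user extensions, and
 * registers the queue with PXP when a PXP type has been requested.
 *
 * Return: Pointer to the exec queue on success, ERR_PTR on failure.
 */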
402 struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
403 					   u32 logical_mask, u16 width,
404 					   struct xe_hw_engine *hwe, u32 flags,
405 					   u64 extensions)
406 {
407 	struct xe_exec_queue *q;
408 	int err;
409 
410 	/* VMs for GSCCS queues (and only those) must have the XE_VM_FLAG_GSC flag */
411 	xe_assert(xe, !vm || (!!(vm->flags & XE_VM_FLAG_GSC) == !!(hwe->engine_id == XE_HW_ENGINE_GSCCS0)));
412 
413 	q = __xe_exec_queue_alloc(xe, vm, logical_mask, width, hwe, flags,
414 				  extensions);
415 	if (IS_ERR(q))
416 		return q;
417 
418 	err = __xe_exec_queue_init(q, flags);
419 	if (err)
420 		goto err_post_alloc;
421 
422 	/*
423 	 * We can only add the queue to the PXP list after the init is complete,
424 	 * because the PXP termination can call exec_queue_kill and that will
425 	 * go bad if the queue is only half-initialized. This means that we
426 	 * can't do it when we handle the PXP extension in __xe_exec_queue_alloc
427 	 * and we need to do it here instead.
428 	 */
429 	if (xe_exec_queue_uses_pxp(q)) {
430 		err = xe_pxp_exec_queue_add(xe->pxp, q);
431 		if (err)
432 			goto err_post_init;
433 	}
434 
435 	return q;
436 
437 err_post_init:
438 	__xe_exec_queue_fini(q);
439 err_post_alloc:
440 	__xe_exec_queue_free(q);
441 	return ERR_PTR(err);
442 }
443 ALLOW_ERROR_INJECTION(xe_exec_queue_create, ERRNO);
444 
445 struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
446 						 struct xe_vm *vm,
447 						 enum xe_engine_class class,
448 						 u32 flags, u64 extensions)
449 {
450 	struct xe_hw_engine *hwe, *hwe0 = NULL;
451 	enum xe_hw_engine_id id;
452 	u32 logical_mask = 0;
453 
454 	for_each_hw_engine(hwe, gt, id) {
455 		if (xe_hw_engine_is_reserved(hwe))
456 			continue;
457 
458 		if (hwe->class == class) {
459 			logical_mask |= BIT(hwe->logical_instance);
460 			if (!hwe0)
461 				hwe0 = hwe;
462 		}
463 	}
464 
465 	if (!logical_mask)
466 		return ERR_PTR(-ENODEV);
467 
468 	return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags, extensions);
469 }
470 
471 /**
472  * xe_exec_queue_create_bind() - Create bind exec queue.
473  * @xe: Xe device.
474  * @tile: tile which bind exec queue belongs to.
475  * @flags: exec queue creation flags
476  * @user_vm: The user VM which this exec queue belongs to
477  * @extensions: exec queue creation extensions
478  *
479  * Normalize bind exec queue creation. Bind exec queue is tied to migration VM
480  * for access to physical memory required for page table programming. On
481  * faulting devices the reserved copy engine instance must be used to avoid
482  * deadlocking (user binds must not get stuck behind faults, because kernel
483  * binds which resolve faults can depend on user binds). On non-faulting
484  * devices any copy engine can be used.
485  *
486  * Returns exec queue on success, ERR_PTR on failure
487  */
488 struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
489 						struct xe_tile *tile,
490 						struct xe_vm *user_vm,
491 						u32 flags, u64 extensions)
492 {
493 	struct xe_gt *gt = tile->primary_gt;
494 	struct xe_exec_queue *q;
495 	struct xe_vm *migrate_vm;
496 
497 	migrate_vm = xe_migrate_get_vm(tile->migrate);
498 	if (xe->info.has_usm) {
499 		struct xe_hw_engine *hwe = xe_gt_hw_engine(gt,
500 							   XE_ENGINE_CLASS_COPY,
501 							   gt->usm.reserved_bcs_instance,
502 							   false);
503 
504 		if (!hwe) {
505 			xe_vm_put(migrate_vm);
506 			return ERR_PTR(-EINVAL);
507 		}
508 
509 		q = xe_exec_queue_create(xe, migrate_vm,
510 					 BIT(hwe->logical_instance), 1, hwe,
511 					 flags, extensions);
512 	} else {
513 		q = xe_exec_queue_create_class(xe, gt, migrate_vm,
514 					       XE_ENGINE_CLASS_COPY, flags,
515 					       extensions);
516 	}
517 	xe_vm_put(migrate_vm);
518 
519 	if (!IS_ERR(q)) {
520 		int err = drm_syncobj_create(&q->ufence_syncobj,
521 					     DRM_SYNCOBJ_CREATE_SIGNALED,
522 					     NULL);
523 		if (err) {
524 			xe_exec_queue_put(q);
525 			return ERR_PTR(err);
526 		}
527 
528 		if (user_vm)
529 			q->user_vm = xe_vm_get(user_vm);
530 	}
531 
532 	return q;
533 }
534 ALLOW_ERROR_INJECTION(xe_exec_queue_create_bind, ERRNO);
535 
536 void xe_exec_queue_destroy(struct kref *ref)
537 {
538 	struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
539 	struct xe_exec_queue *eq, *next;
540 	int i;
541 
542 	xe_assert(gt_to_xe(q->gt), atomic_read(&q->job_cnt) == 0);
543 
544 	if (q->ufence_syncobj)
545 		drm_syncobj_put(q->ufence_syncobj);
546 
547 	if (xe_exec_queue_uses_pxp(q))
548 		xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);
549 
550 	xe_exec_queue_last_fence_put_unlocked(q);
551 	for_each_tlb_inval(i)
552 		xe_exec_queue_tlb_inval_last_fence_put_unlocked(q, i);
553 
554 	if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) {
555 		list_for_each_entry_safe(eq, next, &q->multi_gt_list,
556 					 multi_gt_link)
557 			xe_exec_queue_put(eq);
558 	}
559 
560 	if (q->user_vm) {
561 		xe_vm_put(q->user_vm);
562 		q->user_vm = NULL;
563 	}
564 
565 	q->ops->destroy(q);
566 }
567 
568 void xe_exec_queue_fini(struct xe_exec_queue *q)
569 {
570 	/*
571 	 * Before releasing our ref to lrc and xef, accumulate our run ticks
572 	 * and wake up any waiters.
573 	 */
574 	xe_exec_queue_update_run_ticks(q);
575 	if (q->xef && atomic_dec_and_test(&q->xef->exec_queue.pending_removal))
576 		wake_up_var(&q->xef->exec_queue.pending_removal);
577 
578 	__xe_exec_queue_fini(q);
579 	__xe_exec_queue_free(q);
580 }
581 
582 void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance)
583 {
584 	switch (q->class) {
585 	case XE_ENGINE_CLASS_RENDER:
586 		snprintf(q->name, sizeof(q->name), "rcs%d", instance);
587 		break;
588 	case XE_ENGINE_CLASS_VIDEO_DECODE:
589 		snprintf(q->name, sizeof(q->name), "vcs%d", instance);
590 		break;
591 	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
592 		snprintf(q->name, sizeof(q->name), "vecs%d", instance);
593 		break;
594 	case XE_ENGINE_CLASS_COPY:
595 		snprintf(q->name, sizeof(q->name), "bcs%d", instance);
596 		break;
597 	case XE_ENGINE_CLASS_COMPUTE:
598 		snprintf(q->name, sizeof(q->name), "ccs%d", instance);
599 		break;
600 	case XE_ENGINE_CLASS_OTHER:
601 		snprintf(q->name, sizeof(q->name), "gsccs%d", instance);
602 		break;
603 	default:
604 		XE_WARN_ON(q->class);
605 	}
606 }
607 
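/**
 * xe_exec_queue_lookup() - Look up an exec queue by user id.
 * @xef: The xe file the exec queue belongs to.
 * @id: The user-visible exec queue id.
 *
 * Takes a reference on the exec queue if found; the caller must drop it with
 * xe_exec_queue_put().
 *
 * Return: Pointer to the exec queue, or NULL if the id is not found.
 */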
608 struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id)
609 {
610 	struct xe_exec_queue *q;
611 
612 	mutex_lock(&xef->exec_queue.lock);
613 	q = xa_load(&xef->exec_queue.xa, id);
614 	if (q)
615 		xe_exec_queue_get(q);
616 	mutex_unlock(&xef->exec_queue.lock);
617 
618 	return q;
619 }
620 
621 enum xe_exec_queue_priority
622 xe_exec_queue_device_get_max_priority(struct xe_device *xe)
623 {
624 	return capable(CAP_SYS_NICE) ? XE_EXEC_QUEUE_PRIORITY_HIGH :
625 				       XE_EXEC_QUEUE_PRIORITY_NORMAL;
626 }
627 
628 static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q,
629 				   u64 value)
630 {
631 	if (XE_IOCTL_DBG(xe, value > XE_EXEC_QUEUE_PRIORITY_HIGH))
632 		return -EINVAL;
633 
634 	if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe)))
635 		return -EPERM;
636 
637 	q->sched_props.priority = value;
638 	return 0;
639 }
640 
641 static bool xe_exec_queue_enforce_schedule_limit(void)
642 {
643 #if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
644 	return true;
645 #else
646 	return !capable(CAP_SYS_NICE);
647 #endif
648 }
649 
650 static void
651 xe_exec_queue_get_prop_minmax(struct xe_hw_engine_class_intf *eclass,
652 			      enum xe_exec_queue_sched_prop prop,
653 			      u32 *min, u32 *max)
654 {
655 	switch (prop) {
656 	case XE_EXEC_QUEUE_JOB_TIMEOUT:
657 		*min = eclass->sched_props.job_timeout_min;
658 		*max = eclass->sched_props.job_timeout_max;
659 		break;
660 	case XE_EXEC_QUEUE_TIMESLICE:
661 		*min = eclass->sched_props.timeslice_min;
662 		*max = eclass->sched_props.timeslice_max;
663 		break;
664 	case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
665 		*min = eclass->sched_props.preempt_timeout_min;
666 		*max = eclass->sched_props.preempt_timeout_max;
667 		break;
668 	default:
669 		break;
670 	}
671 #if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
672 	if (capable(CAP_SYS_NICE)) {
673 		switch (prop) {
674 		case XE_EXEC_QUEUE_JOB_TIMEOUT:
675 			*min = XE_HW_ENGINE_JOB_TIMEOUT_MIN;
676 			*max = XE_HW_ENGINE_JOB_TIMEOUT_MAX;
677 			break;
678 		case XE_EXEC_QUEUE_TIMESLICE:
679 			*min = XE_HW_ENGINE_TIMESLICE_MIN;
680 			*max = XE_HW_ENGINE_TIMESLICE_MAX;
681 			break;
682 		case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
683 			*min = XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN;
684 			*max = XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX;
685 			break;
686 		default:
687 			break;
688 		}
689 	}
690 #endif
691 }
692 
693 static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q,
694 				    u64 value)
695 {
696 	u32 min = 0, max = 0;
697 
698 	xe_exec_queue_get_prop_minmax(q->hwe->eclass,
699 				      XE_EXEC_QUEUE_TIMESLICE, &min, &max);
700 
701 	if (xe_exec_queue_enforce_schedule_limit() &&
702 	    !xe_hw_engine_timeout_in_range(value, min, max))
703 		return -EINVAL;
704 
705 	q->sched_props.timeslice_us = value;
706 	return 0;
707 }
708 
709 static int
710 exec_queue_set_pxp_type(struct xe_device *xe, struct xe_exec_queue *q, u64 value)
711 {
712 	if (value == DRM_XE_PXP_TYPE_NONE)
713 		return 0;
714 
715 	/* we only support HWDRM sessions right now */
716 	if (XE_IOCTL_DBG(xe, value != DRM_XE_PXP_TYPE_HWDRM))
717 		return -EINVAL;
718 
719 	if (!xe_pxp_is_enabled(xe->pxp))
720 		return -ENODEV;
721 
722 	return xe_pxp_exec_queue_set_type(xe->pxp, q, DRM_XE_PXP_TYPE_HWDRM);
723 }
724 
725 static int exec_queue_set_hang_replay_state(struct xe_device *xe,
726 					    struct xe_exec_queue *q,
727 					    u64 value)
728 {
729 	size_t size = xe_gt_lrc_hang_replay_size(q->gt, q->class);
730 	u64 __user *address = u64_to_user_ptr(value);
731 	void *ptr;
732 
733 	ptr = vmemdup_user(address, size);
734 	if (XE_IOCTL_DBG(xe, IS_ERR(ptr)))
735 		return PTR_ERR(ptr);
736 
737 	q->replay_state = ptr;
738 
739 	return 0;
740 }
741 
742 static int xe_exec_queue_group_init(struct xe_device *xe, struct xe_exec_queue *q)
743 {
744 	struct xe_tile *tile = gt_to_tile(q->gt);
745 	struct xe_exec_queue_group *group;
746 	struct xe_bo *bo;
747 
748 	group = kzalloc_obj(*group);
749 	if (!group)
750 		return -ENOMEM;
751 
752 	bo = xe_bo_create_pin_map_novm(xe, tile, SZ_4K, ttm_bo_type_kernel,
753 				       XE_BO_FLAG_VRAM_IF_DGFX(tile) |
754 				       XE_BO_FLAG_PINNED_LATE_RESTORE |
755 				       XE_BO_FLAG_FORCE_USER_VRAM |
756 				       XE_BO_FLAG_GGTT_INVALIDATE |
757 				       XE_BO_FLAG_GGTT, false);
758 	if (IS_ERR(bo)) {
759 		drm_err(&xe->drm, "CGP bo allocation for queue group failed: %ld\n",
760 			PTR_ERR(bo));
761 		kfree(group);
762 		return PTR_ERR(bo);
763 	}
764 
765 	xe_map_memset(xe, &bo->vmap, 0, 0, SZ_4K);
766 
767 	group->primary = q;
768 	group->cgp_bo = bo;
769 	INIT_LIST_HEAD(&group->list);
770 	xa_init_flags(&group->xa, XA_FLAGS_ALLOC1);
771 	mutex_init(&group->list_lock);
772 	q->multi_queue.group = group;
773 
774 	/* group->list_lock is used in submission backend */
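	/*
	 * Priming lockdep: taking list_lock inside a simulated reclaim
	 * section records the fs_reclaim -> list_lock dependency, so any
	 * later allocation that may reclaim while holding list_lock is
	 * flagged as a potential deadlock right away.
	 */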
775 	if (IS_ENABLED(CONFIG_LOCKDEP)) {
776 		fs_reclaim_acquire(GFP_KERNEL);
777 		might_lock(&group->list_lock);
778 		fs_reclaim_release(GFP_KERNEL);
779 	}
780 
781 	return 0;
782 }
783 
784 static inline bool xe_exec_queue_supports_multi_queue(struct xe_exec_queue *q)
785 {
786 	return q->gt->info.multi_queue_engine_class_mask & BIT(q->class);
787 }
788 
789 static int xe_exec_queue_group_validate(struct xe_device *xe, struct xe_exec_queue *q,
790 					u32 primary_id)
791 {
792 	struct xe_exec_queue_group *group;
793 	struct xe_exec_queue *primary;
794 	int ret;
795 
796 	/*
797 	 * Get from below xe_exec_queue_lookup() pairs with put
798 	 * in xe_exec_queue_group_cleanup().
799 	 */
800 	primary = xe_exec_queue_lookup(q->vm->xef, primary_id);
801 	if (XE_IOCTL_DBG(xe, !primary))
802 		return -ENOENT;
803 
804 	if (XE_IOCTL_DBG(xe, !xe_exec_queue_is_multi_queue_primary(primary)) ||
805 	    XE_IOCTL_DBG(xe, q->vm != primary->vm) ||
806 	    XE_IOCTL_DBG(xe, q->logical_mask != primary->logical_mask)) {
807 		ret = -EINVAL;
808 		goto put_primary;
809 	}
810 
811 	group = primary->multi_queue.group;
812 	q->multi_queue.valid = true;
813 	q->multi_queue.group = group;
814 
815 	return 0;
816 put_primary:
817 	xe_exec_queue_put(primary);
818 	return ret;
819 }
820 
821 #define XE_MAX_GROUP_SIZE	64
822 static int xe_exec_queue_group_add(struct xe_device *xe, struct xe_exec_queue *q)
823 {
824 	struct xe_exec_queue_group *group = q->multi_queue.group;
825 	u32 pos;
826 	int err;
827 
828 	xe_assert(xe, xe_exec_queue_is_multi_queue_secondary(q));
829 
830 	/* Primary queue holds a reference to LRCs of all secondary queues */
831 	err = xa_alloc(&group->xa, &pos, xe_lrc_get(q->lrc[0]),
832 		       XA_LIMIT(1, XE_MAX_GROUP_SIZE - 1), GFP_KERNEL);
833 	if (XE_IOCTL_DBG(xe, err)) {
834 		xe_lrc_put(q->lrc[0]);
835 
836 		/* Treat exceeding the queue group size limit as an invalid request */
837 		if (err == -EBUSY)
838 			err = -EINVAL;
839 
840 		return err;
841 	}
842 
843 	q->multi_queue.pos = pos;
844 
845 	return 0;
846 }
847 
848 static void xe_exec_queue_group_delete(struct xe_device *xe, struct xe_exec_queue *q)
849 {
850 	struct xe_exec_queue_group *group = q->multi_queue.group;
851 	struct xe_lrc *lrc;
852 
853 	xe_assert(xe, xe_exec_queue_is_multi_queue_secondary(q));
854 
855 	lrc = xa_erase(&group->xa, q->multi_queue.pos);
856 	xe_assert(xe, lrc);
857 	xe_lrc_put(lrc);
858 }
859 
860 static int exec_queue_set_multi_group(struct xe_device *xe, struct xe_exec_queue *q,
861 				      u64 value)
862 {
863 	if (XE_IOCTL_DBG(xe, !xe_exec_queue_supports_multi_queue(q)))
864 		return -ENODEV;
865 
866 	if (XE_IOCTL_DBG(xe, !xe_device_uc_enabled(xe)))
867 		return -EOPNOTSUPP;
868 
869 	if (XE_IOCTL_DBG(xe, !q->vm->xef))
870 		return -EINVAL;
871 
872 	if (XE_IOCTL_DBG(xe, xe_exec_queue_is_parallel(q)))
873 		return -EINVAL;
874 
875 	if (XE_IOCTL_DBG(xe, xe_exec_queue_is_multi_queue(q)))
876 		return -EINVAL;
877 
878 	if (value & DRM_XE_MULTI_GROUP_CREATE) {
879 		if (XE_IOCTL_DBG(xe, value & ~DRM_XE_MULTI_GROUP_CREATE))
880 			return -EINVAL;
881 
882 		q->multi_queue.valid = true;
883 		q->multi_queue.is_primary = true;
884 		q->multi_queue.pos = 0;
885 		return 0;
886 	}
887 
888 	/* While adding secondary queues, the upper 32 bits must be 0 */
889 	if (XE_IOCTL_DBG(xe, value & (~0ull << 32)))
890 		return -EINVAL;
891 
892 	return xe_exec_queue_group_validate(xe, q, value);
893 }
894 
895 static int exec_queue_set_multi_queue_priority(struct xe_device *xe, struct xe_exec_queue *q,
896 					       u64 value)
897 {
898 	if (XE_IOCTL_DBG(xe, value > XE_MULTI_QUEUE_PRIORITY_HIGH))
899 		return -EINVAL;
900 
901 	/* At queue creation time (!q->xef), just store the priority value */
902 	if (!q->xef) {
903 		q->multi_queue.priority = value;
904 		return 0;
905 	}
906 
907 	if (!xe_exec_queue_is_multi_queue(q))
908 		return -EINVAL;
909 
910 	return q->ops->set_multi_queue_priority(q, value);
911 }
912 
913 typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
914 					     struct xe_exec_queue *q,
915 					     u64 value);
916 
917 static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
918 	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
919 	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
920 	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE] = exec_queue_set_pxp_type,
921 	[DRM_XE_EXEC_QUEUE_SET_HANG_REPLAY_STATE] = exec_queue_set_hang_replay_state,
922 	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_GROUP] = exec_queue_set_multi_group,
923 	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY] =
924 							exec_queue_set_multi_queue_priority,
925 };
926 
927 int xe_exec_queue_set_property_ioctl(struct drm_device *dev, void *data,
928 				     struct drm_file *file)
929 {
930 	struct xe_device *xe = to_xe_device(dev);
931 	struct xe_file *xef = to_xe_file(file);
932 	struct drm_xe_exec_queue_set_property *args = data;
933 	struct xe_exec_queue *q;
934 	int ret;
935 	u32 idx;
936 
937 	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
938 		return -EINVAL;
939 
940 	if (XE_IOCTL_DBG(xe, args->property !=
941 			 DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY))
942 		return -EINVAL;
943 
944 	q = xe_exec_queue_lookup(xef, args->exec_queue_id);
945 	if (XE_IOCTL_DBG(xe, !q))
946 		return -ENOENT;
947 
948 	idx = array_index_nospec(args->property,
949 				 ARRAY_SIZE(exec_queue_set_property_funcs));
950 	ret = exec_queue_set_property_funcs[idx](xe, q, args->value);
951 	if (XE_IOCTL_DBG(xe, ret))
952 		goto err_post_lookup;
953 
954 	xe_exec_queue_put(q);
955 	return 0;
956 
957  err_post_lookup:
958 	xe_exec_queue_put(q);
959 	return ret;
960 }
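
/*
 * Example: a sketch of dynamically changing the multi queue priority from
 * userspace (fd, queue_id and prio are placeholders, and the ioctl wrapper
 * name is assumed to be DRM_IOCTL_XE_EXEC_QUEUE_SET_PROPERTY in the uapi
 * header; error handling elided):
 *
 *	struct drm_xe_exec_queue_set_property args = {
 *		.exec_queue_id = queue_id,
 *		.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY,
 *		.value = prio,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_SET_PROPERTY, &args);
 */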
961 
962 static int exec_queue_user_ext_check(struct xe_exec_queue *q, u64 properties)
963 {
964 	u64 secondary_queue_valid_props = BIT_ULL(DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_GROUP) |
965 				  BIT_ULL(DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY);
966 
967 	/*
968 	 * Besides MULTI_GROUP itself, only the MULTI_QUEUE_PRIORITY property
969 	 * is valid for secondary queues of a multi-queue group.
970 	 */
971 	if (xe_exec_queue_is_multi_queue_secondary(q) &&
972 	    properties & ~secondary_queue_valid_props)
973 		return -EINVAL;
974 
975 	return 0;
976 }
977 
978 static int exec_queue_user_ext_check_final(struct xe_exec_queue *q, u64 properties)
979 {
980 	/* MULTI_QUEUE_PRIORITY only applies to multi-queue group queues */
981 	if ((properties & BIT_ULL(DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY)) &&
982 	    !(properties & BIT_ULL(DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_GROUP)))
983 		return -EINVAL;
984 
985 	return 0;
986 }
987 
988 static int exec_queue_user_ext_set_property(struct xe_device *xe,
989 					    struct xe_exec_queue *q,
990 					    u64 extension, u64 *properties)
991 {
992 	u64 __user *address = u64_to_user_ptr(extension);
993 	struct drm_xe_ext_set_property ext;
994 	int err;
995 	u32 idx;
996 
997 	err = copy_from_user(&ext, address, sizeof(ext));
998 	if (XE_IOCTL_DBG(xe, err))
999 		return -EFAULT;
1000 
1001 	if (XE_IOCTL_DBG(xe, ext.property >=
1002 			 ARRAY_SIZE(exec_queue_set_property_funcs)) ||
1003 	    XE_IOCTL_DBG(xe, ext.pad) ||
1004 	    XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY &&
1005 			 ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE &&
1006 			 ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE &&
1007 			 ext.property != DRM_XE_EXEC_QUEUE_SET_HANG_REPLAY_STATE &&
1008 			 ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_GROUP &&
1009 			 ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY))
1010 		return -EINVAL;
1011 
1012 	idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
1013 	if (!exec_queue_set_property_funcs[idx])
1014 		return -EINVAL;
1015 
1016 	*properties |= BIT_ULL(idx);
1017 	err = exec_queue_user_ext_check(q, *properties);
1018 	if (XE_IOCTL_DBG(xe, err))
1019 		return err;
1020 
1021 	return exec_queue_set_property_funcs[idx](xe, q, ext.value);
1022 }
1023 
1024 typedef int (*xe_exec_queue_user_extension_fn)(struct xe_device *xe,
1025 					       struct xe_exec_queue *q,
1026 					       u64 extension, u64 *properties);
1027 
1028 static const xe_exec_queue_user_extension_fn exec_queue_user_extension_funcs[] = {
1029 	[DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY] = exec_queue_user_ext_set_property,
1030 };
1031 
1032 #define MAX_USER_EXTENSIONS	16
1033 static int __exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
1034 					u64 extensions, int ext_number, u64 *properties)
1035 {
1036 	u64 __user *address = u64_to_user_ptr(extensions);
1037 	struct drm_xe_user_extension ext;
1038 	int err;
1039 	u32 idx;
1040 
1041 	if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
1042 		return -E2BIG;
1043 
1044 	err = copy_from_user(&ext, address, sizeof(ext));
1045 	if (XE_IOCTL_DBG(xe, err))
1046 		return -EFAULT;
1047 
1048 	if (XE_IOCTL_DBG(xe, ext.pad) ||
1049 	    XE_IOCTL_DBG(xe, ext.name >=
1050 			 ARRAY_SIZE(exec_queue_user_extension_funcs)))
1051 		return -EINVAL;
1052 
1053 	idx = array_index_nospec(ext.name,
1054 				 ARRAY_SIZE(exec_queue_user_extension_funcs));
1055 	err = exec_queue_user_extension_funcs[idx](xe, q, extensions, properties);
1056 	if (XE_IOCTL_DBG(xe, err))
1057 		return err;
1058 
1059 	if (ext.next_extension)
1060 		return __exec_queue_user_extensions(xe, q, ext.next_extension,
1061 						    ++ext_number, properties);
1062 
1063 	return 0;
1064 }
1065 
1066 static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
1067 				      u64 extensions)
1068 {
1069 	u64 properties = 0;
1070 	int err;
1071 
1072 	err = __exec_queue_user_extensions(xe, q, extensions, 0, &properties);
1073 	if (XE_IOCTL_DBG(xe, err))
1074 		return err;
1075 
1076 	err = exec_queue_user_ext_check_final(q, properties);
1077 	if (XE_IOCTL_DBG(xe, err))
1078 		return err;
1079 
1080 	if (xe_exec_queue_is_multi_queue_primary(q)) {
1081 		err = xe_exec_queue_group_init(xe, q);
1082 		if (XE_IOCTL_DBG(xe, err))
1083 			return err;
1084 	}
1085 
1086 	return 0;
1087 }
1088 
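/*
 * The instance array passed to the exec queue create ioctl is laid out
 * placement-major: entry n = j * width + i is the engine used by parallel
 * slot i under placement j. A worked example for width = 2 and
 * num_placements = 2, with placements on logical instances (0, 1) and
 * (1, 2):
 *
 *	eci[] = { 0, 1, 1, 2 }
 *
 * Slot i == 0 yields the mask BIT(0) | BIT(1) and slot i == 1 yields
 * BIT(1) | BIT(2), satisfying the prev_mask << 1 contiguity check below;
 * the returned logical mask is BIT(0) | BIT(1), the i == 0 mask.
 */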
1089 static u32 calc_validate_logical_mask(struct xe_device *xe,
1090 				      struct drm_xe_engine_class_instance *eci,
1091 				      u16 width, u16 num_placements)
1092 {
1093 	int len = width * num_placements;
1094 	int i, j, n;
1095 	u16 class;
1096 	u16 gt_id;
1097 	u32 return_mask = 0, prev_mask;
1098 
1099 	if (XE_IOCTL_DBG(xe, !xe_device_uc_enabled(xe) &&
1100 			 len > 1))
1101 		return 0;
1102 
1103 	for (i = 0; i < width; ++i) {
1104 		u32 current_mask = 0;
1105 
1106 		for (j = 0; j < num_placements; ++j) {
1107 			struct xe_hw_engine *hwe;
1108 
1109 			n = j * width + i;
1110 
1111 			hwe = xe_hw_engine_lookup(xe, eci[n]);
1112 			if (XE_IOCTL_DBG(xe, !hwe))
1113 				return 0;
1114 
1115 			if (XE_IOCTL_DBG(xe, xe_hw_engine_is_reserved(hwe)))
1116 				return 0;
1117 
1118 			if (XE_IOCTL_DBG(xe, n && eci[n].gt_id != gt_id) ||
1119 			    XE_IOCTL_DBG(xe, n && eci[n].engine_class != class))
1120 				return 0;
1121 
1122 			class = eci[n].engine_class;
1123 			gt_id = eci[n].gt_id;
1124 
1125 			if (width == 1 || !i)
1126 				return_mask |= BIT(eci[n].engine_instance);
1127 			current_mask |= BIT(eci[n].engine_instance);
1128 		}
1129 
1130 		/* Parallel submissions must be logically contiguous */
1131 		if (i && XE_IOCTL_DBG(xe, current_mask != prev_mask << 1))
1132 			return 0;
1133 
1134 		prev_mask = current_mask;
1135 	}
1136 
1137 	return return_mask;
1138 }
1139 
1140 static bool has_sched_groups(struct xe_gt *gt)
1141 {
1142 	if (IS_SRIOV_PF(gt_to_xe(gt)) && xe_gt_sriov_pf_sched_groups_enabled(gt))
1143 		return true;
1144 
1145 	if (IS_SRIOV_VF(gt_to_xe(gt)) && xe_gt_sriov_vf_sched_groups_enabled(gt))
1146 		return true;
1147 
1148 	return false;
1149 }
1150 
1151 int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
1152 			       struct drm_file *file)
1153 {
1154 	struct xe_device *xe = to_xe_device(dev);
1155 	struct xe_file *xef = to_xe_file(file);
1156 	struct drm_xe_exec_queue_create *args = data;
1157 	struct drm_xe_engine_class_instance eci[XE_HW_ENGINE_MAX_INSTANCE];
1158 	struct drm_xe_engine_class_instance __user *user_eci =
1159 		u64_to_user_ptr(args->instances);
1160 	struct xe_hw_engine *hwe;
1161 	struct xe_vm *vm;
1162 	struct xe_tile *tile;
1163 	struct xe_exec_queue *q = NULL;
1164 	u32 logical_mask;
1165 	u32 flags = 0;
1166 	u32 id;
1167 	u32 len;
1168 	int err;
1169 
1170 	if (XE_IOCTL_DBG(xe, args->flags & ~DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT) ||
1171 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1172 		return -EINVAL;
1173 
1174 	len = args->width * args->num_placements;
1175 	if (XE_IOCTL_DBG(xe, !len || len > XE_HW_ENGINE_MAX_INSTANCE))
1176 		return -EINVAL;
1177 
1178 	err = copy_from_user(eci, user_eci,
1179 			     sizeof(struct drm_xe_engine_class_instance) * len);
1180 	if (XE_IOCTL_DBG(xe, err))
1181 		return -EFAULT;
1182 
1183 	if (XE_IOCTL_DBG(xe, !xe_device_get_gt(xe, eci[0].gt_id)))
1184 		return -EINVAL;
1185 
1186 	if (args->flags & DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT)
1187 		flags |= EXEC_QUEUE_FLAG_LOW_LATENCY;
1188 
1189 	if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) {
1190 		if (XE_IOCTL_DBG(xe, args->width != 1) ||
1191 		    XE_IOCTL_DBG(xe, args->num_placements != 1) ||
1192 		    XE_IOCTL_DBG(xe, eci[0].engine_instance != 0))
1193 			return -EINVAL;
1194 
1195 		vm = xe_vm_lookup(xef, args->vm_id);
1196 		if (XE_IOCTL_DBG(xe, !vm))
1197 			return -ENOENT;
1198 
1199 		err = down_read_interruptible(&vm->lock);
1200 		if (err) {
1201 			xe_vm_put(vm);
1202 			return err;
1203 		}
1204 
1205 		if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
1206 			up_read(&vm->lock);
1207 			xe_vm_put(vm);
1208 			return -ENOENT;
1209 		}
1210 
1211 		for_each_tile(tile, xe, id) {
1212 			struct xe_exec_queue *new;
1213 
1214 			flags |= EXEC_QUEUE_FLAG_VM;
1215 			if (id)
1216 				flags |= EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD;
1217 
1218 			new = xe_exec_queue_create_bind(xe, tile, vm, flags,
1219 							args->extensions);
1220 			if (IS_ERR(new)) {
1221 				up_read(&vm->lock);
1222 				xe_vm_put(vm);
1223 				err = PTR_ERR(new);
1224 				if (q)
1225 					goto put_exec_queue;
1226 				return err;
1227 			}
1228 			if (id == 0)
1229 				q = new;
1230 			else
1231 				list_add_tail(&new->multi_gt_list,
1232 					      &q->multi_gt_link);
1233 		}
1234 		up_read(&vm->lock);
1235 		xe_vm_put(vm);
1236 	} else {
1237 		logical_mask = calc_validate_logical_mask(xe, eci,
1238 							  args->width,
1239 							  args->num_placements);
1240 		if (XE_IOCTL_DBG(xe, !logical_mask))
1241 			return -EINVAL;
1242 
1243 		hwe = xe_hw_engine_lookup(xe, eci[0]);
1244 		if (XE_IOCTL_DBG(xe, !hwe))
1245 			return -EINVAL;
1246 
1247 		/* multi-lrc is only supported on select engine classes */
1248 		if (XE_IOCTL_DBG(xe, args->width > 1 &&
1249 				 !(xe->info.multi_lrc_mask & BIT(hwe->class))))
1250 			return -EOPNOTSUPP;
1251 
1252 		vm = xe_vm_lookup(xef, args->vm_id);
1253 		if (XE_IOCTL_DBG(xe, !vm))
1254 			return -ENOENT;
1255 
1256 		err = down_read_interruptible(&vm->lock);
1257 		if (err) {
1258 			xe_vm_put(vm);
1259 			return err;
1260 		}
1261 
1262 		if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
1263 			up_read(&vm->lock);
1264 			xe_vm_put(vm);
1265 			return -ENOENT;
1266 		}
1267 
1268 		/* SRIOV sched groups are not compatible with multi-lrc */
1269 		if (XE_IOCTL_DBG(xe, args->width > 1 && has_sched_groups(hwe->gt))) {
1270 			up_read(&vm->lock);
1271 			xe_vm_put(vm);
1272 			return -EINVAL;
1273 		}
1274 
1275 		q = xe_exec_queue_create(xe, vm, logical_mask,
1276 					 args->width, hwe, flags,
1277 					 args->extensions);
1278 		up_read(&vm->lock);
1279 		xe_vm_put(vm);
1280 		if (IS_ERR(q))
1281 			return PTR_ERR(q);
1282 
1283 		if (xe_exec_queue_is_multi_queue_secondary(q)) {
1284 			err = xe_exec_queue_group_add(xe, q);
1285 			if (XE_IOCTL_DBG(xe, err))
1286 				goto put_exec_queue;
1287 		}
1288 
1289 		if (xe_vm_in_preempt_fence_mode(vm)) {
1290 			q->lr.context = dma_fence_context_alloc(1);
1291 
1292 			err = xe_vm_add_compute_exec_queue(vm, q);
1293 			if (XE_IOCTL_DBG(xe, err))
1294 				goto delete_queue_group;
1295 		}
1296 
1297 		if (q->vm && q->hwe->hw_engine_group) {
1298 			err = xe_hw_engine_group_add_exec_queue(q->hwe->hw_engine_group, q);
1299 			if (err)
1300 				goto put_exec_queue;
1301 		}
1302 	}
1303 
1304 	q->xef = xe_file_get(xef);
1305 	if (eci[0].engine_class != DRM_XE_ENGINE_CLASS_VM_BIND)
1306 		xe_vm_add_exec_queue(vm, q);
1307 
1308 	/* user id alloc must always be last in ioctl to prevent UAF */
1309 	err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
1310 	if (err)
1311 		goto kill_exec_queue;
1312 
1313 	args->exec_queue_id = id;
1314 
1315 	return 0;
1316 
1317 kill_exec_queue:
1318 	xe_exec_queue_kill(q);
1319 delete_queue_group:
1320 	if (xe_exec_queue_is_multi_queue_secondary(q))
1321 		xe_exec_queue_group_delete(xe, q);
1322 put_exec_queue:
1323 	xe_exec_queue_put(q);
1324 	return err;
1325 }
1326 
1327 int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
1328 				     struct drm_file *file)
1329 {
1330 	struct xe_device *xe = to_xe_device(dev);
1331 	struct xe_file *xef = to_xe_file(file);
1332 	struct drm_xe_exec_queue_get_property *args = data;
1333 	struct xe_exec_queue *q;
1334 	int ret;
1335 
1336 	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1337 		return -EINVAL;
1338 
1339 	q = xe_exec_queue_lookup(xef, args->exec_queue_id);
1340 	if (XE_IOCTL_DBG(xe, !q))
1341 		return -ENOENT;
1342 
1343 	switch (args->property) {
1344 	case DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN:
1345 		args->value = q->ops->reset_status(q);
1346 		ret = 0;
1347 		break;
1348 	default:
1349 		ret = -EINVAL;
1350 	}
1351 
1352 	xe_exec_queue_put(q);
1353 
1354 	return ret;
1355 }
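
/*
 * Example: a sketch of querying the ban status of an exec queue from
 * userspace (fd and queue_id are placeholders; error handling elided):
 *
 *	struct drm_xe_exec_queue_get_property args = {
 *		.exec_queue_id = queue_id,
 *		.property = DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY, &args);
 *
 * On success, args.value is non-zero if the queue has been banned.
 */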
1356 
1357 /**
1358  * xe_exec_queue_is_lr() - Whether an exec_queue is long-running
1359  * @q: The exec_queue
1360  *
1361  * Return: True if the exec_queue is long-running, false otherwise.
1362  */
1363 bool xe_exec_queue_is_lr(struct xe_exec_queue *q)
1364 {
1365 	return q->vm && xe_vm_in_lr_mode(q->vm) &&
1366 		!(q->flags & EXEC_QUEUE_FLAG_VM);
1367 }
1368 
1369 /**
1370  * xe_exec_queue_is_idle() - Whether an exec_queue is idle.
1371  * @q: The exec_queue
1372  *
1373  * FIXME: Need to determine what to use as the short-lived
1374  * timeline lock for the exec_queues, so that the return value
1375  * of this function becomes more than just an advisory
1376  * snapshot in time. The timeline lock must protect the
1377  * seqno from racing submissions on the same exec_queue.
1378  * Typically vm->resv, but user-created timeline locks use the migrate vm
1379  * and never grab the migrate vm->resv, so we have a race there.
1380  *
1381  * Return: True if the exec_queue is idle, false otherwise.
1382  */
1383 bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
1384 {
1385 	if (xe_exec_queue_is_parallel(q)) {
1386 		int i;
1387 
1388 		for (i = 0; i < q->width; ++i) {
1389 			if (xe_lrc_seqno(q->lrc[i]) !=
1390 			    q->lrc[i]->fence_ctx.next_seqno - 1)
1391 				return false;
1392 		}
1393 
1394 		return true;
1395 	}
1396 
1397 	return xe_lrc_seqno(q->lrc[0]) ==
1398 		q->lrc[0]->fence_ctx.next_seqno - 1;
1399 }
1400 
1401 /**
1402  * xe_exec_queue_update_run_ticks() - Update run time in ticks for this exec queue
1403  * from hw
1404  * @q: The exec queue
1405  *
1406  * Update the timestamp saved by HW for this exec queue and save run ticks
1407  * calculated by using the delta from last update.
1408  */
1409 void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
1410 {
1411 	struct xe_device *xe = gt_to_xe(q->gt);
1412 	struct xe_lrc *lrc;
1413 	u64 old_ts, new_ts;
1414 	int idx;
1415 
1416 	/*
1417 	 * Jobs that are executed by the kernel don't have a corresponding
1418 	 * xe_file and thus are not accounted.
1419 	 */
1420 	if (!q->xef)
1421 		return;
1422 
1423 	/* Synchronize with unbind while holding the xe file open */
1424 	if (!drm_dev_enter(&xe->drm, &idx))
1425 		return;
1426 	/*
1427 	 * Only sample the first LRC. For parallel submission, all of them are
1428 	 * scheduled together and we compensate that below by multiplying by
1429 	 * width - this may introduce errors if that premise is not true and
1430 	 * they don't exit 100% aligned. On the other hand, looping through
1431 	 * the LRCs and reading them at different times could also introduce
1432 	 * errors.
1433 	 */
1434 	lrc = q->lrc[0];
1435 	new_ts = xe_lrc_update_timestamp(lrc, &old_ts);
1436 	q->xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;
1437 
1438 	drm_dev_exit(idx);
1439 }
1440 
1441 /**
1442  * xe_exec_queue_kill - permanently stop all execution from an exec queue
1443  * @q: The exec queue
1444  *
1445  * This function permanently stops all activity on an exec queue. If the queue
1446  * is actively executing on the HW, it will be kicked off the engine; any
1447  * pending jobs are discarded and all future submissions are rejected.
1448  * This function is safe to call multiple times.
1449  */
1450 void xe_exec_queue_kill(struct xe_exec_queue *q)
1451 {
1452 	struct xe_exec_queue *eq = q, *next;
1453 
1454 	list_for_each_entry_safe(eq, next, &eq->multi_gt_list,
1455 				 multi_gt_link) {
1456 		q->ops->kill(eq);
1457 		xe_vm_remove_compute_exec_queue(q->vm, eq);
1458 	}
1459 
1460 	q->ops->kill(q);
1461 	xe_vm_remove_compute_exec_queue(q->vm, q);
1462 }
1463 
1464 int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
1465 				struct drm_file *file)
1466 {
1467 	struct xe_device *xe = to_xe_device(dev);
1468 	struct xe_file *xef = to_xe_file(file);
1469 	struct drm_xe_exec_queue_destroy *args = data;
1470 	struct xe_exec_queue *q;
1471 
1472 	if (XE_IOCTL_DBG(xe, args->pad) ||
1473 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1474 		return -EINVAL;
1475 
1476 	mutex_lock(&xef->exec_queue.lock);
1477 	q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id);
1478 	if (q)
1479 		atomic_inc(&xef->exec_queue.pending_removal);
1480 	mutex_unlock(&xef->exec_queue.lock);
1481 
1482 	if (XE_IOCTL_DBG(xe, !q))
1483 		return -ENOENT;
1484 
1485 	if (q->vm && q->hwe->hw_engine_group)
1486 		xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q);
1487 
1488 	xe_exec_queue_kill(q);
1489 
1490 	trace_xe_exec_queue_close(q);
1491 	xe_exec_queue_put(q);
1492 
1493 	return 0;
1494 }
1495 
1496 static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q,
1497 						    struct xe_vm *vm)
1498 {
1499 	if (q->flags & EXEC_QUEUE_FLAG_MIGRATE) {
1500 		xe_migrate_job_lock_assert(q);
1501 	} else if (q->flags & EXEC_QUEUE_FLAG_VM) {
1502 		lockdep_assert_held(&vm->lock);
1503 	} else {
1504 		xe_vm_assert_held(vm);
1505 		lockdep_assert_held(&q->hwe->hw_engine_group->mode_sem);
1506 	}
1507 }
1508 
1509 /**
1510  * xe_exec_queue_last_fence_put() - Drop ref to last fence
1511  * @q: The exec queue
1512  * @vm: The VM the engine does a bind or exec for
1513  */
1514 void xe_exec_queue_last_fence_put(struct xe_exec_queue *q, struct xe_vm *vm)
1515 {
1516 	xe_exec_queue_last_fence_lockdep_assert(q, vm);
1517 
1518 	xe_exec_queue_last_fence_put_unlocked(q);
1519 }
1520 
1521 /**
1522  * xe_exec_queue_last_fence_put_unlocked() - Drop ref to last fence unlocked
1523  * @q: The exec queue
1524  *
1525  * Only safe to be called from xe_exec_queue_destroy().
1526  */
1527 void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q)
1528 {
1529 	if (q->last_fence) {
1530 		dma_fence_put(q->last_fence);
1531 		q->last_fence = NULL;
1532 	}
1533 }
1534 
1535 /**
1536  * xe_exec_queue_last_fence_get() - Get last fence
1537  * @q: The exec queue
1538  * @vm: The VM the engine does a bind or exec for
1539  *
1540  * Get last fence, takes a ref
1541  *
1542  * Returns: last fence if not signaled, dma fence stub if signaled
1543  */
1544 struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
1545 					       struct xe_vm *vm)
1546 {
1547 	struct dma_fence *fence;
1548 
1549 	xe_exec_queue_last_fence_lockdep_assert(q, vm);
1550 
1551 	if (q->last_fence &&
1552 	    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
1553 		xe_exec_queue_last_fence_put(q, vm);
1554 
1555 	fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
1556 	dma_fence_get(fence);
1557 	return fence;
1558 }
1559 
1560 /**
1561  * xe_exec_queue_last_fence_get_for_resume() - Get last fence
1562  * @q: The exec queue
1563  * @vm: The VM the engine does a bind or exec for
1564  *
1565  * Get last fence, takes a ref. Only safe to be called in the context of
1566  * resuming the hw engine group's long-running exec queue, when the group
1567  * semaphore is held.
1568  *
1569  * Returns: last fence if not signaled, dma fence stub if signaled
1570  */
1571 struct dma_fence *xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue *q,
1572 							  struct xe_vm *vm)
1573 {
1574 	struct dma_fence *fence;
1575 
1576 	lockdep_assert_held_write(&q->hwe->hw_engine_group->mode_sem);
1577 
1578 	if (q->last_fence &&
1579 	    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
1580 		xe_exec_queue_last_fence_put_unlocked(q);
1581 
1582 	fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
1583 	dma_fence_get(fence);
1584 	return fence;
1585 }
1586 
1587 /**
1588  * xe_exec_queue_last_fence_set() - Set last fence
1589  * @q: The exec queue
1590  * @vm: The VM the engine does a bind or exec for
1591  * @fence: The fence
1592  *
1593  * Set the last fence for the engine. Takes a reference on @fence; when
1594  * closing the engine, xe_exec_queue_last_fence_put() should be called.
1595  */
1596 void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm,
1597 				  struct dma_fence *fence)
1598 {
1599 	xe_exec_queue_last_fence_lockdep_assert(q, vm);
1600 	xe_assert(vm->xe, !dma_fence_is_container(fence));
1601 
1602 	xe_exec_queue_last_fence_put(q, vm);
1603 	q->last_fence = dma_fence_get(fence);
1604 }
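
/*
 * The last fence tracks the tail of work on a queue so that later binds or
 * execs can order against it. A sketch of the typical pattern under the
 * required locks (new_fence is a placeholder for the fence of the job just
 * created):
 *
 *	struct dma_fence *prev = xe_exec_queue_last_fence_get(q, vm);
 *
 *	... make the new job depend on prev, then arm it ...
 *	dma_fence_put(prev);
 *	xe_exec_queue_last_fence_set(q, vm, new_fence);
 */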
1605 
1606 /**
1607  * xe_exec_queue_tlb_inval_last_fence_put() - Drop ref to last TLB invalidation fence
1608  * @q: The exec queue
1609  * @vm: The VM the engine does a bind for
1610  * @type: Either primary or media GT
1611  */
1612 void xe_exec_queue_tlb_inval_last_fence_put(struct xe_exec_queue *q,
1613 					    struct xe_vm *vm,
1614 					    unsigned int type)
1615 {
1616 	xe_exec_queue_last_fence_lockdep_assert(q, vm);
1617 	xe_assert(vm->xe, type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT ||
1618 		  type == XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT);
1619 
1620 	xe_exec_queue_tlb_inval_last_fence_put_unlocked(q, type);
1621 }
1622 
1623 /**
1624  * xe_exec_queue_tlb_inval_last_fence_put_unlocked() - Drop ref to last TLB
1625  * invalidation fence unlocked
1626  * @q: The exec queue
1627  * @type: Either primary or media GT
1628  *
1629  * Only safe to be called from xe_exec_queue_destroy().
1630  */
1631 void xe_exec_queue_tlb_inval_last_fence_put_unlocked(struct xe_exec_queue *q,
1632 						     unsigned int type)
1633 {
1634 	xe_assert(q->vm->xe, type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT ||
1635 		  type == XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT);
1636 
1637 	dma_fence_put(q->tlb_inval[type].last_fence);
1638 	q->tlb_inval[type].last_fence = NULL;
1639 }
1640 
1641 /**
1642  * xe_exec_queue_tlb_inval_last_fence_get() - Get last fence for TLB invalidation
1643  * @q: The exec queue
1644  * @vm: The VM the engine does a bind for
1645  * @type: Either primary or media GT
1646  *
1647  * Get last fence, takes a ref
1648  *
1649  * Returns: last fence if not signaled, dma fence stub if signaled
1650  */
1651 struct dma_fence *xe_exec_queue_tlb_inval_last_fence_get(struct xe_exec_queue *q,
1652 							 struct xe_vm *vm,
1653 							 unsigned int type)
1654 {
1655 	struct dma_fence *fence;
1656 
1657 	xe_exec_queue_last_fence_lockdep_assert(q, vm);
1658 	xe_assert(vm->xe, type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT ||
1659 		  type == XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT);
1660 	xe_assert(vm->xe, q->flags & (EXEC_QUEUE_FLAG_VM |
1661 				      EXEC_QUEUE_FLAG_MIGRATE));
1662 
1663 	if (q->tlb_inval[type].last_fence &&
1664 	    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
1665 		     &q->tlb_inval[type].last_fence->flags))
1666 		xe_exec_queue_tlb_inval_last_fence_put(q, vm, type);
1667 
1668 	fence = q->tlb_inval[type].last_fence ?: dma_fence_get_stub();
1669 	dma_fence_get(fence);
1670 	return fence;
1671 }
1672 
1673 /**
1674  * xe_exec_queue_tlb_inval_last_fence_set() - Set last fence for TLB invalidation
1675  * @q: The exec queue
1676  * @vm: The VM the engine does a bind for
1677  * @fence: The fence
1678  * @type: Either primary or media GT
1679  *
1680  * Set the last fence for the TLB invalidation type on the queue. Takes a
1681  * reference on @fence; when closing the queue,
1682  * xe_exec_queue_tlb_inval_last_fence_put() should be called.
1683  */
1684 void xe_exec_queue_tlb_inval_last_fence_set(struct xe_exec_queue *q,
1685 					    struct xe_vm *vm,
1686 					    struct dma_fence *fence,
1687 					    unsigned int type)
1688 {
1689 	xe_exec_queue_last_fence_lockdep_assert(q, vm);
1690 	xe_assert(vm->xe, type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT ||
1691 		  type == XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT);
1692 	xe_assert(vm->xe, q->flags & (EXEC_QUEUE_FLAG_VM |
1693 				      EXEC_QUEUE_FLAG_MIGRATE));
1694 	xe_assert(vm->xe, !dma_fence_is_container(fence));
1695 
1696 	xe_exec_queue_tlb_inval_last_fence_put(q, vm, type);
1697 	q->tlb_inval[type].last_fence = dma_fence_get(fence);
1698 }
1699 
1700 /**
1701  * xe_exec_queue_contexts_hwsp_rebase - Re-compute GGTT references
1702  * within all LRCs of a queue.
1703  * @q: the &xe_exec_queue struct instance containing target LRCs
1704  * @scratch: scratch buffer to be used as temporary storage
1705  *
1706  * Returns: zero on success, negative error code on failure
1707  */
1708 int xe_exec_queue_contexts_hwsp_rebase(struct xe_exec_queue *q, void *scratch)
1709 {
1710 	int i;
1711 	int err = 0;
1712 
1713 	for (i = 0; i < q->width; ++i) {
1714 		struct xe_lrc *lrc;
1715 
1716 		lrc = xe_exec_queue_get_lrc(q, i);
1717 		if (!lrc)
1718 			continue;
1719 
1720 		xe_lrc_update_memirq_regs_with_address(lrc, q->hwe, scratch);
1721 		xe_lrc_update_hwctx_regs_with_address(lrc);
1722 		err = xe_lrc_setup_wa_bb_with_scratch(lrc, q->hwe, scratch);
1723 		xe_lrc_put(lrc);
1724 		if (err)
1725 			break;
1726 	}
1727 
1728 	return err;
1729 }
1730