// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_exec_queue.h"

#include <linux/nospec.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>
#include <uapi/drm/xe_drm.h>

#include "xe_bo.h"
#include "xe_dep_scheduler.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_sriov_pf.h"
#include "xe_gt_sriov_vf.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_hw_engine_group.h"
#include "xe_irq.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_migrate.h"
#include "xe_pm.h"
#include "xe_pxp.h"
#include "xe_trace.h"
#include "xe_vm.h"

/**
 * DOC: Execution Queue
 *
 * An execution queue is an interface to a HW execution context. The user
 * creates an execution queue, submits GPU jobs through it, and finally
 * destroys it.
 *
 * Execution queues can also be created by XeKMD itself for internal driver
 * operations such as object migration.
 *
 * An execution queue is associated with a specific HW engine, or a group of
 * engines belonging to the same tile and engine class; any GPU job submitted
 * on the queue runs on one of those engines.
 *
 * An execution queue is tied to an address space (VM). It holds a reference
 * to the associated VM and to the underlying Logical Ring Context(s) (LRCs)
 * until the queue is destroyed.
 *
 * The execution queue sits on top of the submission backend. It transparently
 * handles whichever backend the platform uses (GuC or execlists) as well as
 * the ring operations the different engine classes support.
 */
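
/*
 * Example (illustrative userspace sketch, not kernel code): the minimal
 * lifecycle of an exec queue on the render engine, using the uapi structs
 * from uapi/drm/xe_drm.h. Assumes an open Xe DRM fd and a previously
 * created VM id; error handling is elided.
 *
 *	struct drm_xe_engine_class_instance eci = {
 *		.engine_class = DRM_XE_ENGINE_CLASS_RENDER,
 *		.engine_instance = 0,
 *		.gt_id = 0,
 *	};
 *	struct drm_xe_exec_queue_create create = {
 *		.width = 1,
 *		.num_placements = 1,
 *		.vm_id = vm_id,
 *		.instances = (uintptr_t)&eci,
 *	};
 *	struct drm_xe_exec_queue_destroy destroy = {};
 *
 *	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create);
 *	... submit jobs with DRM_IOCTL_XE_EXEC ...
 *	destroy.exec_queue_id = create.exec_queue_id;
 *	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_DESTROY, &destroy);
 */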

/**
 * DOC: Multi Queue Group
 *
 * Multi queue group is another mode of execution supported by the compute
 * and blitter copy command streamers (CCS and BCS, respectively). It is an
 * enhancement of the existing hardware architecture and leverages the same
 * submission model. It enables efficient, parallel execution of multiple
 * queues within a single shared context. The multi queue group functionality
 * is only supported with the GuC submission backend. All the queues of a
 * group must use the same address space (VM).
 *
 * The DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_GROUP exec queue property is used
 * both to create a multi queue group and to add queues to an existing group.
 *
 * A DRM_IOCTL_XE_EXEC_QUEUE_CREATE call carrying this property with the
 * value field set to DRM_XE_MULTI_GROUP_CREATE creates a new multi queue
 * group, with the queue being created becoming the primary queue (aka q0) of
 * the group. To add secondary queues to the group, create them with the same
 * property and the id of the primary queue as the value. The properties of
 * the primary queue (like priority and time slice) apply to the whole group,
 * so these properties can't be set on the secondary queues of a group.
 *
 * The hardware does not support removing a queue from a multi queue group,
 * but queues can be dynamically added to a group, up to a maximum of 64
 * queues. To support this, XeKMD keeps references to the LRCs of the queues,
 * even after the user destroys those queues, until the whole group is
 * destroyed. The secondary queues hold a reference to the primary queue,
 * preventing the group from being destroyed when the user destroys the
 * primary queue. Once the primary queue is destroyed, secondary queues can
 * no longer be added to the group and new job submissions on the existing
 * secondary queues are rejected.
 *
 * The queues of a multi queue group can set their priority within the group
 * through the DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY property.
 * This multi queue priority can also be changed dynamically through the
 * DRM_IOCTL_XE_EXEC_QUEUE_SET_PROPERTY ioctl. It is the only property other
 * than DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_GROUP that the secondary queues
 * of a multi queue group support.
 *
 * When the GuC reports an error on any queue of a multi queue group, the
 * queue cleanup mechanism is invoked for all the queues of the group, as the
 * hardware cannot make progress on the multi queue context.
 *
 * Refer to :ref:`multi-queue-group-guc-interface` for the multi queue group
 * GuC interface.
 */
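
/*
 * Example (illustrative userspace sketch, not kernel code): creating a multi
 * queue group and adding a secondary queue to it through the SET_PROPERTY
 * extension. Assumes fd, vm_id and eci as in the previous example.
 *
 *	struct drm_xe_ext_set_property ext = {
 *		.base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
 *		.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_GROUP,
 *		.value = DRM_XE_MULTI_GROUP_CREATE,
 *	};
 *	struct drm_xe_exec_queue_create create = {
 *		.extensions = (uintptr_t)&ext,
 *		.width = 1,
 *		.num_placements = 1,
 *		.vm_id = vm_id,
 *		.instances = (uintptr_t)&eci,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create);
 *	primary_id = create.exec_queue_id;
 *
 *	ext.value = primary_id;
 *	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create);
 */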

enum xe_exec_queue_sched_prop {
	XE_EXEC_QUEUE_JOB_TIMEOUT = 0,
	XE_EXEC_QUEUE_TIMESLICE = 1,
	XE_EXEC_QUEUE_PREEMPT_TIMEOUT = 2,
	XE_EXEC_QUEUE_SCHED_PROP_MAX = 3,
};

static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 extensions);

static void xe_exec_queue_group_cleanup(struct xe_exec_queue *q)
{
	struct xe_exec_queue_group *group = q->multi_queue.group;
	struct xe_lrc *lrc;
	unsigned long idx;

	if (xe_exec_queue_is_multi_queue_secondary(q)) {
		/*
		 * This put pairs with the get from the xe_exec_queue_lookup()
		 * call in xe_exec_queue_group_validate().
		 */
		xe_exec_queue_put(xe_exec_queue_multi_queue_primary(q));
		return;
	}

	if (!group)
		return;

	/* Primary queue cleanup */
	xa_for_each(&group->xa, idx, lrc)
		xe_lrc_put(lrc);

	xa_destroy(&group->xa);
	mutex_destroy(&group->list_lock);
	xe_bo_unpin_map_no_vm(group->cgp_bo);
	kfree(group);
}

static void __xe_exec_queue_free(struct xe_exec_queue *q)
{
	int i;

	for (i = 0; i < XE_EXEC_QUEUE_TLB_INVAL_COUNT; ++i)
		if (q->tlb_inval[i].dep_scheduler)
			xe_dep_scheduler_fini(q->tlb_inval[i].dep_scheduler);

	if (xe_exec_queue_uses_pxp(q))
		xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);

	if (xe_exec_queue_is_multi_queue(q))
		xe_exec_queue_group_cleanup(q);

	if (q->vm) {
		xe_vm_remove_exec_queue(q->vm, q);
		xe_vm_put(q->vm);
	}

	if (q->xef)
		xe_file_put(q->xef);

	kvfree(q->replay_state);
	kfree(q);
}

static int alloc_dep_schedulers(struct xe_device *xe, struct xe_exec_queue *q)
{
	struct xe_tile *tile = gt_to_tile(q->gt);
	int i;

	for (i = 0; i < XE_EXEC_QUEUE_TLB_INVAL_COUNT; ++i) {
		struct xe_dep_scheduler *dep_scheduler;
		struct xe_gt *gt;
		struct workqueue_struct *wq;

		if (i == XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT)
			gt = tile->primary_gt;
		else
			gt = tile->media_gt;

		if (!gt)
			continue;

		wq = gt->tlb_inval.job_wq;

#define MAX_TLB_INVAL_JOBS	16	/* Picking a reasonable value */
		dep_scheduler = xe_dep_scheduler_create(xe, wq, q->name,
							MAX_TLB_INVAL_JOBS);
		if (IS_ERR(dep_scheduler))
			return PTR_ERR(dep_scheduler);

		q->tlb_inval[i].dep_scheduler = dep_scheduler;
	}
#undef MAX_TLB_INVAL_JOBS

	return 0;
}

static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
						   struct xe_vm *vm,
						   u32 logical_mask,
						   u16 width, struct xe_hw_engine *hwe,
						   u32 flags, u64 extensions)
{
	struct xe_exec_queue *q;
	struct xe_gt *gt = hwe->gt;
	int err;

	/* only kernel queues can be permanent */
	XE_WARN_ON((flags & EXEC_QUEUE_FLAG_PERMANENT) && !(flags & EXEC_QUEUE_FLAG_KERNEL));

	q = kzalloc(struct_size(q, lrc, width), GFP_KERNEL);
	if (!q)
		return ERR_PTR(-ENOMEM);

	kref_init(&q->refcount);
	q->flags = flags;
	q->hwe = hwe;
	q->gt = gt;
	q->class = hwe->class;
	q->width = width;
	q->msix_vec = XE_IRQ_DEFAULT_MSIX;
	q->logical_mask = logical_mask;
	q->fence_irq = &gt->fence_irq[hwe->class];
	q->ring_ops = gt->ring_ops[hwe->class];
	q->ops = gt->exec_queue_ops;
	INIT_LIST_HEAD(&q->lr.link);
	INIT_LIST_HEAD(&q->vm_exec_queue_link);
	INIT_LIST_HEAD(&q->multi_gt_link);
	INIT_LIST_HEAD(&q->hw_engine_group_link);
	INIT_LIST_HEAD(&q->pxp.link);
	spin_lock_init(&q->multi_queue.lock);
	q->multi_queue.priority = XE_MULTI_QUEUE_PRIORITY_NORMAL;

	q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
	q->sched_props.preempt_timeout_us =
				hwe->eclass->sched_props.preempt_timeout_us;
	q->sched_props.job_timeout_ms =
				hwe->eclass->sched_props.job_timeout_ms;
	if (q->flags & EXEC_QUEUE_FLAG_KERNEL &&
	    q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY)
		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
	else
		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;

	if (q->flags & (EXEC_QUEUE_FLAG_MIGRATE | EXEC_QUEUE_FLAG_VM)) {
		err = alloc_dep_schedulers(xe, q);
		if (err) {
			__xe_exec_queue_free(q);
			return ERR_PTR(err);
		}
	}

	if (vm)
		q->vm = xe_vm_get(vm);

	if (extensions) {
		/*
		 * may set q->usm, must come before xe_lrc_create(),
		 * may overwrite q->sched_props, must come before q->ops->init()
		 */
		err = exec_queue_user_extensions(xe, q, extensions);
		if (err) {
			__xe_exec_queue_free(q);
			return ERR_PTR(err);
		}
	}

	return q;
}

static int __xe_exec_queue_init(struct xe_exec_queue *q, u32 exec_queue_flags)
{
	int i, err;
	u32 flags = 0;

	/*
	 * PXP workloads executing on RCS or CCS must run in isolation (i.e. no
	 * other workload can use the EUs at the same time). On MTL this is done
	 * by setting the RUNALONE bit in the LRC, while starting on Xe2 there
	 * is a dedicated bit for it.
	 */
	if (xe_exec_queue_uses_pxp(q) &&
	    (q->class == XE_ENGINE_CLASS_RENDER || q->class == XE_ENGINE_CLASS_COMPUTE)) {
		if (GRAPHICS_VER(gt_to_xe(q->gt)) >= 20)
			flags |= XE_LRC_CREATE_PXP;
		else
			flags |= XE_LRC_CREATE_RUNALONE;
	}

	if (!(exec_queue_flags & EXEC_QUEUE_FLAG_KERNEL))
		flags |= XE_LRC_CREATE_USER_CTX;

	err = q->ops->init(q);
	if (err)
		return err;

	/*
	 * This must occur after q->ops->init to avoid race conditions during VF
	 * post-migration recovery, as the fixups for the LRC GGTT addresses
	 * depend on the queue being present in the backend tracking structure.
	 *
	 * In addition to the above, we must wait on in-flight GGTT changes to
	 * avoid writing out stale values here. Such a wait is a solid, race-free
	 * solution only if this function can detect migration instantly from
	 * the moment the vCPU resumes execution.
	 */
	for (i = 0; i < q->width; ++i) {
		struct xe_lrc *lrc;

		xe_gt_sriov_vf_wait_valid_ggtt(q->gt);
		lrc = xe_lrc_create(q->hwe, q->vm, q->replay_state,
				    xe_lrc_ring_size(), q->msix_vec, flags);
		if (IS_ERR(lrc)) {
			err = PTR_ERR(lrc);
			goto err_lrc;
		}

		/* Pairs with READ_ONCE in xe_exec_queue_contexts_hwsp_rebase() */
		WRITE_ONCE(q->lrc[i], lrc);
	}

	return 0;

err_lrc:
	for (i = i - 1; i >= 0; --i)
		xe_lrc_put(q->lrc[i]);
	return err;
}

static void __xe_exec_queue_fini(struct xe_exec_queue *q)
{
	int i;

	q->ops->fini(q);

	for (i = 0; i < q->width; ++i)
		xe_lrc_put(q->lrc[i]);
}

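/**
 * xe_exec_queue_create() - Create an exec queue.
 * @xe: Xe device.
 * @vm: The VM the queue operates on, or NULL.
 * @logical_mask: Mask of logical engine instances the queue can be placed on.
 * @width: Submission width (number of LRCs) for the queue.
 * @hwe: The HW engine the queue is associated with.
 * @flags: EXEC_QUEUE_FLAG_* creation flags.
 * @extensions: Pointer value of the first user extension, or 0 if none.
 *
 * Allocates and initializes an exec queue and, for PXP queues, registers the
 * queue with the PXP subsystem once initialization has completed.
 *
 * Return: exec queue on success, ERR_PTR on failure.
 */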
struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
					   u32 logical_mask, u16 width,
					   struct xe_hw_engine *hwe, u32 flags,
					   u64 extensions)
{
	struct xe_exec_queue *q;
	int err;

	/* VMs for GSCCS queues (and only those) must have the XE_VM_FLAG_GSC flag */
	xe_assert(xe, !vm || (!!(vm->flags & XE_VM_FLAG_GSC) == !!(hwe->engine_id == XE_HW_ENGINE_GSCCS0)));

	q = __xe_exec_queue_alloc(xe, vm, logical_mask, width, hwe, flags,
				  extensions);
	if (IS_ERR(q))
		return q;

	err = __xe_exec_queue_init(q, flags);
	if (err)
		goto err_post_alloc;

	/*
	 * We can only add the queue to the PXP list after the init is complete,
	 * because the PXP termination can call exec_queue_kill and that will
	 * go bad if the queue is only half-initialized. This means that we
	 * can't do it when we handle the PXP extension in __xe_exec_queue_alloc
	 * and we need to do it here instead.
	 */
	if (xe_exec_queue_uses_pxp(q)) {
		err = xe_pxp_exec_queue_add(xe->pxp, q);
		if (err)
			goto err_post_init;
	}

	return q;

err_post_init:
	__xe_exec_queue_fini(q);
err_post_alloc:
	__xe_exec_queue_free(q);
	return ERR_PTR(err);
}
ALLOW_ERROR_INJECTION(xe_exec_queue_create, ERRNO);

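/**
 * xe_exec_queue_create_class() - Create an exec queue for an engine class.
 * @xe: Xe device.
 * @gt: GT the queue executes on.
 * @vm: The VM the queue operates on, or NULL.
 * @class: The engine class the queue is placed on.
 * @flags: EXEC_QUEUE_FLAG_* creation flags.
 * @extensions: Pointer value of the first user extension, or 0 if none.
 *
 * Builds a logical mask out of all non-reserved engines of @class on @gt and
 * creates a width-1 exec queue which may be placed on any of them.
 *
 * Return: exec queue on success, ERR_PTR on failure.
 */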
struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
						 struct xe_vm *vm,
						 enum xe_engine_class class,
						 u32 flags, u64 extensions)
{
	struct xe_hw_engine *hwe, *hwe0 = NULL;
	enum xe_hw_engine_id id;
	u32 logical_mask = 0;

	for_each_hw_engine(hwe, gt, id) {
		if (xe_hw_engine_is_reserved(hwe))
			continue;

		if (hwe->class == class) {
			logical_mask |= BIT(hwe->logical_instance);
			if (!hwe0)
				hwe0 = hwe;
		}
	}

	if (!logical_mask)
		return ERR_PTR(-ENODEV);

	return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags, extensions);
}

/**
 * xe_exec_queue_create_bind() - Create bind exec queue.
 * @xe: Xe device.
 * @tile: tile which bind exec queue belongs to.
 * @user_vm: The user VM which this exec queue belongs to
 * @flags: exec queue creation flags
 * @extensions: exec queue creation extensions
 *
 * Normalize bind exec queue creation. A bind exec queue is tied to the
 * migration VM for access to the physical memory required for page table
 * programming. On faulting devices the reserved copy engine instance must be
 * used to avoid deadlock: user binds must not get stuck behind faults, since
 * the kernel binds which resolve those faults may themselves depend on user
 * binds. On non-faulting devices any copy engine can be used.
 *
 * Return: exec queue on success, ERR_PTR on failure
 */
struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
						struct xe_tile *tile,
						struct xe_vm *user_vm,
						u32 flags, u64 extensions)
{
	struct xe_gt *gt = tile->primary_gt;
	struct xe_exec_queue *q;
	struct xe_vm *migrate_vm;

	migrate_vm = xe_migrate_get_vm(tile->migrate);
	if (xe->info.has_usm) {
		struct xe_hw_engine *hwe = xe_gt_hw_engine(gt,
							   XE_ENGINE_CLASS_COPY,
							   gt->usm.reserved_bcs_instance,
							   false);

		if (!hwe) {
			xe_vm_put(migrate_vm);
			return ERR_PTR(-EINVAL);
		}

		q = xe_exec_queue_create(xe, migrate_vm,
					 BIT(hwe->logical_instance), 1, hwe,
					 flags, extensions);
	} else {
		q = xe_exec_queue_create_class(xe, gt, migrate_vm,
					       XE_ENGINE_CLASS_COPY, flags,
					       extensions);
	}
	xe_vm_put(migrate_vm);

	if (!IS_ERR(q)) {
		int err = drm_syncobj_create(&q->ufence_syncobj,
					     DRM_SYNCOBJ_CREATE_SIGNALED,
					     NULL);
		if (err) {
			xe_exec_queue_put(q);
			return ERR_PTR(err);
		}

		if (user_vm)
			q->user_vm = xe_vm_get(user_vm);
	}

	return q;
}
ALLOW_ERROR_INJECTION(xe_exec_queue_create_bind, ERRNO);

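/**
 * xe_exec_queue_destroy() - Final destruction of an exec queue.
 * @ref: The queue's embedded kref, which has dropped to zero.
 *
 * Called via xe_exec_queue_put() when the last reference is dropped. Releases
 * the last fences, the multi-GT children and the user VM reference, then
 * hands the queue to the submission backend's destroy hook.
 */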
void xe_exec_queue_destroy(struct kref *ref)
{
	struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
	struct xe_exec_queue *eq, *next;
	int i;

	xe_assert(gt_to_xe(q->gt), atomic_read(&q->job_cnt) == 0);

	if (q->ufence_syncobj)
		drm_syncobj_put(q->ufence_syncobj);

	if (xe_exec_queue_uses_pxp(q))
		xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);

	xe_exec_queue_last_fence_put_unlocked(q);
	for_each_tlb_inval(i)
		xe_exec_queue_tlb_inval_last_fence_put_unlocked(q, i);

	if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) {
		list_for_each_entry_safe(eq, next, &q->multi_gt_list,
					 multi_gt_link)
			xe_exec_queue_put(eq);
	}

	if (q->user_vm) {
		xe_vm_put(q->user_vm);
		q->user_vm = NULL;
	}

	q->ops->destroy(q);
}

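/**
 * xe_exec_queue_fini() - Final teardown of an exec queue.
 * @q: The exec queue
 *
 * Accumulates the queue's run ticks, wakes up any waiters blocked on pending
 * exec queue removal, then finalizes the backend state and frees the queue.
 */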
void xe_exec_queue_fini(struct xe_exec_queue *q)
{
	/*
	 * Before releasing our refs to the LRCs and xef, accumulate the run
	 * ticks and wake up any waiters.
	 */
	xe_exec_queue_update_run_ticks(q);
	if (q->xef && atomic_dec_and_test(&q->xef->exec_queue.pending_removal))
		wake_up_var(&q->xef->exec_queue.pending_removal);

	__xe_exec_queue_fini(q);
	__xe_exec_queue_free(q);
}

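/**
 * xe_exec_queue_assign_name() - Assign a debug name based on the engine class.
 * @q: The exec queue
 * @instance: Engine instance number used as the name suffix.
 */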
void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance)
{
	switch (q->class) {
	case XE_ENGINE_CLASS_RENDER:
		snprintf(q->name, sizeof(q->name), "rcs%d", instance);
		break;
	case XE_ENGINE_CLASS_VIDEO_DECODE:
		snprintf(q->name, sizeof(q->name), "vcs%d", instance);
		break;
	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
		snprintf(q->name, sizeof(q->name), "vecs%d", instance);
		break;
	case XE_ENGINE_CLASS_COPY:
		snprintf(q->name, sizeof(q->name), "bcs%d", instance);
		break;
	case XE_ENGINE_CLASS_COMPUTE:
		snprintf(q->name, sizeof(q->name), "ccs%d", instance);
		break;
	case XE_ENGINE_CLASS_OTHER:
		snprintf(q->name, sizeof(q->name), "gsccs%d", instance);
		break;
	default:
		XE_WARN_ON(q->class);
	}
}

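/**
 * xe_exec_queue_lookup() - Look up an exec queue by user-visible id.
 * @xef: Xe file the exec queue was created on.
 * @id: The exec queue id.
 *
 * Return: exec queue with an extra reference taken, or NULL if not found.
 * The caller is expected to drop the reference with xe_exec_queue_put().
 */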
struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id)
{
	struct xe_exec_queue *q;

	mutex_lock(&xef->exec_queue.lock);
	q = xa_load(&xef->exec_queue.xa, id);
	if (q)
		xe_exec_queue_get(q);
	mutex_unlock(&xef->exec_queue.lock);

	return q;
}

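/**
 * xe_exec_queue_device_get_max_priority() - Max priority the caller may set.
 * @xe: Xe device.
 *
 * Return: XE_EXEC_QUEUE_PRIORITY_HIGH if the caller has CAP_SYS_NICE,
 * XE_EXEC_QUEUE_PRIORITY_NORMAL otherwise.
 */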
enum xe_exec_queue_priority
xe_exec_queue_device_get_max_priority(struct xe_device *xe)
{
	return capable(CAP_SYS_NICE) ? XE_EXEC_QUEUE_PRIORITY_HIGH :
				       XE_EXEC_QUEUE_PRIORITY_NORMAL;
}

static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q,
				   u64 value)
{
	if (XE_IOCTL_DBG(xe, value > XE_EXEC_QUEUE_PRIORITY_HIGH))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe)))
		return -EPERM;

	q->sched_props.priority = value;
	return 0;
}

static bool xe_exec_queue_enforce_schedule_limit(void)
{
#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
	return true;
#else
	return !capable(CAP_SYS_NICE);
#endif
}

static void
xe_exec_queue_get_prop_minmax(struct xe_hw_engine_class_intf *eclass,
			      enum xe_exec_queue_sched_prop prop,
			      u32 *min, u32 *max)
{
	switch (prop) {
	case XE_EXEC_QUEUE_JOB_TIMEOUT:
		*min = eclass->sched_props.job_timeout_min;
		*max = eclass->sched_props.job_timeout_max;
		break;
	case XE_EXEC_QUEUE_TIMESLICE:
		*min = eclass->sched_props.timeslice_min;
		*max = eclass->sched_props.timeslice_max;
		break;
	case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
		*min = eclass->sched_props.preempt_timeout_min;
		*max = eclass->sched_props.preempt_timeout_max;
		break;
	default:
		break;
	}
#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
	if (capable(CAP_SYS_NICE)) {
		switch (prop) {
		case XE_EXEC_QUEUE_JOB_TIMEOUT:
			*min = XE_HW_ENGINE_JOB_TIMEOUT_MIN;
			*max = XE_HW_ENGINE_JOB_TIMEOUT_MAX;
			break;
		case XE_EXEC_QUEUE_TIMESLICE:
			*min = XE_HW_ENGINE_TIMESLICE_MIN;
			*max = XE_HW_ENGINE_TIMESLICE_MAX;
			break;
		case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
			*min = XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN;
			*max = XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX;
			break;
		default:
			break;
		}
	}
#endif
}

static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q,
				    u64 value)
{
	u32 min = 0, max = 0;

	xe_exec_queue_get_prop_minmax(q->hwe->eclass,
				      XE_EXEC_QUEUE_TIMESLICE, &min, &max);

	if (xe_exec_queue_enforce_schedule_limit() &&
	    !xe_hw_engine_timeout_in_range(value, min, max))
		return -EINVAL;

	q->sched_props.timeslice_us = value;
	return 0;
}

static int
exec_queue_set_pxp_type(struct xe_device *xe, struct xe_exec_queue *q, u64 value)
{
	if (value == DRM_XE_PXP_TYPE_NONE)
		return 0;

	/* we only support HWDRM sessions right now */
	if (XE_IOCTL_DBG(xe, value != DRM_XE_PXP_TYPE_HWDRM))
		return -EINVAL;

	if (!xe_pxp_is_enabled(xe->pxp))
		return -ENODEV;

	return xe_pxp_exec_queue_set_type(xe->pxp, q, DRM_XE_PXP_TYPE_HWDRM);
}

static int exec_queue_set_hang_replay_state(struct xe_device *xe,
					    struct xe_exec_queue *q,
					    u64 value)
{
	size_t size = xe_gt_lrc_hang_replay_size(q->gt, q->class);
	u64 __user *address = u64_to_user_ptr(value);
	void *ptr;

	ptr = vmemdup_user(address, size);
	if (XE_IOCTL_DBG(xe, IS_ERR(ptr)))
		return PTR_ERR(ptr);

	q->replay_state = ptr;

	return 0;
}

static int xe_exec_queue_group_init(struct xe_device *xe, struct xe_exec_queue *q)
{
	struct xe_tile *tile = gt_to_tile(q->gt);
	struct xe_exec_queue_group *group;
	struct xe_bo *bo;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return -ENOMEM;

	bo = xe_bo_create_pin_map_novm(xe, tile, SZ_4K, ttm_bo_type_kernel,
				       XE_BO_FLAG_VRAM_IF_DGFX(tile) |
				       XE_BO_FLAG_PINNED_LATE_RESTORE |
				       XE_BO_FLAG_FORCE_USER_VRAM |
				       XE_BO_FLAG_GGTT_INVALIDATE |
				       XE_BO_FLAG_GGTT, false);
	if (IS_ERR(bo)) {
		drm_err(&xe->drm, "CGP bo allocation for queue group failed: %ld\n",
			PTR_ERR(bo));
		kfree(group);
		return PTR_ERR(bo);
	}

	xe_map_memset(xe, &bo->vmap, 0, 0, SZ_4K);

	group->primary = q;
	group->cgp_bo = bo;
	INIT_LIST_HEAD(&group->list);
	xa_init_flags(&group->xa, XA_FLAGS_ALLOC1);
	mutex_init(&group->list_lock);
	q->multi_queue.group = group;

	/* group->list_lock is used in submission backend */
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		might_lock(&group->list_lock);
		fs_reclaim_release(GFP_KERNEL);
	}

	return 0;
}

static inline bool xe_exec_queue_supports_multi_queue(struct xe_exec_queue *q)
{
	return q->gt->info.multi_queue_engine_class_mask & BIT(q->class);
}

static int xe_exec_queue_group_validate(struct xe_device *xe, struct xe_exec_queue *q,
					u32 primary_id)
{
	struct xe_exec_queue_group *group;
	struct xe_exec_queue *primary;
	int ret;

	/*
	 * The get from the xe_exec_queue_lookup() call below pairs with the
	 * put in xe_exec_queue_group_cleanup().
	 */
	primary = xe_exec_queue_lookup(q->vm->xef, primary_id);
	if (XE_IOCTL_DBG(xe, !primary))
		return -ENOENT;

	if (XE_IOCTL_DBG(xe, !xe_exec_queue_is_multi_queue_primary(primary)) ||
	    XE_IOCTL_DBG(xe, q->vm != primary->vm) ||
	    XE_IOCTL_DBG(xe, q->logical_mask != primary->logical_mask)) {
		ret = -EINVAL;
		goto put_primary;
	}

	group = primary->multi_queue.group;
	q->multi_queue.valid = true;
	q->multi_queue.group = group;

	return 0;
put_primary:
	xe_exec_queue_put(primary);
	return ret;
}

#define XE_MAX_GROUP_SIZE	64
static int xe_exec_queue_group_add(struct xe_device *xe, struct xe_exec_queue *q)
{
	struct xe_exec_queue_group *group = q->multi_queue.group;
	u32 pos;
	int err;

	xe_assert(xe, xe_exec_queue_is_multi_queue_secondary(q));

	/* Primary queue holds a reference to LRCs of all secondary queues */
	err = xa_alloc(&group->xa, &pos, xe_lrc_get(q->lrc[0]),
		       XA_LIMIT(1, XE_MAX_GROUP_SIZE - 1), GFP_KERNEL);
	if (XE_IOCTL_DBG(xe, err)) {
		xe_lrc_put(q->lrc[0]);

		/* It is invalid if queue group limit is exceeded */
		if (err == -EBUSY)
			err = -EINVAL;

		return err;
	}

	q->multi_queue.pos = pos;

	return 0;
}

static void xe_exec_queue_group_delete(struct xe_device *xe, struct xe_exec_queue *q)
{
	struct xe_exec_queue_group *group = q->multi_queue.group;
	struct xe_lrc *lrc;

	xe_assert(xe, xe_exec_queue_is_multi_queue_secondary(q));

	lrc = xa_erase(&group->xa, q->multi_queue.pos);
	xe_assert(xe, lrc);
	xe_lrc_put(lrc);
}

static int exec_queue_set_multi_group(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 value)
{
	if (XE_IOCTL_DBG(xe, !xe_exec_queue_supports_multi_queue(q)))
		return -ENODEV;

	if (XE_IOCTL_DBG(xe, !xe_device_uc_enabled(xe)))
		return -EOPNOTSUPP;

	if (XE_IOCTL_DBG(xe, !q->vm->xef))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, xe_exec_queue_is_parallel(q)))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, xe_exec_queue_is_multi_queue(q)))
		return -EINVAL;

	if (value & DRM_XE_MULTI_GROUP_CREATE) {
		if (XE_IOCTL_DBG(xe, value & ~DRM_XE_MULTI_GROUP_CREATE))
			return -EINVAL;

		q->multi_queue.valid = true;
		q->multi_queue.is_primary = true;
		q->multi_queue.pos = 0;
		return 0;
	}

	/* While adding secondary queues, the upper 32 bits must be 0 */
	if (XE_IOCTL_DBG(xe, value & (~0ull << 32)))
		return -EINVAL;

	return xe_exec_queue_group_validate(xe, q, value);
}

static int exec_queue_set_multi_queue_priority(struct xe_device *xe, struct xe_exec_queue *q,
					       u64 value)
{
	if (XE_IOCTL_DBG(xe, value > XE_MULTI_QUEUE_PRIORITY_HIGH))
		return -EINVAL;

	/* When set at queue creation time (!q->xef), just store the priority value */
	if (!q->xef) {
		q->multi_queue.priority = value;
		return 0;
	}

	if (!xe_exec_queue_is_multi_queue(q))
		return -EINVAL;

	return q->ops->set_multi_queue_priority(q, value);
}

typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
					     struct xe_exec_queue *q,
					     u64 value);

static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE] = exec_queue_set_pxp_type,
	[DRM_XE_EXEC_QUEUE_SET_HANG_REPLAY_STATE] = exec_queue_set_hang_replay_state,
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_GROUP] = exec_queue_set_multi_group,
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY] =
							exec_queue_set_multi_queue_priority,
};

int xe_exec_queue_set_property_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_set_property *args = data;
	struct xe_exec_queue *q;
	int ret;
	u32 idx;

	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, args->property !=
			 DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY))
		return -EINVAL;

	q = xe_exec_queue_lookup(xef, args->exec_queue_id);
	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	idx = array_index_nospec(args->property,
				 ARRAY_SIZE(exec_queue_set_property_funcs));
	ret = exec_queue_set_property_funcs[idx](xe, q, args->value);
	if (XE_IOCTL_DBG(xe, ret))
		goto err_post_lookup;

	xe_exec_queue_put(q);
	return 0;

 err_post_lookup:
	xe_exec_queue_put(q);
	return ret;
}
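
/*
 * Example (illustrative userspace sketch, not kernel code): dynamically
 * changing a queue's priority within its multi queue group. The ioctl
 * macro name follows the usual DRM convention and, like the numeric
 * priority value, is an assumption here; the kernel validates the value
 * against its multi queue priority range (up to XE_MULTI_QUEUE_PRIORITY_HIGH).
 *
 *	struct drm_xe_exec_queue_set_property args = {
 *		.exec_queue_id = exec_queue_id,
 *		.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY,
 *		.value = 2,	(assumed mapping for "high" within the group)
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_SET_PROPERTY, &args);
 */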

static int exec_queue_user_ext_check(struct xe_exec_queue *q, u64 properties)
{
	u64 secondary_queue_valid_props = BIT_ULL(DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_GROUP) |
				  BIT_ULL(DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY);

	/*
	 * Apart from MULTI_GROUP itself, only the MULTI_QUEUE_PRIORITY
	 * property is valid for secondary queues of a multi-queue group.
	 */
	if (xe_exec_queue_is_multi_queue_secondary(q) &&
	    properties & ~secondary_queue_valid_props)
		return -EINVAL;

	return 0;
}

static int exec_queue_user_ext_check_final(struct xe_exec_queue *q, u64 properties)
{
	/* MULTI_QUEUE_PRIORITY only applies to multi-queue group queues */
	if ((properties & BIT_ULL(DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY)) &&
	    !(properties & BIT_ULL(DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_GROUP)))
		return -EINVAL;

	return 0;
}

static int exec_queue_user_ext_set_property(struct xe_device *xe,
					    struct xe_exec_queue *q,
					    u64 extension, u64 *properties)
{
	u64 __user *address = u64_to_user_ptr(extension);
	struct drm_xe_ext_set_property ext;
	int err;
	u32 idx;

	err = copy_from_user(&ext, address, sizeof(ext));
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, ext.property >=
			 ARRAY_SIZE(exec_queue_set_property_funcs)) ||
	    XE_IOCTL_DBG(xe, ext.pad) ||
	    XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY &&
			 ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE &&
			 ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE &&
			 ext.property != DRM_XE_EXEC_QUEUE_SET_HANG_REPLAY_STATE &&
			 ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_GROUP &&
			 ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY))
		return -EINVAL;

	idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
	if (!exec_queue_set_property_funcs[idx])
		return -EINVAL;

	*properties |= BIT_ULL(idx);
	err = exec_queue_user_ext_check(q, *properties);
	if (XE_IOCTL_DBG(xe, err))
		return err;

	return exec_queue_set_property_funcs[idx](xe, q, ext.value);
}

typedef int (*xe_exec_queue_user_extension_fn)(struct xe_device *xe,
					       struct xe_exec_queue *q,
					       u64 extension, u64 *properties);

static const xe_exec_queue_user_extension_fn exec_queue_user_extension_funcs[] = {
	[DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY] = exec_queue_user_ext_set_property,
};

#define MAX_USER_EXTENSIONS	16
static int __exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
					u64 extensions, int ext_number, u64 *properties)
{
	u64 __user *address = u64_to_user_ptr(extensions);
	struct drm_xe_user_extension ext;
	int err;
	u32 idx;

	if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
		return -E2BIG;

	err = copy_from_user(&ext, address, sizeof(ext));
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, ext.pad) ||
	    XE_IOCTL_DBG(xe, ext.name >=
			 ARRAY_SIZE(exec_queue_user_extension_funcs)))
		return -EINVAL;

	idx = array_index_nospec(ext.name,
				 ARRAY_SIZE(exec_queue_user_extension_funcs));
	err = exec_queue_user_extension_funcs[idx](xe, q, extensions, properties);
	if (XE_IOCTL_DBG(xe, err))
		return err;

	if (ext.next_extension)
		return __exec_queue_user_extensions(xe, q, ext.next_extension,
						    ++ext_number, properties);

	return 0;
}

static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 extensions)
{
	u64 properties = 0;
	int err;

	err = __exec_queue_user_extensions(xe, q, extensions, 0, &properties);
	if (XE_IOCTL_DBG(xe, err))
		return err;

	err = exec_queue_user_ext_check_final(q, properties);
	if (XE_IOCTL_DBG(xe, err))
		return err;

	if (xe_exec_queue_is_multi_queue_primary(q)) {
		err = xe_exec_queue_group_init(xe, q);
		if (XE_IOCTL_DBG(xe, err))
			return err;
	}

	return 0;
}

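/*
 * The user supplies a width x num_placements matrix of engine instances,
 * laid out placement-major: position i of placement j is eci[j * width + i].
 * calc_validate_logical_mask() checks that all entries share one GT and
 * engine class, that no entry names a reserved engine, and that each
 * successive parallel position's placement mask is the previous mask
 * shifted left by one (i.e. logically contiguous instances), then folds
 * the matrix into a logical instance mask. Returns 0 on invalid input.
 */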
static u32 calc_validate_logical_mask(struct xe_device *xe,
				      struct drm_xe_engine_class_instance *eci,
				      u16 width, u16 num_placements)
{
	int len = width * num_placements;
	int i, j, n;
	u16 class;
	u16 gt_id;
	u32 return_mask = 0, prev_mask;

	if (XE_IOCTL_DBG(xe, !xe_device_uc_enabled(xe) &&
			 len > 1))
		return 0;

	for (i = 0; i < width; ++i) {
		u32 current_mask = 0;

		for (j = 0; j < num_placements; ++j) {
			struct xe_hw_engine *hwe;

			n = j * width + i;

			hwe = xe_hw_engine_lookup(xe, eci[n]);
			if (XE_IOCTL_DBG(xe, !hwe))
				return 0;

			if (XE_IOCTL_DBG(xe, xe_hw_engine_is_reserved(hwe)))
				return 0;

			if (XE_IOCTL_DBG(xe, n && eci[n].gt_id != gt_id) ||
			    XE_IOCTL_DBG(xe, n && eci[n].engine_class != class))
				return 0;

			class = eci[n].engine_class;
			gt_id = eci[n].gt_id;

			if (width == 1 || !i)
				return_mask |= BIT(eci[n].engine_instance);
			current_mask |= BIT(eci[n].engine_instance);
		}

		/* Parallel submissions must be logically contiguous */
		if (i && XE_IOCTL_DBG(xe, current_mask != prev_mask << 1))
			return 0;

		prev_mask = current_mask;
	}

	return return_mask;
}

static bool has_sched_groups(struct xe_gt *gt)
{
	if (IS_SRIOV_PF(gt_to_xe(gt)) && xe_gt_sriov_pf_sched_groups_enabled(gt))
		return true;

	if (IS_SRIOV_VF(gt_to_xe(gt)) && xe_gt_sriov_vf_sched_groups_enabled(gt))
		return true;

	return false;
}

int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_create *args = data;
	struct drm_xe_engine_class_instance eci[XE_HW_ENGINE_MAX_INSTANCE];
	struct drm_xe_engine_class_instance __user *user_eci =
		u64_to_user_ptr(args->instances);
	struct xe_hw_engine *hwe;
	struct xe_vm *vm;
	struct xe_tile *tile;
	struct xe_exec_queue *q = NULL;
	u32 logical_mask;
	u32 flags = 0;
	u32 id;
	u32 len;
	int err;

	if (XE_IOCTL_DBG(xe, args->flags & ~DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	len = args->width * args->num_placements;
	if (XE_IOCTL_DBG(xe, !len || len > XE_HW_ENGINE_MAX_INSTANCE))
		return -EINVAL;

	err = copy_from_user(eci, user_eci,
			     sizeof(struct drm_xe_engine_class_instance) * len);
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, !xe_device_get_gt(xe, eci[0].gt_id)))
		return -EINVAL;

	if (args->flags & DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT)
		flags |= EXEC_QUEUE_FLAG_LOW_LATENCY;

	if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) {
		if (XE_IOCTL_DBG(xe, args->width != 1) ||
		    XE_IOCTL_DBG(xe, args->num_placements != 1) ||
		    XE_IOCTL_DBG(xe, eci[0].engine_instance != 0))
			return -EINVAL;

		vm = xe_vm_lookup(xef, args->vm_id);
		if (XE_IOCTL_DBG(xe, !vm))
			return -ENOENT;

		err = down_read_interruptible(&vm->lock);
		if (err) {
			xe_vm_put(vm);
			return err;
		}

		if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
			up_read(&vm->lock);
			xe_vm_put(vm);
			return -ENOENT;
		}

		for_each_tile(tile, xe, id) {
			struct xe_exec_queue *new;

			flags |= EXEC_QUEUE_FLAG_VM;
			if (id)
				flags |= EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD;

			new = xe_exec_queue_create_bind(xe, tile, vm, flags,
							args->extensions);
			if (IS_ERR(new)) {
				up_read(&vm->lock);
				xe_vm_put(vm);
				err = PTR_ERR(new);
				if (q)
					goto put_exec_queue;
				return err;
			}
			if (id == 0)
				q = new;
			else
				list_add_tail(&new->multi_gt_list,
					      &q->multi_gt_link);
		}
		up_read(&vm->lock);
		xe_vm_put(vm);
	} else {
		logical_mask = calc_validate_logical_mask(xe, eci,
							  args->width,
							  args->num_placements);
		if (XE_IOCTL_DBG(xe, !logical_mask))
			return -EINVAL;

		hwe = xe_hw_engine_lookup(xe, eci[0]);
		if (XE_IOCTL_DBG(xe, !hwe))
			return -EINVAL;

		vm = xe_vm_lookup(xef, args->vm_id);
		if (XE_IOCTL_DBG(xe, !vm))
			return -ENOENT;

		err = down_read_interruptible(&vm->lock);
		if (err) {
			xe_vm_put(vm);
			return err;
		}

		if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
			up_read(&vm->lock);
			xe_vm_put(vm);
			return -ENOENT;
		}

		/* SRIOV sched groups are not compatible with multi-lrc */
		if (XE_IOCTL_DBG(xe, args->width > 1 && has_sched_groups(hwe->gt))) {
			up_read(&vm->lock);
			xe_vm_put(vm);
			return -EINVAL;
		}

		q = xe_exec_queue_create(xe, vm, logical_mask,
					 args->width, hwe, flags,
					 args->extensions);
		up_read(&vm->lock);
		xe_vm_put(vm);
		if (IS_ERR(q))
			return PTR_ERR(q);

		if (xe_exec_queue_is_multi_queue_secondary(q)) {
			err = xe_exec_queue_group_add(xe, q);
			if (XE_IOCTL_DBG(xe, err))
				goto put_exec_queue;
		}

		if (xe_vm_in_preempt_fence_mode(vm)) {
			q->lr.context = dma_fence_context_alloc(1);

			err = xe_vm_add_compute_exec_queue(vm, q);
			if (XE_IOCTL_DBG(xe, err))
				goto delete_queue_group;
		}

		if (q->vm && q->hwe->hw_engine_group) {
			err = xe_hw_engine_group_add_exec_queue(q->hwe->hw_engine_group, q);
			if (err)
				goto put_exec_queue;
		}
	}

	q->xef = xe_file_get(xef);
	if (eci[0].engine_class != DRM_XE_ENGINE_CLASS_VM_BIND)
		xe_vm_add_exec_queue(vm, q);

	/* user id alloc must always be last in ioctl to prevent UAF */
	err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
	if (err)
		goto kill_exec_queue;

	args->exec_queue_id = id;

	return 0;

kill_exec_queue:
	xe_exec_queue_kill(q);
delete_queue_group:
	if (xe_exec_queue_is_multi_queue_secondary(q))
		xe_exec_queue_group_delete(xe, q);
put_exec_queue:
	xe_exec_queue_put(q);
	return err;
}

int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_get_property *args = data;
	struct xe_exec_queue *q;
	int ret;

	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	q = xe_exec_queue_lookup(xef, args->exec_queue_id);
	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	switch (args->property) {
	case DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN:
		args->value = q->ops->reset_status(q);
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	xe_exec_queue_put(q);

	return ret;
}

/**
 * xe_exec_queue_lrc() - Get the LRC from exec queue.
 * @q: The exec_queue.
 *
 * Retrieves the primary LRC for the exec queue. Note that this function
 * returns only the first LRC instance, even when multiple parallel LRCs
 * are configured.
 *
 * Return: Pointer to LRC on success, error on failure
 */
struct xe_lrc *xe_exec_queue_lrc(struct xe_exec_queue *q)
{
	return q->lrc[0];
}

/**
 * xe_exec_queue_is_lr() - Whether an exec_queue is long-running
 * @q: The exec_queue
 *
 * Return: True if the exec_queue is long-running, false otherwise.
 */
bool xe_exec_queue_is_lr(struct xe_exec_queue *q)
{
	return q->vm && xe_vm_in_lr_mode(q->vm) &&
		!(q->flags & EXEC_QUEUE_FLAG_VM);
}

/**
 * xe_exec_queue_is_idle() - Whether an exec_queue is idle.
 * @q: The exec_queue
 *
 * FIXME: Need to determine what to use as the short-lived
 * timeline lock for the exec_queues, so that the return value
 * of this function becomes more than just an advisory
 * snapshot in time. The timeline lock must protect the
 * seqno from racing submissions on the same exec_queue.
 * Typically vm->resv, but user-created timeline locks use the migrate vm
 * and never grab the migrate vm->resv, so we have a race there.
 *
 * Return: True if the exec_queue is idle, false otherwise.
 */
bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
{
	if (xe_exec_queue_is_parallel(q)) {
		int i;

		for (i = 0; i < q->width; ++i) {
			if (xe_lrc_seqno(q->lrc[i]) !=
			    q->lrc[i]->fence_ctx.next_seqno - 1)
				return false;
		}

		return true;
	}

	return xe_lrc_seqno(q->lrc[0]) ==
		q->lrc[0]->fence_ctx.next_seqno - 1;
}

/**
 * xe_exec_queue_update_run_ticks() - Update run time in ticks for this exec
 * queue from HW
 * @q: The exec queue
 *
 * Update the timestamp saved by HW for this exec queue and save run ticks
 * calculated by using the delta from last update.
 */
void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
{
	struct xe_device *xe = gt_to_xe(q->gt);
	struct xe_lrc *lrc;
	u64 old_ts, new_ts;
	int idx;

	/*
	 * Jobs executed by the kernel don't have a corresponding xe_file and
	 * thus are not accounted.
	 */
	if (!q->xef)
		return;

	/* Synchronize with unbind while holding the xe file open */
	if (!drm_dev_enter(&xe->drm, &idx))
		return;
	/*
	 * Only sample the first LRC. For parallel submission, all of them are
	 * scheduled together and we compensate for that below by multiplying
	 * by width - this may introduce errors if that premise is not true
	 * and they don't exit 100% aligned. On the other hand, looping through
	 * the LRCs and reading them at different times could also introduce
	 * errors.
	 */
	lrc = q->lrc[0];
	new_ts = xe_lrc_update_timestamp(lrc, &old_ts);
	q->xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;

	drm_dev_exit(idx);
}

/**
 * xe_exec_queue_kill() - permanently stop all execution from an exec queue
 * @q: The exec queue
 *
 * This function permanently stops all activity on an exec queue. If the queue
 * is actively executing on the HW, it will be kicked off the engine; any
 * pending jobs are discarded and all future submissions are rejected.
 * This function is safe to call multiple times.
 */
void xe_exec_queue_kill(struct xe_exec_queue *q)
{
	struct xe_exec_queue *eq = q, *next;

	list_for_each_entry_safe(eq, next, &eq->multi_gt_list,
				 multi_gt_link) {
		q->ops->kill(eq);
		xe_vm_remove_compute_exec_queue(q->vm, eq);
	}

	q->ops->kill(q);
	xe_vm_remove_compute_exec_queue(q->vm, q);
}

int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_destroy *args = data;
	struct xe_exec_queue *q;

	if (XE_IOCTL_DBG(xe, args->pad) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	mutex_lock(&xef->exec_queue.lock);
	q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id);
	if (q)
		atomic_inc(&xef->exec_queue.pending_removal);
	mutex_unlock(&xef->exec_queue.lock);

	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	if (q->vm && q->hwe->hw_engine_group)
		xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q);

	xe_exec_queue_kill(q);

	trace_xe_exec_queue_close(q);
	xe_exec_queue_put(q);

	return 0;
}

static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q,
						    struct xe_vm *vm)
{
	if (q->flags & EXEC_QUEUE_FLAG_MIGRATE) {
		xe_migrate_job_lock_assert(q);
	} else if (q->flags & EXEC_QUEUE_FLAG_VM) {
		lockdep_assert_held(&vm->lock);
	} else {
		xe_vm_assert_held(vm);
		lockdep_assert_held(&q->hwe->hw_engine_group->mode_sem);
	}
}

/**
 * xe_exec_queue_last_fence_put() - Drop ref to last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 */
void xe_exec_queue_last_fence_put(struct xe_exec_queue *q, struct xe_vm *vm)
{
	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	xe_exec_queue_last_fence_put_unlocked(q);
}

/**
 * xe_exec_queue_last_fence_put_unlocked() - Drop ref to last fence unlocked
 * @q: The exec queue
 *
 * Only safe to be called from xe_exec_queue_destroy().
 */
void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q)
{
	if (q->last_fence) {
		dma_fence_put(q->last_fence);
		q->last_fence = NULL;
	}
}

/**
 * xe_exec_queue_last_fence_get() - Get last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 *
 * Get last fence, takes a ref
 *
 * Return: last fence if not signaled, dma fence stub if signaled
 */
struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
					       struct xe_vm *vm)
{
	struct dma_fence *fence;

	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	if (q->last_fence &&
	    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
		xe_exec_queue_last_fence_put(q, vm);

	fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
	dma_fence_get(fence);
	return fence;
}

/**
 * xe_exec_queue_last_fence_get_for_resume() - Get last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 *
 * Get last fence, takes a ref. Only safe to be called in the context of
 * resuming the hw engine group's long-running exec queue, when the group
 * semaphore is held.
 *
 * Return: last fence if not signaled, dma fence stub if signaled
 */
struct dma_fence *xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue *q,
							  struct xe_vm *vm)
{
	struct dma_fence *fence;

	lockdep_assert_held_write(&q->hwe->hw_engine_group->mode_sem);

	if (q->last_fence &&
	    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
		xe_exec_queue_last_fence_put_unlocked(q);

	fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
	dma_fence_get(fence);
	return fence;
}

/**
 * xe_exec_queue_last_fence_set() - Set last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 * @fence: The fence
 *
 * Set the last fence for the engine. Increases the reference count on the
 * fence; when closing the engine, xe_exec_queue_last_fence_put() should be
 * called.
 */
void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm,
				  struct dma_fence *fence)
{
	xe_exec_queue_last_fence_lockdep_assert(q, vm);
	xe_assert(vm->xe, !dma_fence_is_container(fence));

	xe_exec_queue_last_fence_put(q, vm);
	q->last_fence = dma_fence_get(fence);
}

/**
 * xe_exec_queue_tlb_inval_last_fence_put() - Drop ref to last TLB invalidation fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind for
 * @type: Either primary or media GT
 */
void xe_exec_queue_tlb_inval_last_fence_put(struct xe_exec_queue *q,
					    struct xe_vm *vm,
					    unsigned int type)
{
	xe_exec_queue_last_fence_lockdep_assert(q, vm);
	xe_assert(vm->xe, type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT ||
		  type == XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT);

	xe_exec_queue_tlb_inval_last_fence_put_unlocked(q, type);
}

/**
 * xe_exec_queue_tlb_inval_last_fence_put_unlocked() - Drop ref to last TLB
 * invalidation fence unlocked
 * @q: The exec queue
 * @type: Either primary or media GT
 *
 * Only safe to be called from xe_exec_queue_destroy().
 */
void xe_exec_queue_tlb_inval_last_fence_put_unlocked(struct xe_exec_queue *q,
						     unsigned int type)
{
	xe_assert(q->vm->xe, type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT ||
		  type == XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT);

	dma_fence_put(q->tlb_inval[type].last_fence);
	q->tlb_inval[type].last_fence = NULL;
}

/**
 * xe_exec_queue_tlb_inval_last_fence_get() - Get last fence for TLB invalidation
 * @q: The exec queue
 * @vm: The VM the engine does a bind for
 * @type: Either primary or media GT
 *
 * Get last fence, takes a ref
 *
 * Return: last fence if not signaled, dma fence stub if signaled
 */
struct dma_fence *xe_exec_queue_tlb_inval_last_fence_get(struct xe_exec_queue *q,
							 struct xe_vm *vm,
							 unsigned int type)
{
	struct dma_fence *fence;

	xe_exec_queue_last_fence_lockdep_assert(q, vm);
	xe_assert(vm->xe, type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT ||
		  type == XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT);
	xe_assert(vm->xe, q->flags & (EXEC_QUEUE_FLAG_VM |
				      EXEC_QUEUE_FLAG_MIGRATE));

	if (q->tlb_inval[type].last_fence &&
	    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
		     &q->tlb_inval[type].last_fence->flags))
		xe_exec_queue_tlb_inval_last_fence_put(q, vm, type);

	fence = q->tlb_inval[type].last_fence ?: dma_fence_get_stub();
	dma_fence_get(fence);
	return fence;
}

/**
 * xe_exec_queue_tlb_inval_last_fence_set() - Set last fence for TLB invalidation
 * @q: The exec queue
 * @vm: The VM the engine does a bind for
 * @fence: The fence
 * @type: Either primary or media GT
 *
 * Set the last fence for the tlb invalidation type on the queue. Increases
 * the reference count on the fence; when closing the queue,
 * xe_exec_queue_tlb_inval_last_fence_put() should be called.
 */
void xe_exec_queue_tlb_inval_last_fence_set(struct xe_exec_queue *q,
					    struct xe_vm *vm,
					    struct dma_fence *fence,
					    unsigned int type)
{
	xe_exec_queue_last_fence_lockdep_assert(q, vm);
	xe_assert(vm->xe, type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT ||
		  type == XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT);
	xe_assert(vm->xe, q->flags & (EXEC_QUEUE_FLAG_VM |
				      EXEC_QUEUE_FLAG_MIGRATE));
	xe_assert(vm->xe, !dma_fence_is_container(fence));

	xe_exec_queue_tlb_inval_last_fence_put(q, vm, type);
	q->tlb_inval[type].last_fence = dma_fence_get(fence);
}

/**
 * xe_exec_queue_contexts_hwsp_rebase() - Re-compute GGTT references
 * within all LRCs of a queue.
 * @q: the &xe_exec_queue struct instance containing target LRCs
 * @scratch: scratch buffer to be used as temporary storage
 *
 * Return: zero on success, negative error code on failure
 */
int xe_exec_queue_contexts_hwsp_rebase(struct xe_exec_queue *q, void *scratch)
{
	int i;
	int err = 0;

	for (i = 0; i < q->width; ++i) {
		struct xe_lrc *lrc;

		/* Pairs with WRITE_ONCE in __xe_exec_queue_init() */
		lrc = READ_ONCE(q->lrc[i]);
		if (!lrc)
			continue;

		xe_lrc_update_memirq_regs_with_address(lrc, q->hwe, scratch);
		xe_lrc_update_hwctx_regs_with_address(lrc);
		err = xe_lrc_setup_wa_bb_with_scratch(lrc, q->hwe, scratch);
		if (err)
			break;
	}

	return err;
}