// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_exec_queue.h"

#include <linux/nospec.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>
#include <uapi/drm/xe_drm.h>

#include "xe_dep_scheduler.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_sriov_vf.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_hw_engine_group.h"
#include "xe_hw_fence.h"
#include "xe_irq.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_migrate.h"
#include "xe_pm.h"
#include "xe_pxp.h"
#include "xe_ring_ops_types.h"
#include "xe_trace.h"
#include "xe_vm.h"

/**
 * DOC: Execution Queue
 *
 * An execution queue is an interface to the HW context of execution. Users
 * create an execution queue, submit GPU jobs through it, and destroy it once
 * they are done.
 *
 * Execution queues can also be created by XeKMD itself for driver-internal
 * operations such as object migration.
 *
 * An execution queue is associated with a specific HW engine, or a group of
 * engines belonging to the same tile and engine class, and any GPU job
 * submitted on the queue will run on one of these engines.
 *
 * An execution queue is tied to an address space (VM). It holds a reference
 * to the associated VM and to the underlying Logical Ring Context(s) (LRCs)
 * until the queue is destroyed.
 *
 * The execution queue sits on top of the submission backend. It opaquely
 * handles whichever backend the platform uses (GuC or Execlists), as well as
 * the ring operations supported by the different engine classes.
 */

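/*
 * Illustrative sketch of the lifecycle described above for a driver-internal
 * queue; the engine class, flags and error handling here are assumptions,
 * not a fixed recipe:
 *
 *	struct xe_exec_queue *q;
 *
 *	q = xe_exec_queue_create_class(xe, gt, vm, XE_ENGINE_CLASS_COPY,
 *				       EXEC_QUEUE_FLAG_KERNEL, 0);
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);
 *
 *	... submit jobs on q ...
 *
 *	xe_exec_queue_put(q);	(destroyed on the last reference drop)
 */
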
enum xe_exec_queue_sched_prop {
	XE_EXEC_QUEUE_JOB_TIMEOUT = 0,
	XE_EXEC_QUEUE_TIMESLICE = 1,
	XE_EXEC_QUEUE_PREEMPT_TIMEOUT = 2,
	XE_EXEC_QUEUE_SCHED_PROP_MAX = 3,
};

static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 extensions, int ext_number);

static void __xe_exec_queue_free(struct xe_exec_queue *q)
{
	int i;

	for (i = 0; i < XE_EXEC_QUEUE_TLB_INVAL_COUNT; ++i)
		if (q->tlb_inval[i].dep_scheduler)
			xe_dep_scheduler_fini(q->tlb_inval[i].dep_scheduler);

	if (xe_exec_queue_uses_pxp(q))
		xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);
	if (q->vm)
		xe_vm_put(q->vm);

	if (q->xef)
		xe_file_put(q->xef);

	kfree(q);
}

static int alloc_dep_schedulers(struct xe_device *xe, struct xe_exec_queue *q)
{
	struct xe_tile *tile = gt_to_tile(q->gt);
	int i;

	for (i = 0; i < XE_EXEC_QUEUE_TLB_INVAL_COUNT; ++i) {
		struct xe_dep_scheduler *dep_scheduler;
		struct xe_gt *gt;
		struct workqueue_struct *wq;

		if (i == XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT)
			gt = tile->primary_gt;
		else
			gt = tile->media_gt;

		if (!gt)
			continue;

		wq = gt->tlb_inval.job_wq;

#define MAX_TLB_INVAL_JOBS	16	/* Picking a reasonable value */
		dep_scheduler = xe_dep_scheduler_create(xe, wq, q->name,
							MAX_TLB_INVAL_JOBS);
		if (IS_ERR(dep_scheduler))
			return PTR_ERR(dep_scheduler);

		q->tlb_inval[i].dep_scheduler = dep_scheduler;
	}
#undef MAX_TLB_INVAL_JOBS

	return 0;
}

static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
						   struct xe_vm *vm,
						   u32 logical_mask,
						   u16 width, struct xe_hw_engine *hwe,
						   u32 flags, u64 extensions)
{
	struct xe_exec_queue *q;
	struct xe_gt *gt = hwe->gt;
	int err;

	/* Only kernel queues can be permanent */
	XE_WARN_ON((flags & EXEC_QUEUE_FLAG_PERMANENT) && !(flags & EXEC_QUEUE_FLAG_KERNEL));

	q = kzalloc(struct_size(q, lrc, width), GFP_KERNEL);
	if (!q)
		return ERR_PTR(-ENOMEM);

	kref_init(&q->refcount);
	q->flags = flags;
	q->hwe = hwe;
	q->gt = gt;
	q->class = hwe->class;
	q->width = width;
	q->msix_vec = XE_IRQ_DEFAULT_MSIX;
	q->logical_mask = logical_mask;
	q->fence_irq = &gt->fence_irq[hwe->class];
	q->ring_ops = gt->ring_ops[hwe->class];
	q->ops = gt->exec_queue_ops;
	INIT_LIST_HEAD(&q->lr.link);
	INIT_LIST_HEAD(&q->multi_gt_link);
	INIT_LIST_HEAD(&q->hw_engine_group_link);
	INIT_LIST_HEAD(&q->pxp.link);

	q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
	q->sched_props.preempt_timeout_us =
				hwe->eclass->sched_props.preempt_timeout_us;
	q->sched_props.job_timeout_ms =
				hwe->eclass->sched_props.job_timeout_ms;
	if (q->flags & EXEC_QUEUE_FLAG_KERNEL &&
	    q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY)
		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
	else
		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;

	if (q->flags & (EXEC_QUEUE_FLAG_MIGRATE | EXEC_QUEUE_FLAG_VM)) {
		err = alloc_dep_schedulers(xe, q);
		if (err) {
			__xe_exec_queue_free(q);
			return ERR_PTR(err);
		}
	}

	if (vm)
		q->vm = xe_vm_get(vm);

	if (extensions) {
		/*
		 * May set q->usm, so it must come before xe_lrc_create();
		 * may overwrite q->sched_props, so it must come before
		 * q->ops->init().
		 */
		err = exec_queue_user_extensions(xe, q, extensions, 0);
		if (err) {
			__xe_exec_queue_free(q);
			return ERR_PTR(err);
		}
	}

	return q;
}

static int __xe_exec_queue_init(struct xe_exec_queue *q, u32 exec_queue_flags)
{
	int i, err;
	u32 flags = 0;

	/*
	 * PXP workloads executing on RCS or CCS must run in isolation (i.e. no
	 * other workload can use the EUs at the same time). On MTL this is done
	 * by setting the RUNALONE bit in the LRC, while starting with Xe2 there
	 * is a dedicated bit for it.
	 */
	if (xe_exec_queue_uses_pxp(q) &&
	    (q->class == XE_ENGINE_CLASS_RENDER || q->class == XE_ENGINE_CLASS_COMPUTE)) {
		if (GRAPHICS_VER(gt_to_xe(q->gt)) >= 20)
			flags |= XE_LRC_CREATE_PXP;
		else
			flags |= XE_LRC_CREATE_RUNALONE;
	}

	if (!(exec_queue_flags & EXEC_QUEUE_FLAG_KERNEL))
		flags |= XE_LRC_CREATE_USER_CTX;

	err = q->ops->init(q);
	if (err)
		return err;

	/*
	 * This must occur after q->ops->init to avoid race conditions during VF
	 * post-migration recovery, as the fixups for the LRC GGTT addresses
	 * depend on the queue being present in the backend tracking structure.
	 *
	 * In addition to the above, we must wait on in-flight GGTT changes to
	 * avoid writing out stale values here. Such a wait is only race-free if
	 * this function can detect a migration instantly, from the moment the
	 * vCPU resumes execution.
	 */
	for (i = 0; i < q->width; ++i) {
		struct xe_lrc *lrc;

		xe_gt_sriov_vf_wait_valid_ggtt(q->gt);
		lrc = xe_lrc_create(q->hwe, q->vm, xe_lrc_ring_size(),
				    q->msix_vec, flags);
		if (IS_ERR(lrc)) {
			err = PTR_ERR(lrc);
			goto err_lrc;
		}

		/* Pairs with READ_ONCE in xe_exec_queue_contexts_hwsp_rebase() */
		WRITE_ONCE(q->lrc[i], lrc);
	}

	return 0;

err_lrc:
	for (i = i - 1; i >= 0; --i)
		xe_lrc_put(q->lrc[i]);
	return err;
}

static void __xe_exec_queue_fini(struct xe_exec_queue *q)
{
	int i;

	q->ops->fini(q);

	for (i = 0; i < q->width; ++i)
		xe_lrc_put(q->lrc[i]);
}

struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
					   u32 logical_mask, u16 width,
					   struct xe_hw_engine *hwe, u32 flags,
					   u64 extensions)
{
	struct xe_exec_queue *q;
	int err;

	/* VMs for GSCCS queues (and only those) must have the XE_VM_FLAG_GSC flag */
	xe_assert(xe, !vm || (!!(vm->flags & XE_VM_FLAG_GSC) == !!(hwe->engine_id == XE_HW_ENGINE_GSCCS0)));

	q = __xe_exec_queue_alloc(xe, vm, logical_mask, width, hwe, flags,
				  extensions);
	if (IS_ERR(q))
		return q;

	err = __xe_exec_queue_init(q, flags);
	if (err)
		goto err_post_alloc;

	/*
	 * We can only add the queue to the PXP list after the init is complete,
	 * because the PXP termination can call exec_queue_kill and that will
	 * go bad if the queue is only half-initialized. This means that we
	 * can't do it when we handle the PXP extension in __xe_exec_queue_alloc
	 * and we need to do it here instead.
	 */
	if (xe_exec_queue_uses_pxp(q)) {
		err = xe_pxp_exec_queue_add(xe->pxp, q);
		if (err)
			goto err_post_init;
	}

	return q;

err_post_init:
	__xe_exec_queue_fini(q);
err_post_alloc:
	__xe_exec_queue_free(q);
	return ERR_PTR(err);
}
ALLOW_ERROR_INJECTION(xe_exec_queue_create, ERRNO);

struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
						 struct xe_vm *vm,
						 enum xe_engine_class class,
						 u32 flags, u64 extensions)
{
	struct xe_hw_engine *hwe, *hwe0 = NULL;
	enum xe_hw_engine_id id;
	u32 logical_mask = 0;

	for_each_hw_engine(hwe, gt, id) {
		if (xe_hw_engine_is_reserved(hwe))
			continue;

		if (hwe->class == class) {
			logical_mask |= BIT(hwe->logical_instance);
			if (!hwe0)
				hwe0 = hwe;
		}
	}

	if (!logical_mask)
		return ERR_PTR(-ENODEV);

	return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags, extensions);
}

/**
 * xe_exec_queue_create_bind() - Create bind exec queue.
 * @xe: Xe device.
 * @tile: tile which bind exec queue belongs to.
 * @user_vm: The user VM which this exec queue belongs to
 * @flags: exec queue creation flags
 * @extensions: exec queue creation extensions
 *
 * Normalize bind exec queue creation. A bind exec queue is tied to the
 * migration VM for access to the physical memory required for page table
 * programming. On faulting devices the reserved copy engine instance must be
 * used to avoid deadlock: user binds must not get stuck behind faults, since
 * the kernel binds which resolve those faults may in turn depend on user
 * binds. On non-faulting devices any copy engine can be used.
 *
 * Return: exec queue on success, ERR_PTR on failure
 */
struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
						struct xe_tile *tile,
						struct xe_vm *user_vm,
						u32 flags, u64 extensions)
{
	struct xe_gt *gt = tile->primary_gt;
	struct xe_exec_queue *q;
	struct xe_vm *migrate_vm;

	migrate_vm = xe_migrate_get_vm(tile->migrate);
	if (xe->info.has_usm) {
		struct xe_hw_engine *hwe = xe_gt_hw_engine(gt,
							   XE_ENGINE_CLASS_COPY,
							   gt->usm.reserved_bcs_instance,
							   false);

		if (!hwe) {
			xe_vm_put(migrate_vm);
			return ERR_PTR(-EINVAL);
		}

		q = xe_exec_queue_create(xe, migrate_vm,
					 BIT(hwe->logical_instance), 1, hwe,
					 flags, extensions);
	} else {
		q = xe_exec_queue_create_class(xe, gt, migrate_vm,
					       XE_ENGINE_CLASS_COPY, flags,
					       extensions);
	}
	xe_vm_put(migrate_vm);

	if (!IS_ERR(q)) {
		int err = drm_syncobj_create(&q->ufence_syncobj,
					     DRM_SYNCOBJ_CREATE_SIGNALED,
					     NULL);
		if (err) {
			xe_exec_queue_put(q);
			return ERR_PTR(err);
		}

		if (user_vm)
			q->user_vm = xe_vm_get(user_vm);
	}

	return q;
}
ALLOW_ERROR_INJECTION(xe_exec_queue_create_bind, ERRNO);

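/*
 * A minimal usage sketch for the above, mirroring what
 * xe_exec_queue_create_ioctl() below does per tile; the flags shown are an
 * assumption:
 *
 *	q = xe_exec_queue_create_bind(xe, tile, vm, EXEC_QUEUE_FLAG_VM, 0);
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);
 */
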
void xe_exec_queue_destroy(struct kref *ref)
{
	struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
	struct xe_exec_queue *eq, *next;
	int i;

	xe_assert(gt_to_xe(q->gt), atomic_read(&q->job_cnt) == 0);

	if (q->ufence_syncobj)
		drm_syncobj_put(q->ufence_syncobj);

	if (xe_exec_queue_uses_pxp(q))
		xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);

	xe_exec_queue_last_fence_put_unlocked(q);
	for_each_tlb_inval(i)
		xe_exec_queue_tlb_inval_last_fence_put_unlocked(q, i);

	if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) {
		list_for_each_entry_safe(eq, next, &q->multi_gt_list,
					 multi_gt_link)
			xe_exec_queue_put(eq);
	}

	if (q->user_vm) {
		xe_vm_put(q->user_vm);
		q->user_vm = NULL;
	}

	q->ops->destroy(q);
}

void xe_exec_queue_fini(struct xe_exec_queue *q)
{
	/*
	 * Before releasing our refs to the LRCs and xef, accumulate our run
	 * ticks and wake up any waiters.
	 */
	xe_exec_queue_update_run_ticks(q);
	if (q->xef && atomic_dec_and_test(&q->xef->exec_queue.pending_removal))
		wake_up_var(&q->xef->exec_queue.pending_removal);

	__xe_exec_queue_fini(q);
	__xe_exec_queue_free(q);
}

void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance)
{
	switch (q->class) {
	case XE_ENGINE_CLASS_RENDER:
		snprintf(q->name, sizeof(q->name), "rcs%d", instance);
		break;
	case XE_ENGINE_CLASS_VIDEO_DECODE:
		snprintf(q->name, sizeof(q->name), "vcs%d", instance);
		break;
	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
		snprintf(q->name, sizeof(q->name), "vecs%d", instance);
		break;
	case XE_ENGINE_CLASS_COPY:
		snprintf(q->name, sizeof(q->name), "bcs%d", instance);
		break;
	case XE_ENGINE_CLASS_COMPUTE:
		snprintf(q->name, sizeof(q->name), "ccs%d", instance);
		break;
	case XE_ENGINE_CLASS_OTHER:
		snprintf(q->name, sizeof(q->name), "gsccs%d", instance);
		break;
	default:
		XE_WARN_ON(q->class);
	}
}

struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id)
{
	struct xe_exec_queue *q;

	mutex_lock(&xef->exec_queue.lock);
	q = xa_load(&xef->exec_queue.xa, id);
	if (q)
		xe_exec_queue_get(q);
	mutex_unlock(&xef->exec_queue.lock);

	return q;
}

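/*
 * Callers own the reference returned by a successful lookup and must balance
 * it with xe_exec_queue_put(); a sketch of the usual ioctl-side pattern:
 *
 *	q = xe_exec_queue_lookup(xef, args->exec_queue_id);
 *	if (XE_IOCTL_DBG(xe, !q))
 *		return -ENOENT;
 *	... use q ...
 *	xe_exec_queue_put(q);
 */
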
enum xe_exec_queue_priority
xe_exec_queue_device_get_max_priority(struct xe_device *xe)
{
	return capable(CAP_SYS_NICE) ? XE_EXEC_QUEUE_PRIORITY_HIGH :
				       XE_EXEC_QUEUE_PRIORITY_NORMAL;
}

static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q,
				   u64 value)
{
	if (XE_IOCTL_DBG(xe, value > XE_EXEC_QUEUE_PRIORITY_HIGH))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe)))
		return -EPERM;

	q->sched_props.priority = value;
	return 0;
}

static bool xe_exec_queue_enforce_schedule_limit(void)
{
#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
	return true;
#else
	return !capable(CAP_SYS_NICE);
#endif
}

static void
xe_exec_queue_get_prop_minmax(struct xe_hw_engine_class_intf *eclass,
			      enum xe_exec_queue_sched_prop prop,
			      u32 *min, u32 *max)
{
	switch (prop) {
	case XE_EXEC_QUEUE_JOB_TIMEOUT:
		*min = eclass->sched_props.job_timeout_min;
		*max = eclass->sched_props.job_timeout_max;
		break;
	case XE_EXEC_QUEUE_TIMESLICE:
		*min = eclass->sched_props.timeslice_min;
		*max = eclass->sched_props.timeslice_max;
		break;
	case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
		*min = eclass->sched_props.preempt_timeout_min;
		*max = eclass->sched_props.preempt_timeout_max;
		break;
	default:
		break;
	}
#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
	if (capable(CAP_SYS_NICE)) {
		switch (prop) {
		case XE_EXEC_QUEUE_JOB_TIMEOUT:
			*min = XE_HW_ENGINE_JOB_TIMEOUT_MIN;
			*max = XE_HW_ENGINE_JOB_TIMEOUT_MAX;
			break;
		case XE_EXEC_QUEUE_TIMESLICE:
			*min = XE_HW_ENGINE_TIMESLICE_MIN;
			*max = XE_HW_ENGINE_TIMESLICE_MAX;
			break;
		case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
			*min = XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN;
			*max = XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX;
			break;
		default:
			break;
		}
	}
#endif
}

static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q,
				    u64 value)
{
	u32 min = 0, max = 0;

	xe_exec_queue_get_prop_minmax(q->hwe->eclass,
				      XE_EXEC_QUEUE_TIMESLICE, &min, &max);

	if (xe_exec_queue_enforce_schedule_limit() &&
	    !xe_hw_engine_timeout_in_range(value, min, max))
		return -EINVAL;

	q->sched_props.timeslice_us = value;
	return 0;
}

static int
exec_queue_set_pxp_type(struct xe_device *xe, struct xe_exec_queue *q, u64 value)
{
	if (value == DRM_XE_PXP_TYPE_NONE)
		return 0;

	/* We only support HWDRM sessions right now */
	if (XE_IOCTL_DBG(xe, value != DRM_XE_PXP_TYPE_HWDRM))
		return -EINVAL;

	if (!xe_pxp_is_enabled(xe->pxp))
		return -ENODEV;

	return xe_pxp_exec_queue_set_type(xe->pxp, q, DRM_XE_PXP_TYPE_HWDRM);
}

typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
					     struct xe_exec_queue *q,
					     u64 value);

static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE] = exec_queue_set_pxp_type,
};

static int exec_queue_user_ext_set_property(struct xe_device *xe,
					    struct xe_exec_queue *q,
					    u64 extension)
{
	u64 __user *address = u64_to_user_ptr(extension);
	struct drm_xe_ext_set_property ext;
	int err;
	u32 idx;

	err = copy_from_user(&ext, address, sizeof(ext));
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, ext.property >=
			 ARRAY_SIZE(exec_queue_set_property_funcs)) ||
	    XE_IOCTL_DBG(xe, ext.pad) ||
	    XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY &&
			 ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE &&
			 ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE))
		return -EINVAL;

	idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
	if (!exec_queue_set_property_funcs[idx])
		return -EINVAL;

	return exec_queue_set_property_funcs[idx](xe, q, ext.value);
}

typedef int (*xe_exec_queue_user_extension_fn)(struct xe_device *xe,
					       struct xe_exec_queue *q,
					       u64 extension);

static const xe_exec_queue_user_extension_fn exec_queue_user_extension_funcs[] = {
	[DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY] = exec_queue_user_ext_set_property,
};

#define MAX_USER_EXTENSIONS	16
static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 extensions, int ext_number)
{
	u64 __user *address = u64_to_user_ptr(extensions);
	struct drm_xe_user_extension ext;
	int err;
	u32 idx;

	if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
		return -E2BIG;

	err = copy_from_user(&ext, address, sizeof(ext));
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, ext.pad) ||
	    XE_IOCTL_DBG(xe, ext.name >=
			 ARRAY_SIZE(exec_queue_user_extension_funcs)))
		return -EINVAL;

	idx = array_index_nospec(ext.name,
				 ARRAY_SIZE(exec_queue_user_extension_funcs));
	err = exec_queue_user_extension_funcs[idx](xe, q, extensions);
	if (XE_IOCTL_DBG(xe, err))
		return err;

	if (ext.next_extension)
		return exec_queue_user_extensions(xe, q, ext.next_extension,
						  ++ext_number);

	return 0;
}

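/*
 * From userspace, extensions arrive as a singly linked chain of structs that
 * embed struct drm_xe_user_extension. A sketch of setting the queue priority
 * at creation time (the priority value shown is an assumption):
 *
 *	struct drm_xe_ext_set_property prop = {
 *		.base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
 *		.base.next_extension = 0,	(end of the chain)
 *		.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY,
 *		.value = 1,
 *	};
 *	create.extensions = (uintptr_t)&prop;
 */
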
static u32 calc_validate_logical_mask(struct xe_device *xe,
				      struct drm_xe_engine_class_instance *eci,
				      u16 width, u16 num_placements)
{
	int len = width * num_placements;
	int i, j, n;
	u16 class;
	u16 gt_id;
	u32 return_mask = 0, prev_mask;

	if (XE_IOCTL_DBG(xe, !xe_device_uc_enabled(xe) &&
			 len > 1))
		return 0;

	for (i = 0; i < width; ++i) {
		u32 current_mask = 0;

		for (j = 0; j < num_placements; ++j) {
			struct xe_hw_engine *hwe;

			n = j * width + i;

			hwe = xe_hw_engine_lookup(xe, eci[n]);
			if (XE_IOCTL_DBG(xe, !hwe))
				return 0;

			if (XE_IOCTL_DBG(xe, xe_hw_engine_is_reserved(hwe)))
				return 0;

			if (XE_IOCTL_DBG(xe, n && eci[n].gt_id != gt_id) ||
			    XE_IOCTL_DBG(xe, n && eci[n].engine_class != class))
				return 0;

			class = eci[n].engine_class;
			gt_id = eci[n].gt_id;

			if (width == 1 || !i)
				return_mask |= BIT(eci[n].engine_instance);
			current_mask |= BIT(eci[n].engine_instance);
		}

		/* Parallel submissions must be logically contiguous */
		if (i && XE_IOCTL_DBG(xe, current_mask != prev_mask << 1))
			return 0;

		prev_mask = current_mask;
	}

	return return_mask;
}

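/*
 * The user-supplied instances array is logically a num_placements x width
 * matrix indexed as n = j * width + i above. For width == 2 and
 * num_placements == 2 on the copy class, a valid layout could be (engine
 * instances are illustrative):
 *
 *	eci[0] = BCS0, eci[1] = BCS1	(placement 0: instances 0, 1)
 *	eci[2] = BCS1, eci[3] = BCS2	(placement 1: instances 1, 2)
 *
 * Each placement must use logically consecutive engine instances (slot i is
 * slot i - 1 plus one), which is what the prev_mask << 1 check enforces.
 */
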
int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_create *args = data;
	struct drm_xe_engine_class_instance eci[XE_HW_ENGINE_MAX_INSTANCE];
	struct drm_xe_engine_class_instance __user *user_eci =
		u64_to_user_ptr(args->instances);
	struct xe_hw_engine *hwe;
	struct xe_vm *vm;
	struct xe_tile *tile;
	struct xe_exec_queue *q = NULL;
	u32 logical_mask;
	u32 flags = 0;
	u32 id;
	u32 len;
	int err;

	if (XE_IOCTL_DBG(xe, args->flags & ~DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	len = args->width * args->num_placements;
	if (XE_IOCTL_DBG(xe, !len || len > XE_HW_ENGINE_MAX_INSTANCE))
		return -EINVAL;

	err = copy_from_user(eci, user_eci,
			     sizeof(struct drm_xe_engine_class_instance) * len);
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, !xe_device_get_gt(xe, eci[0].gt_id)))
		return -EINVAL;

	if (args->flags & DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT)
		flags |= EXEC_QUEUE_FLAG_LOW_LATENCY;

	if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) {
		if (XE_IOCTL_DBG(xe, args->width != 1) ||
		    XE_IOCTL_DBG(xe, args->num_placements != 1) ||
		    XE_IOCTL_DBG(xe, eci[0].engine_instance != 0))
			return -EINVAL;

		vm = xe_vm_lookup(xef, args->vm_id);
		if (XE_IOCTL_DBG(xe, !vm))
			return -ENOENT;

		err = down_read_interruptible(&vm->lock);
		if (err) {
			xe_vm_put(vm);
			return err;
		}

		if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
			up_read(&vm->lock);
			xe_vm_put(vm);
			return -ENOENT;
		}

		for_each_tile(tile, xe, id) {
			struct xe_exec_queue *new;

			flags |= EXEC_QUEUE_FLAG_VM;
			if (id)
				flags |= EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD;

			new = xe_exec_queue_create_bind(xe, tile, vm, flags,
							args->extensions);
			if (IS_ERR(new)) {
				up_read(&vm->lock);
				xe_vm_put(vm);
				err = PTR_ERR(new);
				if (q)
					goto put_exec_queue;
				return err;
			}
			if (id == 0)
				q = new;
			else
				list_add_tail(&new->multi_gt_list,
					      &q->multi_gt_link);
		}
		up_read(&vm->lock);
		xe_vm_put(vm);
	} else {
		logical_mask = calc_validate_logical_mask(xe, eci,
							  args->width,
							  args->num_placements);
		if (XE_IOCTL_DBG(xe, !logical_mask))
			return -EINVAL;

		hwe = xe_hw_engine_lookup(xe, eci[0]);
		if (XE_IOCTL_DBG(xe, !hwe))
			return -EINVAL;

		vm = xe_vm_lookup(xef, args->vm_id);
		if (XE_IOCTL_DBG(xe, !vm))
			return -ENOENT;

		err = down_read_interruptible(&vm->lock);
		if (err) {
			xe_vm_put(vm);
			return err;
		}

		if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
			up_read(&vm->lock);
			xe_vm_put(vm);
			return -ENOENT;
		}

		q = xe_exec_queue_create(xe, vm, logical_mask,
					 args->width, hwe, flags,
					 args->extensions);
		up_read(&vm->lock);
		xe_vm_put(vm);
		if (IS_ERR(q))
			return PTR_ERR(q);

		if (xe_vm_in_preempt_fence_mode(vm)) {
			q->lr.context = dma_fence_context_alloc(1);

			err = xe_vm_add_compute_exec_queue(vm, q);
			if (XE_IOCTL_DBG(xe, err))
				goto put_exec_queue;
		}

		if (q->vm && q->hwe->hw_engine_group) {
			err = xe_hw_engine_group_add_exec_queue(q->hwe->hw_engine_group, q);
			if (err)
				goto put_exec_queue;
		}
	}

	q->xef = xe_file_get(xef);

	/* The user ID allocation must always be last in the ioctl to prevent UAF */
	err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
	if (err)
		goto kill_exec_queue;

	args->exec_queue_id = id;

	return 0;

kill_exec_queue:
	xe_exec_queue_kill(q);
put_exec_queue:
	xe_exec_queue_put(q);
	return err;
}

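/*
 * A userspace-side sketch of this ioctl; fields mirror
 * struct drm_xe_exec_queue_create, and the engine choice is illustrative:
 *
 *	struct drm_xe_engine_class_instance eci = {
 *		.engine_class = DRM_XE_ENGINE_CLASS_COPY,
 *	};
 *	struct drm_xe_exec_queue_create create = {
 *		.width = 1,
 *		.num_placements = 1,
 *		.vm_id = vm_id,
 *		.instances = (uintptr_t)&eci,
 *	};
 *
 *	if (!ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create))
 *		exec_queue_id = create.exec_queue_id;
 */
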
int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_get_property *args = data;
	struct xe_exec_queue *q;
	int ret;

	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	q = xe_exec_queue_lookup(xef, args->exec_queue_id);
	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	switch (args->property) {
	case DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN:
		args->value = q->ops->reset_status(q);
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	xe_exec_queue_put(q);

	return ret;
}

/**
 * xe_exec_queue_lrc() - Get the LRC from exec queue.
 * @q: The exec_queue.
 *
 * Retrieves the primary LRC for the exec queue. Note that this function
 * returns only the first LRC instance, even when multiple parallel LRCs
 * are configured.
 *
 * Return: Pointer to the first LRC of the exec queue.
 */
struct xe_lrc *xe_exec_queue_lrc(struct xe_exec_queue *q)
{
	return q->lrc[0];
}

/**
 * xe_exec_queue_is_lr() - Whether an exec_queue is long-running
 * @q: The exec_queue
 *
 * Return: True if the exec_queue is long-running, false otherwise.
 */
bool xe_exec_queue_is_lr(struct xe_exec_queue *q)
{
	return q->vm && xe_vm_in_lr_mode(q->vm) &&
		!(q->flags & EXEC_QUEUE_FLAG_VM);
}

/**
 * xe_exec_queue_is_idle() - Whether an exec_queue is idle.
 * @q: The exec_queue
 *
 * FIXME: Need to determine what to use as the short-lived
 * timeline lock for the exec_queues, so that the return value
 * of this function becomes more than just an advisory
 * snapshot in time. The timeline lock must protect the
 * seqno from racing submissions on the same exec_queue.
 * Typically vm->resv, but user-created timeline locks use the migrate vm
 * and never grab the migrate vm->resv, so we have a race there.
 *
 * Return: True if the exec_queue is idle, false otherwise.
 */
bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
{
	if (xe_exec_queue_is_parallel(q)) {
		int i;

		for (i = 0; i < q->width; ++i) {
			if (xe_lrc_seqno(q->lrc[i]) !=
			    q->lrc[i]->fence_ctx.next_seqno - 1)
				return false;
		}

		return true;
	}

	return xe_lrc_seqno(q->lrc[0]) ==
		q->lrc[0]->fence_ctx.next_seqno - 1;
}

/**
 * xe_exec_queue_update_run_ticks() - Update run time in ticks for this exec
 * queue from hw
 * @q: The exec queue
 *
 * Update the timestamp saved by HW for this exec queue and save run ticks
 * calculated by using the delta from last update.
 */
void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
{
	struct xe_device *xe = gt_to_xe(q->gt);
	struct xe_lrc *lrc;
	u64 old_ts, new_ts;
	int idx;

	/*
	 * Jobs executed by the kernel don't have a corresponding xe_file and
	 * thus are not accounted.
	 */
	if (!q->xef)
		return;

	/* Synchronize with unbind while holding the xe file open */
	if (!drm_dev_enter(&xe->drm, &idx))
		return;
	/*
	 * Only sample the first LRC. For parallel submission, all of them are
	 * scheduled together and we compensate for that below by multiplying
	 * by width - this may introduce errors if that premise is not true and
	 * they don't exit 100% aligned. On the other hand, looping through
	 * the LRCs and reading them at different times could also introduce
	 * errors.
	 */
	lrc = q->lrc[0];
	new_ts = xe_lrc_update_timestamp(lrc, &old_ts);
	q->xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;

	drm_dev_exit(idx);
}

/**
 * xe_exec_queue_kill - permanently stop all execution from an exec queue
 * @q: The exec queue
 *
 * This function permanently stops all activity on an exec queue. If the queue
 * is actively executing on the HW, it will be kicked off the engine; any
 * pending jobs are discarded and all future submissions are rejected.
 * This function is safe to call multiple times.
 */
void xe_exec_queue_kill(struct xe_exec_queue *q)
{
	struct xe_exec_queue *eq = q, *next;

	list_for_each_entry_safe(eq, next, &eq->multi_gt_list,
				 multi_gt_link) {
		q->ops->kill(eq);
		xe_vm_remove_compute_exec_queue(q->vm, eq);
	}

	q->ops->kill(q);
	xe_vm_remove_compute_exec_queue(q->vm, q);
}

int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_destroy *args = data;
	struct xe_exec_queue *q;

	if (XE_IOCTL_DBG(xe, args->pad) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	mutex_lock(&xef->exec_queue.lock);
	q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id);
	if (q)
		atomic_inc(&xef->exec_queue.pending_removal);
	mutex_unlock(&xef->exec_queue.lock);

	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	if (q->vm && q->hwe->hw_engine_group)
		xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q);

	xe_exec_queue_kill(q);

	trace_xe_exec_queue_close(q);
	xe_exec_queue_put(q);

	return 0;
}

static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q,
						    struct xe_vm *vm)
{
	if (q->flags & EXEC_QUEUE_FLAG_MIGRATE) {
		xe_migrate_job_lock_assert(q);
	} else if (q->flags & EXEC_QUEUE_FLAG_VM) {
		lockdep_assert_held(&vm->lock);
	} else {
		xe_vm_assert_held(vm);
		lockdep_assert_held(&q->hwe->hw_engine_group->mode_sem);
	}
}

/**
 * xe_exec_queue_last_fence_put() - Drop ref to last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 */
void xe_exec_queue_last_fence_put(struct xe_exec_queue *q, struct xe_vm *vm)
{
	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	xe_exec_queue_last_fence_put_unlocked(q);
}

/**
 * xe_exec_queue_last_fence_put_unlocked() - Drop ref to last fence unlocked
 * @q: The exec queue
 *
 * Only safe to be called from xe_exec_queue_destroy().
 */
void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q)
{
	if (q->last_fence) {
		dma_fence_put(q->last_fence);
		q->last_fence = NULL;
	}
}

/**
 * xe_exec_queue_last_fence_get() - Get last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 *
 * Get last fence, takes a ref
 *
 * Return: last fence if not signaled, dma fence stub if signaled
 */
struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
					       struct xe_vm *vm)
{
	struct dma_fence *fence;

	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	if (q->last_fence &&
	    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
		xe_exec_queue_last_fence_put(q, vm);

	fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
	dma_fence_get(fence);
	return fence;
}

/**
 * xe_exec_queue_last_fence_get_for_resume() - Get last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 *
 * Get last fence, takes a ref. Only safe to be called in the context of
 * resuming the hw engine group's long-running exec queue, when the group
 * semaphore is held.
 *
 * Return: last fence if not signaled, dma fence stub if signaled
 */
struct dma_fence *xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue *q,
							  struct xe_vm *vm)
{
	struct dma_fence *fence;

	lockdep_assert_held_write(&q->hwe->hw_engine_group->mode_sem);

	if (q->last_fence &&
	    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
		xe_exec_queue_last_fence_put_unlocked(q);

	fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
	dma_fence_get(fence);
	return fence;
}

/**
 * xe_exec_queue_last_fence_set() - Set last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 * @fence: The fence
 *
 * Set the last fence for the engine. Takes a reference on @fence;
 * xe_exec_queue_last_fence_put() should be called when closing the engine.
 */
void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm,
				  struct dma_fence *fence)
{
	xe_exec_queue_last_fence_lockdep_assert(q, vm);
	xe_assert(vm->xe, !dma_fence_is_container(fence));

	xe_exec_queue_last_fence_put(q, vm);
	q->last_fence = dma_fence_get(fence);
}

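/*
 * Typical last-fence usage under the appropriate lock (a sketch; the locking
 * asserted above is the caller's responsibility):
 *
 *	fence = xe_exec_queue_last_fence_get(q, vm);	(takes a ref)
 *	... wait on or chain behind fence ...
 *	dma_fence_put(fence);
 *
 *	xe_exec_queue_last_fence_set(q, vm, new_fence);	(queue holds a ref)
 */
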
/**
 * xe_exec_queue_tlb_inval_last_fence_put() - Drop ref to last TLB invalidation fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind for
 * @type: Either primary or media GT
 */
void xe_exec_queue_tlb_inval_last_fence_put(struct xe_exec_queue *q,
					    struct xe_vm *vm,
					    unsigned int type)
{
	xe_exec_queue_last_fence_lockdep_assert(q, vm);
	xe_assert(vm->xe, type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT ||
		  type == XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT);

	xe_exec_queue_tlb_inval_last_fence_put_unlocked(q, type);
}

/**
 * xe_exec_queue_tlb_inval_last_fence_put_unlocked() - Drop ref to last TLB
 * invalidation fence unlocked
 * @q: The exec queue
 * @type: Either primary or media GT
 *
 * Only safe to be called from xe_exec_queue_destroy().
 */
void xe_exec_queue_tlb_inval_last_fence_put_unlocked(struct xe_exec_queue *q,
						     unsigned int type)
{
	xe_assert(q->vm->xe, type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT ||
		  type == XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT);

	dma_fence_put(q->tlb_inval[type].last_fence);
	q->tlb_inval[type].last_fence = NULL;
}

/**
 * xe_exec_queue_tlb_inval_last_fence_get() - Get last fence for TLB invalidation
 * @q: The exec queue
 * @vm: The VM the engine does a bind for
 * @type: Either primary or media GT
 *
 * Get last fence, takes a ref
 *
 * Return: last fence if not signaled, dma fence stub if signaled
 */
struct dma_fence *xe_exec_queue_tlb_inval_last_fence_get(struct xe_exec_queue *q,
							 struct xe_vm *vm,
							 unsigned int type)
{
	struct dma_fence *fence;

	xe_exec_queue_last_fence_lockdep_assert(q, vm);
	xe_assert(vm->xe, type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT ||
		  type == XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT);
	xe_assert(vm->xe, q->flags & (EXEC_QUEUE_FLAG_VM |
				      EXEC_QUEUE_FLAG_MIGRATE));

	if (q->tlb_inval[type].last_fence &&
	    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
		     &q->tlb_inval[type].last_fence->flags))
		xe_exec_queue_tlb_inval_last_fence_put(q, vm, type);

	fence = q->tlb_inval[type].last_fence ?: dma_fence_get_stub();
	dma_fence_get(fence);
	return fence;
}

/**
 * xe_exec_queue_tlb_inval_last_fence_set() - Set last fence for TLB invalidation
 * @q: The exec queue
 * @vm: The VM the engine does a bind for
 * @fence: The fence
 * @type: Either primary or media GT
 *
 * Set the last fence for the tlb invalidation type on the queue. Takes a
 * reference on @fence; xe_exec_queue_tlb_inval_last_fence_put() should be
 * called when closing the queue.
 */
void xe_exec_queue_tlb_inval_last_fence_set(struct xe_exec_queue *q,
					    struct xe_vm *vm,
					    struct dma_fence *fence,
					    unsigned int type)
{
	xe_exec_queue_last_fence_lockdep_assert(q, vm);
	xe_assert(vm->xe, type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT ||
		  type == XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT);
	xe_assert(vm->xe, q->flags & (EXEC_QUEUE_FLAG_VM |
				      EXEC_QUEUE_FLAG_MIGRATE));
	xe_assert(vm->xe, !dma_fence_is_container(fence));

	xe_exec_queue_tlb_inval_last_fence_put(q, vm, type);
	q->tlb_inval[type].last_fence = dma_fence_get(fence);
}

/**
 * xe_exec_queue_contexts_hwsp_rebase - Re-compute GGTT references
 * within all LRCs of a queue.
 * @q: the &xe_exec_queue struct instance containing target LRCs
 * @scratch: scratch buffer to be used as temporary storage
 *
 * Return: zero on success, negative error code on failure
 */
int xe_exec_queue_contexts_hwsp_rebase(struct xe_exec_queue *q, void *scratch)
{
	int i;
	int err = 0;

	for (i = 0; i < q->width; ++i) {
		struct xe_lrc *lrc;

		/* Pairs with WRITE_ONCE in __xe_exec_queue_init() */
		lrc = READ_ONCE(q->lrc[i]);
		if (!lrc)
			continue;

		xe_lrc_update_memirq_regs_with_address(lrc, q->hwe, scratch);
		xe_lrc_update_hwctx_regs_with_address(lrc);
		err = xe_lrc_setup_wa_bb_with_scratch(lrc, q->hwe, scratch);
		if (err)
			break;
	}

	return err;
}