// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_exec_queue.h"

#include <linux/nospec.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/xe_drm.h>

#include "xe_device.h"
#include "xe_gt.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_hw_fence.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_migrate.h"
#include "xe_pm.h"
#include "xe_ring_ops_types.h"
#include "xe_trace.h"
#include "xe_vm.h"

enum xe_exec_queue_sched_prop {
	XE_EXEC_QUEUE_JOB_TIMEOUT = 0,
	XE_EXEC_QUEUE_TIMESLICE = 1,
	XE_EXEC_QUEUE_PREEMPT_TIMEOUT = 2,
	XE_EXEC_QUEUE_SCHED_PROP_MAX = 3,
};

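/*
 * Common backend for exec queue creation: allocates the queue together with
 * one LRC per width, copies the scheduling defaults from the hw engine class
 * and calls the GT's exec_queue_ops->init() hook.
 */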
static struct xe_exec_queue *__xe_exec_queue_create(struct xe_device *xe,
						    struct xe_vm *vm,
						    u32 logical_mask,
						    u16 width, struct xe_hw_engine *hwe,
						    u32 flags)
{
	struct xe_exec_queue *q;
	struct xe_gt *gt = hwe->gt;
	int err;
	int i;

	/* only kernel queues can be permanent */
	XE_WARN_ON((flags & EXEC_QUEUE_FLAG_PERMANENT) && !(flags & EXEC_QUEUE_FLAG_KERNEL));

	q = kzalloc(sizeof(*q) + sizeof(struct xe_lrc) * width, GFP_KERNEL);
	if (!q)
		return ERR_PTR(-ENOMEM);

	kref_init(&q->refcount);
	q->flags = flags;
	q->hwe = hwe;
	q->gt = gt;
	if (vm)
		q->vm = xe_vm_get(vm);
	q->class = hwe->class;
	q->width = width;
	q->logical_mask = logical_mask;
	q->fence_irq = &gt->fence_irq[hwe->class];
	q->ring_ops = gt->ring_ops[hwe->class];
	q->ops = gt->exec_queue_ops;
	INIT_LIST_HEAD(&q->compute.link);
	INIT_LIST_HEAD(&q->multi_gt_link);

	q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
	q->sched_props.preempt_timeout_us =
				hwe->eclass->sched_props.preempt_timeout_us;
	if (q->flags & EXEC_QUEUE_FLAG_KERNEL &&
	    q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY)
		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
	else
		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;

	if (xe_exec_queue_is_parallel(q)) {
		q->parallel.composite_fence_ctx = dma_fence_context_alloc(1);
		q->parallel.composite_fence_seqno = XE_FENCE_INITIAL_SEQNO;
	}
	if (q->flags & EXEC_QUEUE_FLAG_VM) {
		q->bind.fence_ctx = dma_fence_context_alloc(1);
		q->bind.fence_seqno = XE_FENCE_INITIAL_SEQNO;
	}

	for (i = 0; i < width; ++i) {
		err = xe_lrc_init(q->lrc + i, hwe, q, vm, SZ_16K);
		if (err)
			goto err_lrc;
	}

	err = q->ops->init(q);
	if (err)
		goto err_lrc;

	/*
	 * Normally the user vm holds an rpm ref to keep the device
	 * awake, and the context holds a ref for the vm; however, for
	 * some engines we use the kernel's migrate vm underneath, which
	 * offers no such rpm ref, or we lack a vm entirely. Make sure we
	 * keep a ref here, so we can perform GuC CT actions when needed.
	 * The caller is expected to have already grabbed the rpm ref
	 * outside any sensitive locks.
	 */
	if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM || !vm))
		drm_WARN_ON(&xe->drm, !xe_device_mem_access_get_if_ongoing(xe));

	return q;

err_lrc:
	for (i = i - 1; i >= 0; --i)
		xe_lrc_finish(q->lrc + i);
	kfree(q);
	return ERR_PTR(err);
}

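/**
 * xe_exec_queue_create() - Create an exec queue
 * @xe: xe device instance
 * @vm: VM to run the queue on, or NULL
 * @logical_mask: logical mask of engine instances the queue may run on
 * @width: number of engines a single submission spans (parallel submission)
 * @hwe: hardware engine used to pick class, GT and scheduling defaults
 * @flags: EXEC_QUEUE_FLAG_* flags
 *
 * Takes the VM lock (if a VM is given) around the internal creation helper.
 *
 * Return: pointer to the new exec queue or an ERR_PTR on failure.
 */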
struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
					   u32 logical_mask, u16 width,
					   struct xe_hw_engine *hwe, u32 flags)
{
	struct xe_exec_queue *q;
	int err;

	if (vm) {
		err = xe_vm_lock(vm, true);
		if (err)
			return ERR_PTR(err);
	}
	q = __xe_exec_queue_create(xe, vm, logical_mask, width, hwe, flags);
	if (vm)
		xe_vm_unlock(vm);

	return q;
}

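/**
 * xe_exec_queue_create_class() - Create an exec queue on any engine of a class
 * @xe: xe device instance
 * @gt: GT to select engines from
 * @vm: VM to run the queue on, or NULL
 * @class: engine class to place the queue on
 * @flags: EXEC_QUEUE_FLAG_* flags
 *
 * Builds a logical mask from all non-reserved engines of @class on @gt and
 * creates a width-1 queue on it.
 *
 * Return: pointer to the new exec queue, ERR_PTR(-ENODEV) if the class has no
 * usable engines, or another ERR_PTR on failure.
 */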
struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
						 struct xe_vm *vm,
						 enum xe_engine_class class, u32 flags)
{
	struct xe_hw_engine *hwe, *hwe0 = NULL;
	enum xe_hw_engine_id id;
	u32 logical_mask = 0;

	for_each_hw_engine(hwe, gt, id) {
		if (xe_hw_engine_is_reserved(hwe))
			continue;

		if (hwe->class == class) {
			logical_mask |= BIT(hwe->logical_instance);
			if (!hwe0)
				hwe0 = hwe;
		}
	}

	if (!logical_mask)
		return ERR_PTR(-ENODEV);

	return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags);
}

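/**
 * xe_exec_queue_destroy() - Final exec queue destruction, kref release callback
 * @ref: the &xe_exec_queue.refcount that hit zero
 *
 * Called via xe_exec_queue_put(). Drops the last fence, releases the
 * references to any child queues on the multi-GT list and hands the queue to
 * the backend's fini hook for the remaining teardown.
 */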
void xe_exec_queue_destroy(struct kref *ref)
{
	struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
	struct xe_exec_queue *eq, *next;

	xe_exec_queue_last_fence_put_unlocked(q);
	if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) {
		list_for_each_entry_safe(eq, next, &q->multi_gt_list,
					 multi_gt_link)
			xe_exec_queue_put(eq);
	}

	q->ops->fini(q);
}

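/**
 * xe_exec_queue_fini() - Free an exec queue and the resources it owns
 * @q: The exec queue
 *
 * Finishes the LRCs, drops the rpm reference taken at creation (when
 * applicable), puts the VM reference and frees the queue itself.
 */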
void xe_exec_queue_fini(struct xe_exec_queue *q)
{
	int i;

	for (i = 0; i < q->width; ++i)
		xe_lrc_finish(q->lrc + i);
	if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM || !q->vm))
		xe_device_mem_access_put(gt_to_xe(q->gt));
	if (q->vm)
		xe_vm_put(q->vm);

	kfree(q);
}

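/**
 * xe_exec_queue_assign_name() - Assign a user-visible name to an exec queue
 * @q: The exec queue
 * @instance: instance number appended to the class prefix
 *
 * Names follow the familiar engine naming scheme based on the queue's engine
 * class, e.g. "rcs0" or "bcs1".
 */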
void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance)
{
	switch (q->class) {
	case XE_ENGINE_CLASS_RENDER:
		sprintf(q->name, "rcs%d", instance);
		break;
	case XE_ENGINE_CLASS_VIDEO_DECODE:
		sprintf(q->name, "vcs%d", instance);
		break;
	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
		sprintf(q->name, "vecs%d", instance);
		break;
	case XE_ENGINE_CLASS_COPY:
		sprintf(q->name, "bcs%d", instance);
		break;
	case XE_ENGINE_CLASS_COMPUTE:
		sprintf(q->name, "ccs%d", instance);
		break;
	case XE_ENGINE_CLASS_OTHER:
		sprintf(q->name, "gsccs%d", instance);
		break;
	default:
		XE_WARN_ON(q->class);
	}
}

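/**
 * xe_exec_queue_lookup() - Look up an exec queue by its per-file ID
 * @xef: xe file the queue was created on
 * @id: exec queue ID as returned by the create ioctl
 *
 * Return: the exec queue with an extra reference taken, or NULL if no queue
 * with that ID exists.
 */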
struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id)
{
	struct xe_exec_queue *q;

	mutex_lock(&xef->exec_queue.lock);
	q = xa_load(&xef->exec_queue.xa, id);
	if (q)
		xe_exec_queue_get(q);
	mutex_unlock(&xef->exec_queue.lock);

	return q;
}

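/**
 * xe_exec_queue_device_get_max_priority() - Max queue priority for the caller
 * @xe: xe device instance
 *
 * Return: XE_EXEC_QUEUE_PRIORITY_HIGH if the caller has CAP_SYS_NICE,
 * XE_EXEC_QUEUE_PRIORITY_NORMAL otherwise.
 */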
enum xe_exec_queue_priority
xe_exec_queue_device_get_max_priority(struct xe_device *xe)
{
	return capable(CAP_SYS_NICE) ? XE_EXEC_QUEUE_PRIORITY_HIGH :
				       XE_EXEC_QUEUE_PRIORITY_NORMAL;
}

static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q,
				   u64 value, bool create)
{
	if (XE_IOCTL_DBG(xe, value > XE_EXEC_QUEUE_PRIORITY_HIGH))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe)))
		return -EPERM;

	return q->ops->set_priority(q, value);
}

static bool xe_exec_queue_enforce_schedule_limit(void)
{
#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
	return true;
#else
	return !capable(CAP_SYS_NICE);
#endif
}

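/*
 * Look up the min/max bounds for a scheduling property of an engine class.
 * With CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT enabled, privileged callers
 * (CAP_SYS_NICE) get the wider hardware-defined limits instead of the
 * per-class configured ones.
 */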
static void
xe_exec_queue_get_prop_minmax(struct xe_hw_engine_class_intf *eclass,
			      enum xe_exec_queue_sched_prop prop,
			      u32 *min, u32 *max)
{
	switch (prop) {
	case XE_EXEC_QUEUE_JOB_TIMEOUT:
		*min = eclass->sched_props.job_timeout_min;
		*max = eclass->sched_props.job_timeout_max;
		break;
	case XE_EXEC_QUEUE_TIMESLICE:
		*min = eclass->sched_props.timeslice_min;
		*max = eclass->sched_props.timeslice_max;
		break;
	case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
		*min = eclass->sched_props.preempt_timeout_min;
		*max = eclass->sched_props.preempt_timeout_max;
		break;
	default:
		break;
	}
#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
	if (capable(CAP_SYS_NICE)) {
		switch (prop) {
		case XE_EXEC_QUEUE_JOB_TIMEOUT:
			*min = XE_HW_ENGINE_JOB_TIMEOUT_MIN;
			*max = XE_HW_ENGINE_JOB_TIMEOUT_MAX;
			break;
		case XE_EXEC_QUEUE_TIMESLICE:
			*min = XE_HW_ENGINE_TIMESLICE_MIN;
			*max = XE_HW_ENGINE_TIMESLICE_MAX;
			break;
		case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
			*min = XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN;
			*max = XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX;
			break;
		default:
			break;
		}
	}
#endif
}

static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q,
				    u64 value, bool create)
{
	u32 min = 0, max = 0;

	xe_exec_queue_get_prop_minmax(q->hwe->eclass,
				      XE_EXEC_QUEUE_TIMESLICE, &min, &max);

	if (xe_exec_queue_enforce_schedule_limit() &&
	    !xe_hw_engine_timeout_in_range(value, min, max))
		return -EINVAL;

	return q->ops->set_timeslice(q, value);
}

static int exec_queue_set_preemption_timeout(struct xe_device *xe,
					     struct xe_exec_queue *q, u64 value,
					     bool create)
{
	u32 min = 0, max = 0;

	xe_exec_queue_get_prop_minmax(q->hwe->eclass,
				      XE_EXEC_QUEUE_PREEMPT_TIMEOUT, &min, &max);

	if (xe_exec_queue_enforce_schedule_limit() &&
	    !xe_hw_engine_timeout_in_range(value, min, max))
		return -EINVAL;

	return q->ops->set_preempt_timeout(q, value);
}

static int exec_queue_set_job_timeout(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 value, bool create)
{
	u32 min = 0, max = 0;

	if (XE_IOCTL_DBG(xe, !create))
		return -EINVAL;

	xe_exec_queue_get_prop_minmax(q->hwe->eclass,
				      XE_EXEC_QUEUE_JOB_TIMEOUT, &min, &max);

	if (xe_exec_queue_enforce_schedule_limit() &&
	    !xe_hw_engine_timeout_in_range(value, min, max))
		return -EINVAL;

	return q->ops->set_job_timeout(q, value);
}

static int exec_queue_set_acc_trigger(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 value, bool create)
{
	if (XE_IOCTL_DBG(xe, !create))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, !xe->info.has_usm))
		return -EINVAL;

	q->usm.acc_trigger = value;

	return 0;
}

static int exec_queue_set_acc_notify(struct xe_device *xe, struct xe_exec_queue *q,
				     u64 value, bool create)
{
	if (XE_IOCTL_DBG(xe, !create))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, !xe->info.has_usm))
		return -EINVAL;

	q->usm.acc_notify = value;

	return 0;
}

static int exec_queue_set_acc_granularity(struct xe_device *xe, struct xe_exec_queue *q,
					  u64 value, bool create)
{
	if (XE_IOCTL_DBG(xe, !create))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, !xe->info.has_usm))
		return -EINVAL;

	if (value > DRM_XE_ACC_GRANULARITY_64M)
		return -EINVAL;

	q->usm.acc_granularity = value;

	return 0;
}

typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
					     struct xe_exec_queue *q,
					     u64 value, bool create);

static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT] = exec_queue_set_preemption_timeout,
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT] = exec_queue_set_job_timeout,
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER] = exec_queue_set_acc_trigger,
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY] = exec_queue_set_acc_notify,
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY] = exec_queue_set_acc_granularity,
};

static int exec_queue_user_ext_set_property(struct xe_device *xe,
					    struct xe_exec_queue *q,
					    u64 extension,
					    bool create)
{
	u64 __user *address = u64_to_user_ptr(extension);
	struct drm_xe_ext_set_property ext;
	int err;
	u32 idx;

	err = __copy_from_user(&ext, address, sizeof(ext));
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, ext.property >=
			 ARRAY_SIZE(exec_queue_set_property_funcs)) ||
	    XE_IOCTL_DBG(xe, ext.pad))
		return -EINVAL;

	idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
	if (!exec_queue_set_property_funcs[idx])
		return -EINVAL;

	return exec_queue_set_property_funcs[idx](xe, q, ext.value, create);
}

typedef int (*xe_exec_queue_user_extension_fn)(struct xe_device *xe,
					       struct xe_exec_queue *q,
					       u64 extension,
					       bool create);

static const xe_exec_queue_user_extension_fn exec_queue_user_extension_funcs[] = {
	[DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY] = exec_queue_user_ext_set_property,
};

#define MAX_USER_EXTENSIONS	16
static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 extensions, int ext_number, bool create)
{
	u64 __user *address = u64_to_user_ptr(extensions);
	struct drm_xe_user_extension ext;
	int err;
	u32 idx;

	if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
		return -E2BIG;

	err = __copy_from_user(&ext, address, sizeof(ext));
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, ext.pad) ||
	    XE_IOCTL_DBG(xe, ext.name >=
			 ARRAY_SIZE(exec_queue_user_extension_funcs)))
		return -EINVAL;

	idx = array_index_nospec(ext.name,
				 ARRAY_SIZE(exec_queue_user_extension_funcs));
	err = exec_queue_user_extension_funcs[idx](xe, q, extensions, create);
	if (XE_IOCTL_DBG(xe, err))
		return err;

	if (ext.next_extension)
		return exec_queue_user_extensions(xe, q, ext.next_extension,
					      ++ext_number, create);

	return 0;
}

static const enum xe_engine_class user_to_xe_engine_class[] = {
	[DRM_XE_ENGINE_CLASS_RENDER] = XE_ENGINE_CLASS_RENDER,
	[DRM_XE_ENGINE_CLASS_COPY] = XE_ENGINE_CLASS_COPY,
	[DRM_XE_ENGINE_CLASS_VIDEO_DECODE] = XE_ENGINE_CLASS_VIDEO_DECODE,
	[DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE] = XE_ENGINE_CLASS_VIDEO_ENHANCE,
	[DRM_XE_ENGINE_CLASS_COMPUTE] = XE_ENGINE_CLASS_COMPUTE,
};

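/*
 * Translate a user-space engine class/instance/GT triplet into a hw engine,
 * rejecting out-of-range classes and GT IDs. Returns NULL if no matching
 * engine exists.
 */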
static struct xe_hw_engine *
find_hw_engine(struct xe_device *xe,
	       struct drm_xe_engine_class_instance eci)
{
	u32 idx;

	if (eci.engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
		return NULL;

	if (eci.gt_id >= xe->info.gt_count)
		return NULL;

	idx = array_index_nospec(eci.engine_class,
				 ARRAY_SIZE(user_to_xe_engine_class));

	return xe_gt_hw_engine(xe_device_get_gt(xe, eci.gt_id),
			       user_to_xe_engine_class[idx],
			       eci.engine_instance, true);
}

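/*
 * Bind (VM_BIND) queues are restricted to a single placement on the copy
 * class; build the logical mask of all usable copy engines on the GT and
 * reject any other width/placement/instance combination.
 */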
static u32 bind_exec_queue_logical_mask(struct xe_device *xe, struct xe_gt *gt,
					struct drm_xe_engine_class_instance *eci,
					u16 width, u16 num_placements)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	u32 logical_mask = 0;

	if (XE_IOCTL_DBG(xe, width != 1))
		return 0;
	if (XE_IOCTL_DBG(xe, num_placements != 1))
		return 0;
	if (XE_IOCTL_DBG(xe, eci[0].engine_instance != 0))
		return 0;

	eci[0].engine_class = DRM_XE_ENGINE_CLASS_COPY;

	for_each_hw_engine(hwe, gt, id) {
		if (xe_hw_engine_is_reserved(hwe))
			continue;

		if (hwe->class ==
		    user_to_xe_engine_class[DRM_XE_ENGINE_CLASS_COPY])
			logical_mask |= BIT(hwe->logical_instance);
	}

	return logical_mask;
}

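/*
 * Validate the user-supplied width x num_placements matrix of engine
 * instances and collapse it into a logical mask. All instances must live on
 * the same GT and class, and parallel submissions must use logically
 * contiguous engines. Returns 0 on any validation failure.
 */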
static u32 calc_validate_logical_mask(struct xe_device *xe, struct xe_gt *gt,
				      struct drm_xe_engine_class_instance *eci,
				      u16 width, u16 num_placements)
{
	int len = width * num_placements;
	int i, j, n;
	u16 class;
	u16 gt_id;
	u32 return_mask = 0, prev_mask;

	if (XE_IOCTL_DBG(xe, !xe_device_uc_enabled(xe) &&
			 len > 1))
		return 0;

	for (i = 0; i < width; ++i) {
		u32 current_mask = 0;

		for (j = 0; j < num_placements; ++j) {
			struct xe_hw_engine *hwe;

			n = j * width + i;

			hwe = find_hw_engine(xe, eci[n]);
			if (XE_IOCTL_DBG(xe, !hwe))
				return 0;

			if (XE_IOCTL_DBG(xe, xe_hw_engine_is_reserved(hwe)))
				return 0;

			if (XE_IOCTL_DBG(xe, n && eci[n].gt_id != gt_id) ||
			    XE_IOCTL_DBG(xe, n && eci[n].engine_class != class))
				return 0;

			class = eci[n].engine_class;
			gt_id = eci[n].gt_id;

			if (width == 1 || !i)
				return_mask |= BIT(eci[n].engine_instance);
			current_mask |= BIT(eci[n].engine_instance);
		}

		/* Parallel submissions must be logically contiguous */
		if (i && XE_IOCTL_DBG(xe, current_mask != prev_mask << 1))
			return 0;

		prev_mask = current_mask;
	}

	return return_mask;
}

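/**
 * xe_exec_queue_create_ioctl() - DRM_IOCTL_XE_EXEC_QUEUE_CREATE implementation
 * @dev: DRM device
 * @data: &struct drm_xe_exec_queue_create payload from user space
 * @file: DRM file the queue is created on
 *
 * Validates the requested engine placement, creates either a VM bind queue
 * (one per non-media GT) or a normal execution queue on the given VM, applies
 * any user extensions and publishes the new queue ID in @data.
 *
 * Return: 0 on success, negative error code on failure.
 */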
int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_create *args = data;
	struct drm_xe_engine_class_instance eci[XE_HW_ENGINE_MAX_INSTANCE];
	struct drm_xe_engine_class_instance __user *user_eci =
		u64_to_user_ptr(args->instances);
	struct xe_hw_engine *hwe;
	struct xe_vm *vm, *migrate_vm;
	struct xe_gt *gt;
	struct xe_exec_queue *q = NULL;
	u32 logical_mask;
	u32 id;
	u32 len;
	int err;

	if (XE_IOCTL_DBG(xe, args->flags) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	len = args->width * args->num_placements;
	if (XE_IOCTL_DBG(xe, !len || len > XE_HW_ENGINE_MAX_INSTANCE))
		return -EINVAL;

	err = __copy_from_user(eci, user_eci,
			       sizeof(struct drm_xe_engine_class_instance) *
			       len);
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, eci[0].gt_id >= xe->info.gt_count))
		return -EINVAL;

	if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) {
		for_each_gt(gt, xe, id) {
			struct xe_exec_queue *new;

			if (xe_gt_is_media_type(gt))
				continue;

			eci[0].gt_id = gt->info.id;
			logical_mask = bind_exec_queue_logical_mask(xe, gt, eci,
								    args->width,
								    args->num_placements);
			if (XE_IOCTL_DBG(xe, !logical_mask))
				return -EINVAL;

			hwe = find_hw_engine(xe, eci[0]);
			if (XE_IOCTL_DBG(xe, !hwe))
				return -EINVAL;

			/* The migration vm doesn't hold an rpm ref */
			xe_device_mem_access_get(xe);

			migrate_vm = xe_migrate_get_vm(gt_to_tile(gt)->migrate);
			new = xe_exec_queue_create(xe, migrate_vm, logical_mask,
						   args->width, hwe,
						   EXEC_QUEUE_FLAG_PERSISTENT |
						   EXEC_QUEUE_FLAG_VM |
						   (id ?
						    EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD :
						    0));

			xe_device_mem_access_put(xe); /* now held by engine */

			xe_vm_put(migrate_vm);
			if (IS_ERR(new)) {
				err = PTR_ERR(new);
				if (q)
					goto put_exec_queue;
				return err;
			}
			if (id == 0)
				q = new;
			else
				list_add_tail(&new->multi_gt_list,
					      &q->multi_gt_link);
		}
	} else {
		gt = xe_device_get_gt(xe, eci[0].gt_id);
		logical_mask = calc_validate_logical_mask(xe, gt, eci,
							  args->width,
							  args->num_placements);
		if (XE_IOCTL_DBG(xe, !logical_mask))
			return -EINVAL;

		hwe = find_hw_engine(xe, eci[0]);
		if (XE_IOCTL_DBG(xe, !hwe))
			return -EINVAL;

		vm = xe_vm_lookup(xef, args->vm_id);
		if (XE_IOCTL_DBG(xe, !vm))
			return -ENOENT;

		err = down_read_interruptible(&vm->lock);
		if (err) {
			xe_vm_put(vm);
			return err;
		}

		if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
			up_read(&vm->lock);
			xe_vm_put(vm);
			return -ENOENT;
		}

		q = xe_exec_queue_create(xe, vm, logical_mask,
					 args->width, hwe, 0);
		up_read(&vm->lock);
		xe_vm_put(vm);
		if (IS_ERR(q))
			return PTR_ERR(q);

		if (xe_vm_in_preempt_fence_mode(vm)) {
			q->compute.context = dma_fence_context_alloc(1);
			spin_lock_init(&q->compute.lock);

			err = xe_vm_add_compute_exec_queue(vm, q);
			if (XE_IOCTL_DBG(xe, err))
				goto put_exec_queue;
		}
	}

	if (args->extensions) {
		err = exec_queue_user_extensions(xe, q, args->extensions, 0, true);
		if (XE_IOCTL_DBG(xe, err))
			goto kill_exec_queue;
	}

	mutex_lock(&xef->exec_queue.lock);
	err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
	mutex_unlock(&xef->exec_queue.lock);
	if (err)
		goto kill_exec_queue;

	args->exec_queue_id = id;

	return 0;

kill_exec_queue:
	xe_exec_queue_kill(q);
put_exec_queue:
	xe_exec_queue_put(q);
	return err;
}

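/**
 * xe_exec_queue_get_property_ioctl() - DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY
 * @dev: DRM device
 * @data: &struct drm_xe_exec_queue_get_property payload from user space
 * @file: DRM file owning the exec queue
 *
 * Currently only DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN is supported, reporting
 * whether the queue has been banned.
 *
 * Return: 0 on success, negative error code on failure.
 */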
int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_get_property *args = data;
	struct xe_exec_queue *q;
	int ret;

	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	q = xe_exec_queue_lookup(xef, args->exec_queue_id);
	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	switch (args->property) {
	case DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN:
		args->value = !!(q->flags & EXEC_QUEUE_FLAG_BANNED);
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	xe_exec_queue_put(q);

	return ret;
}

/**
 * xe_exec_queue_is_lr() - Whether an exec_queue is long-running
 * @q: The exec_queue
 *
 * Return: True if the exec_queue is long-running, false otherwise.
 */
bool xe_exec_queue_is_lr(struct xe_exec_queue *q)
{
	return q->vm && xe_vm_in_lr_mode(q->vm) &&
		!(q->flags & EXEC_QUEUE_FLAG_VM);
}

static s32 xe_exec_queue_num_job_inflight(struct xe_exec_queue *q)
{
	return q->lrc->fence_ctx.next_seqno - xe_lrc_seqno(q->lrc) - 1;
}

/**
 * xe_exec_queue_ring_full() - Whether an exec_queue's ring is full
 * @q: The exec_queue
 *
 * Return: True if the exec_queue's ring is full, false otherwise.
 */
bool xe_exec_queue_ring_full(struct xe_exec_queue *q)
{
	struct xe_lrc *lrc = q->lrc;
	s32 max_job = lrc->ring.size / MAX_JOB_SIZE_BYTES;

	return xe_exec_queue_num_job_inflight(q) >= max_job;
}

/**
 * xe_exec_queue_is_idle() - Whether an exec_queue is idle.
 * @q: The exec_queue
 *
 * FIXME: Need to determine what to use as the short-lived
 * timeline lock for the exec_queues, so that the return value
 * of this function becomes more than just an advisory
 * snapshot in time. The timeline lock must protect the
 * seqno from racing submissions on the same exec_queue.
 * Typically vm->resv, but user-created timeline locks use the migrate vm
 * and never grab the migrate vm->resv, so we have a race there.
 *
 * Return: True if the exec_queue is idle, false otherwise.
 */
bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
{
	if (xe_exec_queue_is_parallel(q)) {
		int i;

		for (i = 0; i < q->width; ++i) {
			if (xe_lrc_seqno(&q->lrc[i]) !=
			    q->lrc[i].fence_ctx.next_seqno - 1)
				return false;
		}

		return true;
	}

	return xe_lrc_seqno(&q->lrc[0]) ==
		q->lrc[0].fence_ctx.next_seqno - 1;
}

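/**
 * xe_exec_queue_kill() - Kill an exec queue and its multi-GT children
 * @q: The exec queue
 *
 * Kills the queue via the backend's kill hook and removes it (and any child
 * queues on the multi-GT list) from the VM's compute exec queue list.
 */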
void xe_exec_queue_kill(struct xe_exec_queue *q)
{
	struct xe_exec_queue *eq = q, *next;

	list_for_each_entry_safe(eq, next, &eq->multi_gt_list,
				 multi_gt_link) {
		q->ops->kill(eq);
		xe_vm_remove_compute_exec_queue(q->vm, eq);
	}

	q->ops->kill(q);
	xe_vm_remove_compute_exec_queue(q->vm, q);
}

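/**
 * xe_exec_queue_destroy_ioctl() - DRM_IOCTL_XE_EXEC_QUEUE_DESTROY implementation
 * @dev: DRM device
 * @data: &struct drm_xe_exec_queue_destroy payload from user space
 * @file: DRM file owning the exec queue
 *
 * Removes the queue from the file's lookup table, kills it and drops the
 * reference held by the lookup table.
 *
 * Return: 0 on success, negative error code on failure.
 */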
int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_destroy *args = data;
	struct xe_exec_queue *q;

	if (XE_IOCTL_DBG(xe, args->pad) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	mutex_lock(&xef->exec_queue.lock);
	q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id);
	mutex_unlock(&xef->exec_queue.lock);
	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	xe_exec_queue_kill(q);

	trace_xe_exec_queue_close(q);
	xe_exec_queue_put(q);

	return 0;
}

static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q,
						    struct xe_vm *vm)
{
	if (q->flags & EXEC_QUEUE_FLAG_VM)
		lockdep_assert_held(&vm->lock);
	else
		xe_vm_assert_held(vm);
}

/**
 * xe_exec_queue_last_fence_put() - Drop ref to last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 */
void xe_exec_queue_last_fence_put(struct xe_exec_queue *q, struct xe_vm *vm)
{
	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	if (q->last_fence) {
		dma_fence_put(q->last_fence);
		q->last_fence = NULL;
	}
}

/**
 * xe_exec_queue_last_fence_put_unlocked() - Drop ref to last fence unlocked
 * @q: The exec queue
 *
 * Only safe to be called from xe_exec_queue_destroy().
 */
void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q)
{
	if (q->last_fence) {
		dma_fence_put(q->last_fence);
		q->last_fence = NULL;
	}
}

/**
 * xe_exec_queue_last_fence_get() - Get last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 *
 * Get last fence, takes a ref
 *
 * Returns: last fence if not signaled, dma fence stub if signaled
 */
struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
					       struct xe_vm *vm)
{
	struct dma_fence *fence;

	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	if (q->last_fence &&
	    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
		xe_exec_queue_last_fence_put(q, vm);

	fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
	dma_fence_get(fence);
	return fence;
}

/**
 * xe_exec_queue_last_fence_set() - Set last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 * @fence: The fence
 *
 * Set the last fence for the engine. Increases the reference count for the
 * fence; xe_exec_queue_last_fence_put() should be called when closing the engine.
 */
void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm,
				  struct dma_fence *fence)
{
	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	xe_exec_queue_last_fence_put(q, vm);
	q->last_fence = dma_fence_get(fence);
}
943