xref: /linux/drivers/gpu/drm/xe/xe_exec_queue.c (revision 2c1ed907520c50326b8f604907a8478b27881a2e)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5 
6 #include "xe_exec_queue.h"
7 
8 #include <linux/nospec.h>
9 
10 #include <drm/drm_device.h>
11 #include <drm/drm_drv.h>
12 #include <drm/drm_file.h>
13 #include <uapi/drm/xe_drm.h>
14 
15 #include "xe_device.h"
16 #include "xe_gt.h"
17 #include "xe_hw_engine_class_sysfs.h"
18 #include "xe_hw_engine_group.h"
19 #include "xe_hw_fence.h"
20 #include "xe_irq.h"
21 #include "xe_lrc.h"
22 #include "xe_macros.h"
23 #include "xe_migrate.h"
24 #include "xe_pm.h"
25 #include "xe_ring_ops_types.h"
26 #include "xe_trace.h"
27 #include "xe_vm.h"
28 
29 enum xe_exec_queue_sched_prop {
30 	XE_EXEC_QUEUE_JOB_TIMEOUT = 0,
31 	XE_EXEC_QUEUE_TIMESLICE = 1,
32 	XE_EXEC_QUEUE_PREEMPT_TIMEOUT = 2,
33 	XE_EXEC_QUEUE_SCHED_PROP_MAX = 3,
34 };
35 
36 static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
37 				      u64 extensions, int ext_number);
38 
39 static void __xe_exec_queue_free(struct xe_exec_queue *q)
40 {
41 	if (q->vm)
42 		xe_vm_put(q->vm);
43 
44 	if (q->xef)
45 		xe_file_put(q->xef);
46 
47 	kfree(q);
48 }
49 
50 static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
51 						   struct xe_vm *vm,
52 						   u32 logical_mask,
53 						   u16 width, struct xe_hw_engine *hwe,
54 						   u32 flags, u64 extensions)
55 {
56 	struct xe_exec_queue *q;
57 	struct xe_gt *gt = hwe->gt;
58 	int err;
59 
60 	/* only kernel queues can be permanent */
61 	XE_WARN_ON((flags & EXEC_QUEUE_FLAG_PERMANENT) && !(flags & EXEC_QUEUE_FLAG_KERNEL));
62 
63 	q = kzalloc(struct_size(q, lrc, width), GFP_KERNEL);
64 	if (!q)
65 		return ERR_PTR(-ENOMEM);
66 
67 	kref_init(&q->refcount);
68 	q->flags = flags;
69 	q->hwe = hwe;
70 	q->gt = gt;
71 	q->class = hwe->class;
72 	q->width = width;
73 	q->msix_vec = XE_IRQ_DEFAULT_MSIX;
74 	q->logical_mask = logical_mask;
75 	q->fence_irq = &gt->fence_irq[hwe->class];
76 	q->ring_ops = gt->ring_ops[hwe->class];
77 	q->ops = gt->exec_queue_ops;
78 	INIT_LIST_HEAD(&q->lr.link);
79 	INIT_LIST_HEAD(&q->multi_gt_link);
80 	INIT_LIST_HEAD(&q->hw_engine_group_link);
81 
82 	q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
83 	q->sched_props.preempt_timeout_us =
84 				hwe->eclass->sched_props.preempt_timeout_us;
85 	q->sched_props.job_timeout_ms =
86 				hwe->eclass->sched_props.job_timeout_ms;
87 	if (q->flags & EXEC_QUEUE_FLAG_KERNEL &&
88 	    q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY)
89 		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
90 	else
91 		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;
92 
93 	if (vm)
94 		q->vm = xe_vm_get(vm);
95 
96 	if (extensions) {
97 		/*
98 		 * may set q->usm, must come before xe_lrc_create(),
99 		 * may overwrite q->sched_props, must come before q->ops->init()
100 		 */
101 		err = exec_queue_user_extensions(xe, q, extensions, 0);
102 		if (err) {
103 			__xe_exec_queue_free(q);
104 			return ERR_PTR(err);
105 		}
106 	}
107 
108 	return q;
109 }
110 
111 static int __xe_exec_queue_init(struct xe_exec_queue *q)
112 {
113 	struct xe_vm *vm = q->vm;
114 	int i, err;
115 
116 	if (vm) {
117 		err = xe_vm_lock(vm, true);
118 		if (err)
119 			return err;
120 	}
121 
122 	for (i = 0; i < q->width; ++i) {
123 		q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K, q->msix_vec);
124 		if (IS_ERR(q->lrc[i])) {
125 			err = PTR_ERR(q->lrc[i]);
126 			goto err_unlock;
127 		}
128 	}
129 
130 	if (vm)
131 		xe_vm_unlock(vm);
132 
133 	err = q->ops->init(q);
134 	if (err)
135 		goto err_lrc;
136 
137 	return 0;
138 
139 err_unlock:
140 	if (vm)
141 		xe_vm_unlock(vm);
142 err_lrc:
143 	for (i = i - 1; i >= 0; --i)
144 		xe_lrc_put(q->lrc[i]);
145 	return err;
146 }
147 
148 struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
149 					   u32 logical_mask, u16 width,
150 					   struct xe_hw_engine *hwe, u32 flags,
151 					   u64 extensions)
152 {
153 	struct xe_exec_queue *q;
154 	int err;
155 
156 	q = __xe_exec_queue_alloc(xe, vm, logical_mask, width, hwe, flags,
157 				  extensions);
158 	if (IS_ERR(q))
159 		return q;
160 
161 	err = __xe_exec_queue_init(q);
162 	if (err)
163 		goto err_post_alloc;
164 
165 	return q;
166 
167 err_post_alloc:
168 	__xe_exec_queue_free(q);
169 	return ERR_PTR(err);
170 }
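
/*
 * Illustrative sketch, not part of this file: how a kernel-internal caller
 * might create and release a queue on a specific hw engine, based on the
 * signature above. 'xe', 'vm' and 'hwe' are assumed to be provided by the
 * caller; error handling is trimmed to the essentials.
 *
 *	struct xe_exec_queue *q;
 *
 *	q = xe_exec_queue_create(xe, vm, BIT(hwe->logical_instance), 1, hwe,
 *				 EXEC_QUEUE_FLAG_KERNEL, 0);
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);
 *
 *	... submit work on q ...
 *
 *	xe_exec_queue_put(q);
 */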
171 
172 struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
173 						 struct xe_vm *vm,
174 						 enum xe_engine_class class,
175 						 u32 flags, u64 extensions)
176 {
177 	struct xe_hw_engine *hwe, *hwe0 = NULL;
178 	enum xe_hw_engine_id id;
179 	u32 logical_mask = 0;
180 
181 	for_each_hw_engine(hwe, gt, id) {
182 		if (xe_hw_engine_is_reserved(hwe))
183 			continue;
184 
185 		if (hwe->class == class) {
186 			logical_mask |= BIT(hwe->logical_instance);
187 			if (!hwe0)
188 				hwe0 = hwe;
189 		}
190 	}
191 
192 	if (!logical_mask)
193 		return ERR_PTR(-ENODEV);
194 
195 	return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags, extensions);
196 }
197 
198 /**
199  * xe_exec_queue_create_bind() - Create bind exec queue.
200  * @xe: Xe device.
201  * @tile: tile which bind exec queue belongs to.
202  * @flags: exec queue creation flags
203  * @extensions: exec queue creation extensions
204  *
205  * Normalize bind exec queue creation. Bind exec queue is tied to migration VM
206  * for access to physical memory required for page table programming. On a
207  * faulting devices the reserved copy engine instance must be used to avoid
208  * deadlocking (user binds cannot get stuck behind faults as kernel binds which
209  * resolve faults depend on user binds). On non-faulting devices any copy engine
210  * can be used.
211  *
212  * Returns exec queue on success, ERR_PTR on failure
213  */
214 struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
215 						struct xe_tile *tile,
216 						u32 flags, u64 extensions)
217 {
218 	struct xe_gt *gt = tile->primary_gt;
219 	struct xe_exec_queue *q;
220 	struct xe_vm *migrate_vm;
221 
222 	migrate_vm = xe_migrate_get_vm(tile->migrate);
223 	if (xe->info.has_usm) {
224 		struct xe_hw_engine *hwe = xe_gt_hw_engine(gt,
225 							   XE_ENGINE_CLASS_COPY,
226 							   gt->usm.reserved_bcs_instance,
227 							   false);
228 
229 		if (!hwe) {
230 			xe_vm_put(migrate_vm);
231 			return ERR_PTR(-EINVAL);
232 		}
233 
234 		q = xe_exec_queue_create(xe, migrate_vm,
235 					 BIT(hwe->logical_instance), 1, hwe,
236 					 flags, extensions);
237 	} else {
238 		q = xe_exec_queue_create_class(xe, gt, migrate_vm,
239 					       XE_ENGINE_CLASS_COPY, flags,
240 					       extensions);
241 	}
242 	xe_vm_put(migrate_vm);
243 
244 	return q;
245 }
246 ALLOW_ERROR_INJECTION(xe_exec_queue_create_bind, ERRNO);
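
/*
 * Illustrative sketch, not part of this file: creating a bind queue for one
 * tile, along the lines of what xe_exec_queue_create_ioctl() below does for
 * DRM_XE_ENGINE_CLASS_VM_BIND. 'xe' and 'tile' are assumed to come from the
 * caller; only a single tile is handled here for brevity.
 *
 *	struct xe_exec_queue *q;
 *
 *	q = xe_exec_queue_create_bind(xe, tile, EXEC_QUEUE_FLAG_VM, 0);
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);
 *
 *	... use q for page-table bind jobs ...
 *
 *	xe_exec_queue_put(q);
 */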
247 
248 void xe_exec_queue_destroy(struct kref *ref)
249 {
250 	struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
251 	struct xe_exec_queue *eq, *next;
252 
253 	xe_exec_queue_last_fence_put_unlocked(q);
254 	if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) {
255 		list_for_each_entry_safe(eq, next, &q->multi_gt_list,
256 					 multi_gt_link)
257 			xe_exec_queue_put(eq);
258 	}
259 
260 	q->ops->fini(q);
261 }
262 
263 void xe_exec_queue_fini(struct xe_exec_queue *q)
264 {
265 	int i;
266 
267 	/*
268 	 * Before releasing our ref to lrc and xef, accumulate our run ticks
269 	 * and wake up any waiters.
270 	 */
271 	xe_exec_queue_update_run_ticks(q);
272 	if (q->xef && atomic_dec_and_test(&q->xef->exec_queue.pending_removal))
273 		wake_up_var(&q->xef->exec_queue.pending_removal);
274 
275 	for (i = 0; i < q->width; ++i)
276 		xe_lrc_put(q->lrc[i]);
277 
278 	__xe_exec_queue_free(q);
279 }
280 
281 void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance)
282 {
283 	switch (q->class) {
284 	case XE_ENGINE_CLASS_RENDER:
285 		snprintf(q->name, sizeof(q->name), "rcs%d", instance);
286 		break;
287 	case XE_ENGINE_CLASS_VIDEO_DECODE:
288 		snprintf(q->name, sizeof(q->name), "vcs%d", instance);
289 		break;
290 	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
291 		snprintf(q->name, sizeof(q->name), "vecs%d", instance);
292 		break;
293 	case XE_ENGINE_CLASS_COPY:
294 		snprintf(q->name, sizeof(q->name), "bcs%d", instance);
295 		break;
296 	case XE_ENGINE_CLASS_COMPUTE:
297 		snprintf(q->name, sizeof(q->name), "ccs%d", instance);
298 		break;
299 	case XE_ENGINE_CLASS_OTHER:
300 		snprintf(q->name, sizeof(q->name), "gsccs%d", instance);
301 		break;
302 	default:
303 		XE_WARN_ON(q->class);
304 	}
305 }
306 
307 struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id)
308 {
309 	struct xe_exec_queue *q;
310 
311 	mutex_lock(&xef->exec_queue.lock);
312 	q = xa_load(&xef->exec_queue.xa, id);
313 	if (q)
314 		xe_exec_queue_get(q);
315 	mutex_unlock(&xef->exec_queue.lock);
316 
317 	return q;
318 }
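
/*
 * Illustrative sketch, not part of this file: the lookup/put pattern used by
 * the ioctls below. A successful lookup takes a reference, so it must always
 * be paired with xe_exec_queue_put().
 *
 *	struct xe_exec_queue *q;
 *
 *	q = xe_exec_queue_lookup(xef, exec_queue_id);
 *	if (!q)
 *		return -ENOENT;
 *
 *	... operate on q ...
 *
 *	xe_exec_queue_put(q);
 */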
319 
320 enum xe_exec_queue_priority
321 xe_exec_queue_device_get_max_priority(struct xe_device *xe)
322 {
323 	return capable(CAP_SYS_NICE) ? XE_EXEC_QUEUE_PRIORITY_HIGH :
324 				       XE_EXEC_QUEUE_PRIORITY_NORMAL;
325 }
326 
327 static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q,
328 				   u64 value)
329 {
330 	if (XE_IOCTL_DBG(xe, value > XE_EXEC_QUEUE_PRIORITY_HIGH))
331 		return -EINVAL;
332 
333 	if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe)))
334 		return -EPERM;
335 
336 	q->sched_props.priority = value;
337 	return 0;
338 }
339 
340 static bool xe_exec_queue_enforce_schedule_limit(void)
341 {
342 #if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
343 	return true;
344 #else
345 	return !capable(CAP_SYS_NICE);
346 #endif
347 }
348 
349 static void
350 xe_exec_queue_get_prop_minmax(struct xe_hw_engine_class_intf *eclass,
351 			      enum xe_exec_queue_sched_prop prop,
352 			      u32 *min, u32 *max)
353 {
354 	switch (prop) {
355 	case XE_EXEC_QUEUE_JOB_TIMEOUT:
356 		*min = eclass->sched_props.job_timeout_min;
357 		*max = eclass->sched_props.job_timeout_max;
358 		break;
359 	case XE_EXEC_QUEUE_TIMESLICE:
360 		*min = eclass->sched_props.timeslice_min;
361 		*max = eclass->sched_props.timeslice_max;
362 		break;
363 	case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
364 		*min = eclass->sched_props.preempt_timeout_min;
365 		*max = eclass->sched_props.preempt_timeout_max;
366 		break;
367 	default:
368 		break;
369 	}
370 #if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
371 	if (capable(CAP_SYS_NICE)) {
372 		switch (prop) {
373 		case XE_EXEC_QUEUE_JOB_TIMEOUT:
374 			*min = XE_HW_ENGINE_JOB_TIMEOUT_MIN;
375 			*max = XE_HW_ENGINE_JOB_TIMEOUT_MAX;
376 			break;
377 		case XE_EXEC_QUEUE_TIMESLICE:
378 			*min = XE_HW_ENGINE_TIMESLICE_MIN;
379 			*max = XE_HW_ENGINE_TIMESLICE_MAX;
380 			break;
381 		case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
382 			*min = XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN;
383 			*max = XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX;
384 			break;
385 		default:
386 			break;
387 		}
388 	}
389 #endif
390 }
391 
392 static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q,
393 				    u64 value)
394 {
395 	u32 min = 0, max = 0;
396 
397 	xe_exec_queue_get_prop_minmax(q->hwe->eclass,
398 				      XE_EXEC_QUEUE_TIMESLICE, &min, &max);
399 
400 	if (xe_exec_queue_enforce_schedule_limit() &&
401 	    !xe_hw_engine_timeout_in_range(value, min, max))
402 		return -EINVAL;
403 
404 	q->sched_props.timeslice_us = value;
405 	return 0;
406 }
407 
408 typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
409 					     struct xe_exec_queue *q,
410 					     u64 value);
411 
412 static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
413 	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
414 	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
415 };
416 
417 static int exec_queue_user_ext_set_property(struct xe_device *xe,
418 					    struct xe_exec_queue *q,
419 					    u64 extension)
420 {
421 	u64 __user *address = u64_to_user_ptr(extension);
422 	struct drm_xe_ext_set_property ext;
423 	int err;
424 	u32 idx;
425 
426 	err = __copy_from_user(&ext, address, sizeof(ext));
427 	if (XE_IOCTL_DBG(xe, err))
428 		return -EFAULT;
429 
430 	if (XE_IOCTL_DBG(xe, ext.property >=
431 			 ARRAY_SIZE(exec_queue_set_property_funcs)) ||
432 	    XE_IOCTL_DBG(xe, ext.pad) ||
433 	    XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY &&
434 			 ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE))
435 		return -EINVAL;
436 
437 	idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
438 	if (!exec_queue_set_property_funcs[idx])
439 		return -EINVAL;
440 
441 	return exec_queue_set_property_funcs[idx](xe, q, ext.value);
442 }
443 
444 typedef int (*xe_exec_queue_user_extension_fn)(struct xe_device *xe,
445 					       struct xe_exec_queue *q,
446 					       u64 extension);
447 
448 static const xe_exec_queue_user_extension_fn exec_queue_user_extension_funcs[] = {
449 	[DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY] = exec_queue_user_ext_set_property,
450 };
451 
452 #define MAX_USER_EXTENSIONS	16
453 static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
454 				      u64 extensions, int ext_number)
455 {
456 	u64 __user *address = u64_to_user_ptr(extensions);
457 	struct drm_xe_user_extension ext;
458 	int err;
459 	u32 idx;
460 
461 	if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
462 		return -E2BIG;
463 
464 	err = __copy_from_user(&ext, address, sizeof(ext));
465 	if (XE_IOCTL_DBG(xe, err))
466 		return -EFAULT;
467 
468 	if (XE_IOCTL_DBG(xe, ext.pad) ||
469 	    XE_IOCTL_DBG(xe, ext.name >=
470 			 ARRAY_SIZE(exec_queue_user_extension_funcs)))
471 		return -EINVAL;
472 
473 	idx = array_index_nospec(ext.name,
474 				 ARRAY_SIZE(exec_queue_user_extension_funcs));
475 	err = exec_queue_user_extension_funcs[idx](xe, q, extensions);
476 	if (XE_IOCTL_DBG(xe, err))
477 		return err;
478 
479 	if (ext.next_extension)
480 		return exec_queue_user_extensions(xe, q, ext.next_extension,
481 						  ++ext_number);
482 
483 	return 0;
484 }
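
/*
 * Illustrative sketch, not part of this file: how userspace might chain a
 * SET_PROPERTY extension onto queue creation. Field names follow the
 * structures referenced above; the exact layout should be checked against
 * uapi/drm/xe_drm.h, and the example timeslice value is an assumption.
 * Unused fields are left zero-initialized, as the checks in this file require.
 *
 *	struct drm_xe_ext_set_property ext = {
 *		.base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
 *		.base.next_extension = 0,
 *		.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE,
 *		.value = 1000,
 *	};
 *	struct drm_xe_exec_queue_create create = {
 *		.extensions = (uintptr_t)&ext,
 *		... other creation parameters ...
 *	};
 *
 * A zero next_extension ends the chain; the value is the timeslice in
 * microseconds and is validated against the engine class limits by
 * exec_queue_set_timeslice() above.
 */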
485 
486 static u32 calc_validate_logical_mask(struct xe_device *xe, struct xe_gt *gt,
487 				      struct drm_xe_engine_class_instance *eci,
488 				      u16 width, u16 num_placements)
489 {
490 	int len = width * num_placements;
491 	int i, j, n;
492 	u16 class;
493 	u16 gt_id;
494 	u32 return_mask = 0, prev_mask;
495 
496 	if (XE_IOCTL_DBG(xe, !xe_device_uc_enabled(xe) &&
497 			 len > 1))
498 		return 0;
499 
500 	for (i = 0; i < width; ++i) {
501 		u32 current_mask = 0;
502 
503 		for (j = 0; j < num_placements; ++j) {
504 			struct xe_hw_engine *hwe;
505 
506 			n = j * width + i;
507 
508 			hwe = xe_hw_engine_lookup(xe, eci[n]);
509 			if (XE_IOCTL_DBG(xe, !hwe))
510 				return 0;
511 
512 			if (XE_IOCTL_DBG(xe, xe_hw_engine_is_reserved(hwe)))
513 				return 0;
514 
515 			if (XE_IOCTL_DBG(xe, n && eci[n].gt_id != gt_id) ||
516 			    XE_IOCTL_DBG(xe, n && eci[n].engine_class != class))
517 				return 0;
518 
519 			class = eci[n].engine_class;
520 			gt_id = eci[n].gt_id;
521 
522 			if (width == 1 || !i)
523 				return_mask |= BIT(eci[n].engine_instance);
524 			current_mask |= BIT(eci[n].engine_instance);
525 		}
526 
527 		/* Parallel submissions must be logically contiguous */
528 		if (i && XE_IOCTL_DBG(xe, current_mask != prev_mask << 1))
529 			return 0;
530 
531 		prev_mask = current_mask;
532 	}
533 
534 	return return_mask;
535 }
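
/*
 * Illustrative layout note, not part of this file: for width = 2 and
 * num_placements = 2 the instances array is indexed as eci[j * width + i],
 * i.e.
 *
 *	eci[0], eci[1]	placement 0, engines for parallel slots 0 and 1
 *	eci[2], eci[3]	placement 1, engines for parallel slots 0 and 1
 *
 * All entries must share the same gt_id and engine_class, and across the
 * width slots the chosen logical instances must be contiguous (the
 * prev_mask << 1 check above).
 */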
536 
537 int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
538 			       struct drm_file *file)
539 {
540 	struct xe_device *xe = to_xe_device(dev);
541 	struct xe_file *xef = to_xe_file(file);
542 	struct drm_xe_exec_queue_create *args = data;
543 	struct drm_xe_engine_class_instance eci[XE_HW_ENGINE_MAX_INSTANCE];
544 	struct drm_xe_engine_class_instance __user *user_eci =
545 		u64_to_user_ptr(args->instances);
546 	struct xe_hw_engine *hwe;
547 	struct xe_vm *vm;
548 	struct xe_gt *gt;
549 	struct xe_tile *tile;
550 	struct xe_exec_queue *q = NULL;
551 	u32 logical_mask;
552 	u32 id;
553 	u32 len;
554 	int err;
555 
556 	if (XE_IOCTL_DBG(xe, args->flags) ||
557 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
558 		return -EINVAL;
559 
560 	len = args->width * args->num_placements;
561 	if (XE_IOCTL_DBG(xe, !len || len > XE_HW_ENGINE_MAX_INSTANCE))
562 		return -EINVAL;
563 
564 	err = __copy_from_user(eci, user_eci,
565 			       sizeof(struct drm_xe_engine_class_instance) *
566 			       len);
567 	if (XE_IOCTL_DBG(xe, err))
568 		return -EFAULT;
569 
570 	if (XE_IOCTL_DBG(xe, eci[0].gt_id >= xe->info.gt_count))
571 		return -EINVAL;
572 
573 	if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) {
574 		if (XE_IOCTL_DBG(xe, args->width != 1) ||
575 		    XE_IOCTL_DBG(xe, args->num_placements != 1) ||
576 		    XE_IOCTL_DBG(xe, eci[0].engine_instance != 0))
577 			return -EINVAL;
578 
579 		for_each_tile(tile, xe, id) {
580 			struct xe_exec_queue *new;
581 			u32 flags = EXEC_QUEUE_FLAG_VM;
582 
583 			if (id)
584 				flags |= EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD;
585 
586 			new = xe_exec_queue_create_bind(xe, tile, flags,
587 							args->extensions);
588 			if (IS_ERR(new)) {
589 				err = PTR_ERR(new);
590 				if (q)
591 					goto put_exec_queue;
592 				return err;
593 			}
594 			if (id == 0)
595 				q = new;
596 			else
597 				list_add_tail(&new->multi_gt_list,
598 					      &q->multi_gt_link);
599 		}
600 	} else {
601 		gt = xe_device_get_gt(xe, eci[0].gt_id);
602 		logical_mask = calc_validate_logical_mask(xe, gt, eci,
603 							  args->width,
604 							  args->num_placements);
605 		if (XE_IOCTL_DBG(xe, !logical_mask))
606 			return -EINVAL;
607 
608 		hwe = xe_hw_engine_lookup(xe, eci[0]);
609 		if (XE_IOCTL_DBG(xe, !hwe))
610 			return -EINVAL;
611 
612 		vm = xe_vm_lookup(xef, args->vm_id);
613 		if (XE_IOCTL_DBG(xe, !vm))
614 			return -ENOENT;
615 
616 		err = down_read_interruptible(&vm->lock);
617 		if (err) {
618 			xe_vm_put(vm);
619 			return err;
620 		}
621 
622 		if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
623 			up_read(&vm->lock);
624 			xe_vm_put(vm);
625 			return -ENOENT;
626 		}
627 
628 		q = xe_exec_queue_create(xe, vm, logical_mask,
629 					 args->width, hwe, 0,
630 					 args->extensions);
631 		up_read(&vm->lock);
632 		xe_vm_put(vm);
633 		if (IS_ERR(q))
634 			return PTR_ERR(q);
635 
636 		if (xe_vm_in_preempt_fence_mode(vm)) {
637 			q->lr.context = dma_fence_context_alloc(1);
638 
639 			err = xe_vm_add_compute_exec_queue(vm, q);
640 			if (XE_IOCTL_DBG(xe, err))
641 				goto put_exec_queue;
642 		}
643 
644 		if (q->vm && q->hwe->hw_engine_group) {
645 			err = xe_hw_engine_group_add_exec_queue(q->hwe->hw_engine_group, q);
646 			if (err)
647 				goto put_exec_queue;
648 		}
649 	}
650 
651 	q->xef = xe_file_get(xef);
652 
653 	/* user id alloc must always be last in the ioctl to prevent a use-after-free */
654 	err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
655 	if (err)
656 		goto kill_exec_queue;
657 
658 	args->exec_queue_id = id;
659 
660 	return 0;
661 
662 kill_exec_queue:
663 	xe_exec_queue_kill(q);
664 put_exec_queue:
665 	xe_exec_queue_put(q);
666 	return err;
667 }
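
/*
 * Illustrative sketch, not part of this file: minimal userspace use of this
 * ioctl for a single render engine, based on the uapi structures referenced
 * above. 'fd' is an open xe render node and 'vm_id' a previously created VM;
 * error handling is omitted and exact field layout should be checked against
 * uapi/drm/xe_drm.h.
 *
 *	struct drm_xe_engine_class_instance eci = {
 *		.engine_class = DRM_XE_ENGINE_CLASS_RENDER,
 *		.engine_instance = 0,
 *		.gt_id = 0,
 *	};
 *	struct drm_xe_exec_queue_create create = {
 *		.width = 1,
 *		.num_placements = 1,
 *		.vm_id = vm_id,
 *		.instances = (uintptr_t)&eci,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create);
 *
 * On success, create.exec_queue_id holds the new queue id.
 */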
668 
669 int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
670 				     struct drm_file *file)
671 {
672 	struct xe_device *xe = to_xe_device(dev);
673 	struct xe_file *xef = to_xe_file(file);
674 	struct drm_xe_exec_queue_get_property *args = data;
675 	struct xe_exec_queue *q;
676 	int ret;
677 
678 	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
679 		return -EINVAL;
680 
681 	q = xe_exec_queue_lookup(xef, args->exec_queue_id);
682 	if (XE_IOCTL_DBG(xe, !q))
683 		return -ENOENT;
684 
685 	switch (args->property) {
686 	case DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN:
687 		args->value = q->ops->reset_status(q);
688 		ret = 0;
689 		break;
690 	default:
691 		ret = -EINVAL;
692 	}
693 
694 	xe_exec_queue_put(q);
695 
696 	return ret;
697 }
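
/*
 * Illustrative sketch, not part of this file: querying the ban/reset status
 * of a queue from userspace with this ioctl. 'fd' and 'exec_queue_id' are
 * assumed to come from earlier calls; field names should be checked against
 * uapi/drm/xe_drm.h.
 *
 *	struct drm_xe_exec_queue_get_property gp = {
 *		.exec_queue_id = exec_queue_id,
 *		.property = DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY, &gp);
 *
 * A non-zero gp.value indicates the queue has been banned/reset.
 */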
698 
699 /**
700  * xe_exec_queue_is_lr() - Whether an exec_queue is long-running
701  * @q: The exec_queue
702  *
703  * Return: True if the exec_queue is long-running, false otherwise.
704  */
705 bool xe_exec_queue_is_lr(struct xe_exec_queue *q)
706 {
707 	return q->vm && xe_vm_in_lr_mode(q->vm) &&
708 		!(q->flags & EXEC_QUEUE_FLAG_VM);
709 }
710 
711 static s32 xe_exec_queue_num_job_inflight(struct xe_exec_queue *q)
712 {
713 	return q->lrc[0]->fence_ctx.next_seqno - xe_lrc_seqno(q->lrc[0]) - 1;
714 }
715 
716 /**
717  * xe_exec_queue_ring_full() - Whether an exec_queue's ring is full
718  * @q: The exec_queue
719  *
720  * Return: True if the exec_queue's ring is full, false otherwise.
721  */
722 bool xe_exec_queue_ring_full(struct xe_exec_queue *q)
723 {
724 	struct xe_lrc *lrc = q->lrc[0];
725 	s32 max_job = lrc->ring.size / MAX_JOB_SIZE_BYTES;
726 
727 	return xe_exec_queue_num_job_inflight(q) >= max_job;
728 }
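
/*
 * Worked example, not part of this file: with fence_ctx.next_seqno = 10 and
 * a hardware seqno of 5, xe_exec_queue_num_job_inflight() reports
 * 10 - 5 - 1 = 4 jobs still in flight. The ring is treated as full once that
 * count reaches ring.size / MAX_JOB_SIZE_BYTES, the number of worst-case
 * sized jobs the ring can hold.
 */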
729 
730 /**
731  * xe_exec_queue_is_idle() - Whether an exec_queue is idle.
732  * @q: The exec_queue
733  *
734  * FIXME: Need to determine what to use as the short-lived
735  * timeline lock for the exec_queues, so that the return value
736  * of this function becomes more than just an advisory
737  * snapshot in time. The timeline lock must protect the
738  * seqno from racing submissions on the same exec_queue.
739 	 * Typically vm->resv, but user-created timeline locks use the migrate vm
740 	 * and never grab the migrate vm->resv, so we have a race there.
741  *
742  * Return: True if the exec_queue is idle, false otherwise.
743  */
744 bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
745 {
746 	if (xe_exec_queue_is_parallel(q)) {
747 		int i;
748 
749 		for (i = 0; i < q->width; ++i) {
750 			if (xe_lrc_seqno(q->lrc[i]) !=
751 			    q->lrc[i]->fence_ctx.next_seqno - 1)
752 				return false;
753 		}
754 
755 		return true;
756 	}
757 
758 	return xe_lrc_seqno(q->lrc[0]) ==
759 		q->lrc[0]->fence_ctx.next_seqno - 1;
760 }
761 
762 /**
763  * xe_exec_queue_update_run_ticks() - Update run time in ticks for this exec queue
764  * from hw
765  * @q: The exec queue
766  *
767  * Update the timestamp saved by HW for this exec queue and save run ticks
768  * calculated using the delta from the last update.
769  */
770 void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
771 {
772 	struct xe_device *xe = gt_to_xe(q->gt);
773 	struct xe_lrc *lrc;
774 	u32 old_ts, new_ts;
775 	int idx;
776 
777 	/*
778 	 * Jobs executed by the kernel don't have a corresponding xe_file and
779 	 * thus are not accounted.
780 	 */
781 	if (!q->xef)
782 		return;
783 
784 	/* Synchronize with unbind while holding the xe file open */
785 	if (!drm_dev_enter(&xe->drm, &idx))
786 		return;
787 	/*
788 	 * Only sample the first LRC. For parallel submission, all of them are
789 	 * scheduled together and we compensate for that below by multiplying by
790 	 * width - this may introduce errors if that premise is not true and
791 	 * they don't exit 100% aligned. On the other hand, looping through
792 	 * the LRCs and reading them at different times could also introduce
793 	 * errors.
794 	 */
795 	lrc = q->lrc[0];
796 	new_ts = xe_lrc_update_timestamp(lrc, &old_ts);
797 	q->xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;
798 
799 	drm_dev_exit(idx);
800 }
801 
802 /**
803  * xe_exec_queue_kill - permanently stop all execution from an exec queue
804  * @q: The exec queue
805  *
806  * This function permanently stops all activity on an exec queue. If the queue
807  * is actively executing on the HW, it will be kicked off the engine; any
808  * pending jobs are discarded and all future submissions are rejected.
809  * This function is safe to call multiple times.
810  */
811 void xe_exec_queue_kill(struct xe_exec_queue *q)
812 {
813 	struct xe_exec_queue *eq = q, *next;
814 
815 	list_for_each_entry_safe(eq, next, &eq->multi_gt_list,
816 				 multi_gt_link) {
817 		q->ops->kill(eq);
818 		xe_vm_remove_compute_exec_queue(q->vm, eq);
819 	}
820 
821 	q->ops->kill(q);
822 	xe_vm_remove_compute_exec_queue(q->vm, q);
823 }
824 
825 int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
826 				struct drm_file *file)
827 {
828 	struct xe_device *xe = to_xe_device(dev);
829 	struct xe_file *xef = to_xe_file(file);
830 	struct drm_xe_exec_queue_destroy *args = data;
831 	struct xe_exec_queue *q;
832 
833 	if (XE_IOCTL_DBG(xe, args->pad) ||
834 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
835 		return -EINVAL;
836 
837 	mutex_lock(&xef->exec_queue.lock);
838 	q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id);
839 	if (q)
840 		atomic_inc(&xef->exec_queue.pending_removal);
841 	mutex_unlock(&xef->exec_queue.lock);
842 
843 	if (XE_IOCTL_DBG(xe, !q))
844 		return -ENOENT;
845 
846 	if (q->vm && q->hwe->hw_engine_group)
847 		xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q);
848 
849 	xe_exec_queue_kill(q);
850 
851 	trace_xe_exec_queue_close(q);
852 	xe_exec_queue_put(q);
853 
854 	return 0;
855 }
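
/*
 * Illustrative sketch, not part of this file: destroying a queue from
 * userspace. 'fd' and 'exec_queue_id' come from the create ioctl above;
 * field layout should be checked against uapi/drm/xe_drm.h.
 *
 *	struct drm_xe_exec_queue_destroy destroy = {
 *		.exec_queue_id = exec_queue_id,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_DESTROY, &destroy);
 */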
856 
857 static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q,
858 						    struct xe_vm *vm)
859 {
860 	if (q->flags & EXEC_QUEUE_FLAG_VM) {
861 		lockdep_assert_held(&vm->lock);
862 	} else {
863 		xe_vm_assert_held(vm);
864 		lockdep_assert_held(&q->hwe->hw_engine_group->mode_sem);
865 	}
866 }
867 
868 /**
869  * xe_exec_queue_last_fence_put() - Drop ref to last fence
870  * @q: The exec queue
871  * @vm: The VM the engine does a bind or exec for
872  */
873 void xe_exec_queue_last_fence_put(struct xe_exec_queue *q, struct xe_vm *vm)
874 {
875 	xe_exec_queue_last_fence_lockdep_assert(q, vm);
876 
877 	xe_exec_queue_last_fence_put_unlocked(q);
878 }
879 
880 /**
881  * xe_exec_queue_last_fence_put_unlocked() - Drop ref to last fence unlocked
882  * @q: The exec queue
883  *
884  * Only safe to be called from xe_exec_queue_destroy().
885  */
886 void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q)
887 {
888 	if (q->last_fence) {
889 		dma_fence_put(q->last_fence);
890 		q->last_fence = NULL;
891 	}
892 }
893 
894 /**
895  * xe_exec_queue_last_fence_get() - Get last fence
896  * @q: The exec queue
897  * @vm: The VM the engine does a bind or exec for
898  *
899  * Get last fence, takes a ref
900  *
901  * Returns: last fence if not signaled, dma fence stub if signaled
902  */
903 struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
904 					       struct xe_vm *vm)
905 {
906 	struct dma_fence *fence;
907 
908 	xe_exec_queue_last_fence_lockdep_assert(q, vm);
909 
910 	if (q->last_fence &&
911 	    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
912 		xe_exec_queue_last_fence_put(q, vm);
913 
914 	fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
915 	dma_fence_get(fence);
916 	return fence;
917 }
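
/*
 * Illustrative sketch, not part of this file: the get/set pattern callers
 * use around new bind or exec work, under the locking documented in
 * xe_exec_queue_last_fence_lockdep_assert(). 'new_fence' stands for the
 * fence of the freshly submitted work.
 *
 *	struct dma_fence *prev;
 *
 *	prev = xe_exec_queue_last_fence_get(q, vm);
 *	... order the new work after prev ...
 *	dma_fence_put(prev);
 *
 *	xe_exec_queue_last_fence_set(q, vm, new_fence);
 */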
918 
919 /**
920  * xe_exec_queue_last_fence_get_for_resume() - Get last fence
921  * @q: The exec queue
922  * @vm: The VM the engine does a bind or exec for
923  *
924  * Get last fence, takes a ref. Only safe to be called in the context of
925  * resuming the hw engine group's long-running exec queue, when the group
926  * semaphore is held.
927  *
928  * Returns: last fence if not signaled, dma fence stub if signaled
929  */
930 struct dma_fence *xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue *q,
931 							  struct xe_vm *vm)
932 {
933 	struct dma_fence *fence;
934 
935 	lockdep_assert_held_write(&q->hwe->hw_engine_group->mode_sem);
936 
937 	if (q->last_fence &&
938 	    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
939 		xe_exec_queue_last_fence_put_unlocked(q);
940 
941 	fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
942 	dma_fence_get(fence);
943 	return fence;
944 }
945 
946 /**
947  * xe_exec_queue_last_fence_set() - Set last fence
948  * @q: The exec queue
949  * @vm: The VM the engine does a bind or exec for
950  * @fence: The fence
951  *
952  * Set the last fence for the engine. This takes a reference on the fence;
953  * when closing the engine, xe_exec_queue_last_fence_put() should be called.
954  */
955 void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm,
956 				  struct dma_fence *fence)
957 {
958 	xe_exec_queue_last_fence_lockdep_assert(q, vm);
959 
960 	xe_exec_queue_last_fence_put(q, vm);
961 	q->last_fence = dma_fence_get(fence);
962 }
963 
964 /**
965  * xe_exec_queue_last_fence_test_dep - Test last fence dependency of queue
966  * @q: The exec queue
967  * @vm: The VM the engine does a bind or exec for
968  *
969  * Returns:
970  * -ETIME if there exists an unsignalled last fence dependency, zero otherwise.
971  */
972 int xe_exec_queue_last_fence_test_dep(struct xe_exec_queue *q, struct xe_vm *vm)
973 {
974 	struct dma_fence *fence;
975 	int err = 0;
976 
977 	fence = xe_exec_queue_last_fence_get(q, vm);
978 	if (fence) {
979 		err = test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) ?
980 			0 : -ETIME;
981 		dma_fence_put(fence);
982 	}
983 
984 	return err;
985 }
986