xref: /linux/drivers/gpu/drm/xe/xe_exec_queue.c (revision e332935a540eb76dd656663ca908eb0544d96757)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_exec_queue.h"

#include <linux/nospec.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <uapi/drm/xe_drm.h>

#include "xe_device.h"
#include "xe_gt.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_hw_engine_group.h"
#include "xe_hw_fence.h"
#include "xe_irq.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_migrate.h"
#include "xe_pm.h"
#include "xe_pxp.h"
#include "xe_ring_ops_types.h"
#include "xe_trace.h"
#include "xe_vm.h"

enum xe_exec_queue_sched_prop {
	XE_EXEC_QUEUE_JOB_TIMEOUT = 0,
	XE_EXEC_QUEUE_TIMESLICE = 1,
	XE_EXEC_QUEUE_PREEMPT_TIMEOUT = 2,
	XE_EXEC_QUEUE_SCHED_PROP_MAX = 3,
};

static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 extensions, int ext_number);

static void __xe_exec_queue_free(struct xe_exec_queue *q)
{
	if (xe_exec_queue_uses_pxp(q))
		xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);
	if (q->vm)
		xe_vm_put(q->vm);

	if (q->xef)
		xe_file_put(q->xef);

	kfree(q);
}

static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
						   struct xe_vm *vm,
						   u32 logical_mask,
						   u16 width, struct xe_hw_engine *hwe,
						   u32 flags, u64 extensions)
{
	struct xe_exec_queue *q;
	struct xe_gt *gt = hwe->gt;
	int err;

	/* only kernel queues can be permanent */
	XE_WARN_ON((flags & EXEC_QUEUE_FLAG_PERMANENT) && !(flags & EXEC_QUEUE_FLAG_KERNEL));

	q = kzalloc(struct_size(q, lrc, width), GFP_KERNEL);
	if (!q)
		return ERR_PTR(-ENOMEM);

	kref_init(&q->refcount);
	q->flags = flags;
	q->hwe = hwe;
	q->gt = gt;
	q->class = hwe->class;
	q->width = width;
	q->msix_vec = XE_IRQ_DEFAULT_MSIX;
	q->logical_mask = logical_mask;
	q->fence_irq = &gt->fence_irq[hwe->class];
	q->ring_ops = gt->ring_ops[hwe->class];
	q->ops = gt->exec_queue_ops;
	INIT_LIST_HEAD(&q->lr.link);
	INIT_LIST_HEAD(&q->multi_gt_link);
	INIT_LIST_HEAD(&q->hw_engine_group_link);
	INIT_LIST_HEAD(&q->pxp.link);

	q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
	q->sched_props.preempt_timeout_us =
				hwe->eclass->sched_props.preempt_timeout_us;
	q->sched_props.job_timeout_ms =
				hwe->eclass->sched_props.job_timeout_ms;
	if (q->flags & EXEC_QUEUE_FLAG_KERNEL &&
	    q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY)
		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
	else
		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;

	if (vm)
		q->vm = xe_vm_get(vm);

	if (extensions) {
		/*
		 * User extensions may set q->usm, so they must be handled
		 * before xe_lrc_create(); they may also overwrite
		 * q->sched_props, so they must be handled before
		 * q->ops->init().
		 */
		err = exec_queue_user_extensions(xe, q, extensions, 0);
		if (err) {
			__xe_exec_queue_free(q);
			return ERR_PTR(err);
		}
	}

	return q;
}

static int __xe_exec_queue_init(struct xe_exec_queue *q)
{
	int i, err;
	u32 flags = 0;

	/*
	 * PXP workloads executing on RCS or CCS must run in isolation (i.e. no
	 * other workload can use the EUs at the same time). On MTL this is done
	 * by setting the RUNALONE bit in the LRC, while starting on Xe2 there
	 * is a dedicated bit for it.
	 */
	if (xe_exec_queue_uses_pxp(q) &&
	    (q->class == XE_ENGINE_CLASS_RENDER || q->class == XE_ENGINE_CLASS_COMPUTE)) {
		if (GRAPHICS_VER(gt_to_xe(q->gt)) >= 20)
			flags |= XE_LRC_CREATE_PXP;
		else
			flags |= XE_LRC_CREATE_RUNALONE;
	}

	for (i = 0; i < q->width; ++i) {
		q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K, q->msix_vec, flags);
		if (IS_ERR(q->lrc[i])) {
			err = PTR_ERR(q->lrc[i]);
			goto err_lrc;
		}
	}

	err = q->ops->init(q);
	if (err)
		goto err_lrc;

	return 0;

err_lrc:
	for (i = i - 1; i >= 0; --i)
		xe_lrc_put(q->lrc[i]);
	return err;
}

struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
					   u32 logical_mask, u16 width,
					   struct xe_hw_engine *hwe, u32 flags,
					   u64 extensions)
{
	struct xe_exec_queue *q;
	int err;

	/* VMs for GSCCS queues (and only those) must have the XE_VM_FLAG_GSC flag */
	xe_assert(xe, !vm || (!!(vm->flags & XE_VM_FLAG_GSC) == !!(hwe->engine_id == XE_HW_ENGINE_GSCCS0)));

	q = __xe_exec_queue_alloc(xe, vm, logical_mask, width, hwe, flags,
				  extensions);
	if (IS_ERR(q))
		return q;

	err = __xe_exec_queue_init(q);
	if (err)
		goto err_post_alloc;

	/*
	 * We can only add the queue to the PXP list after the init is complete,
	 * because the PXP termination can call exec_queue_kill and that will
	 * go bad if the queue is only half-initialized. This means that we
	 * can't do it when we handle the PXP extension in __xe_exec_queue_alloc
	 * and we need to do it here instead.
	 */
	if (xe_exec_queue_uses_pxp(q)) {
		err = xe_pxp_exec_queue_add(xe->pxp, q);
		if (err)
			goto err_post_alloc;
	}

	return q;

err_post_alloc:
	__xe_exec_queue_free(q);
	return ERR_PTR(err);
}
ALLOW_ERROR_INJECTION(xe_exec_queue_create, ERRNO);
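
/*
 * Example (illustrative sketch, not part of the driver): creating a
 * kernel-owned queue on a specific hw engine and dropping the creation
 * reference once done. The xe, vm and hwe variables are assumed to come
 * from the caller's context.
 *
 *	struct xe_exec_queue *q;
 *
 *	q = xe_exec_queue_create(xe, vm, BIT(hwe->logical_instance), 1,
 *				 hwe, EXEC_QUEUE_FLAG_KERNEL, 0);
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);
 *
 *	... submit work ...
 *
 *	xe_exec_queue_put(q);
 */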

struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
						 struct xe_vm *vm,
						 enum xe_engine_class class,
						 u32 flags, u64 extensions)
{
	struct xe_hw_engine *hwe, *hwe0 = NULL;
	enum xe_hw_engine_id id;
	u32 logical_mask = 0;

	for_each_hw_engine(hwe, gt, id) {
		if (xe_hw_engine_is_reserved(hwe))
			continue;

		if (hwe->class == class) {
			logical_mask |= BIT(hwe->logical_instance);
			if (!hwe0)
				hwe0 = hwe;
		}
	}

	if (!logical_mask)
		return ERR_PTR(-ENODEV);

	return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags, extensions);
}

/**
 * xe_exec_queue_create_bind() - Create bind exec queue.
 * @xe: Xe device.
 * @tile: tile which bind exec queue belongs to.
 * @flags: exec queue creation flags
 * @extensions: exec queue creation extensions
 *
 * Normalize bind exec queue creation. A bind exec queue is tied to the
 * migration VM for access to the physical memory required for page table
 * programming. On faulting devices the reserved copy engine instance must be
 * used to avoid deadlock (user binds must not get stuck behind faults, since
 * kernel binds which resolve faults depend on user binds). On non-faulting
 * devices any copy engine can be used.
 *
 * Returns exec queue on success, ERR_PTR on failure
 */
struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
						struct xe_tile *tile,
						u32 flags, u64 extensions)
{
	struct xe_gt *gt = tile->primary_gt;
	struct xe_exec_queue *q;
	struct xe_vm *migrate_vm;

	migrate_vm = xe_migrate_get_vm(tile->migrate);
	if (xe->info.has_usm) {
		struct xe_hw_engine *hwe = xe_gt_hw_engine(gt,
							   XE_ENGINE_CLASS_COPY,
							   gt->usm.reserved_bcs_instance,
							   false);

		if (!hwe) {
			xe_vm_put(migrate_vm);
			return ERR_PTR(-EINVAL);
		}

		q = xe_exec_queue_create(xe, migrate_vm,
					 BIT(hwe->logical_instance), 1, hwe,
					 flags, extensions);
	} else {
		q = xe_exec_queue_create_class(xe, gt, migrate_vm,
					       XE_ENGINE_CLASS_COPY, flags,
					       extensions);
	}
	xe_vm_put(migrate_vm);

	return q;
}
ALLOW_ERROR_INJECTION(xe_exec_queue_create_bind, ERRNO);
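
/*
 * Example (illustrative sketch): creating a bind queue for the root tile,
 * much like the VM-bind paths do. The xe pointer is assumed to come from
 * the caller; EXEC_QUEUE_FLAG_VM marks the queue as a VM bind queue.
 *
 *	struct xe_exec_queue *q;
 *
 *	q = xe_exec_queue_create_bind(xe, xe_device_get_root_tile(xe),
 *				      EXEC_QUEUE_FLAG_VM, 0);
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);
 */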

void xe_exec_queue_destroy(struct kref *ref)
{
	struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
	struct xe_exec_queue *eq, *next;

	if (xe_exec_queue_uses_pxp(q))
		xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);

	xe_exec_queue_last_fence_put_unlocked(q);
	if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) {
		list_for_each_entry_safe(eq, next, &q->multi_gt_list,
					 multi_gt_link)
			xe_exec_queue_put(eq);
	}

	q->ops->fini(q);
}

void xe_exec_queue_fini(struct xe_exec_queue *q)
{
	int i;

	/*
	 * Before releasing our ref to the lrc and xef, accumulate our run
	 * ticks and wake up any waiters.
	 */
	xe_exec_queue_update_run_ticks(q);
	if (q->xef && atomic_dec_and_test(&q->xef->exec_queue.pending_removal))
		wake_up_var(&q->xef->exec_queue.pending_removal);

	for (i = 0; i < q->width; ++i)
		xe_lrc_put(q->lrc[i]);

	__xe_exec_queue_free(q);
}

void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance)
{
	switch (q->class) {
	case XE_ENGINE_CLASS_RENDER:
		snprintf(q->name, sizeof(q->name), "rcs%d", instance);
		break;
	case XE_ENGINE_CLASS_VIDEO_DECODE:
		snprintf(q->name, sizeof(q->name), "vcs%d", instance);
		break;
	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
		snprintf(q->name, sizeof(q->name), "vecs%d", instance);
		break;
	case XE_ENGINE_CLASS_COPY:
		snprintf(q->name, sizeof(q->name), "bcs%d", instance);
		break;
	case XE_ENGINE_CLASS_COMPUTE:
		snprintf(q->name, sizeof(q->name), "ccs%d", instance);
		break;
	case XE_ENGINE_CLASS_OTHER:
		snprintf(q->name, sizeof(q->name), "gsccs%d", instance);
		break;
	default:
		XE_WARN_ON(q->class);
	}
}

struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id)
{
	struct xe_exec_queue *q;

	mutex_lock(&xef->exec_queue.lock);
	q = xa_load(&xef->exec_queue.xa, id);
	if (q)
		xe_exec_queue_get(q);
	mutex_unlock(&xef->exec_queue.lock);

	return q;
}
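
/*
 * Example (illustrative sketch): resolving a user-supplied queue id and
 * balancing the reference taken by the lookup. xef and id are assumed to
 * come from the surrounding ioctl handler.
 *
 *	struct xe_exec_queue *q;
 *
 *	q = xe_exec_queue_lookup(xef, id);
 *	if (!q)
 *		return -ENOENT;
 *
 *	... use q ...
 *
 *	xe_exec_queue_put(q);
 */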

enum xe_exec_queue_priority
xe_exec_queue_device_get_max_priority(struct xe_device *xe)
{
	return capable(CAP_SYS_NICE) ? XE_EXEC_QUEUE_PRIORITY_HIGH :
				       XE_EXEC_QUEUE_PRIORITY_NORMAL;
}

static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q,
				   u64 value)
{
	if (XE_IOCTL_DBG(xe, value > XE_EXEC_QUEUE_PRIORITY_HIGH))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe)))
		return -EPERM;

	q->sched_props.priority = value;
	return 0;
}

static bool xe_exec_queue_enforce_schedule_limit(void)
{
#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
	return true;
#else
	return !capable(CAP_SYS_NICE);
#endif
}

static void
xe_exec_queue_get_prop_minmax(struct xe_hw_engine_class_intf *eclass,
			      enum xe_exec_queue_sched_prop prop,
			      u32 *min, u32 *max)
{
	switch (prop) {
	case XE_EXEC_QUEUE_JOB_TIMEOUT:
		*min = eclass->sched_props.job_timeout_min;
		*max = eclass->sched_props.job_timeout_max;
		break;
	case XE_EXEC_QUEUE_TIMESLICE:
		*min = eclass->sched_props.timeslice_min;
		*max = eclass->sched_props.timeslice_max;
		break;
	case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
		*min = eclass->sched_props.preempt_timeout_min;
		*max = eclass->sched_props.preempt_timeout_max;
		break;
	default:
		break;
	}
#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
	if (capable(CAP_SYS_NICE)) {
		switch (prop) {
		case XE_EXEC_QUEUE_JOB_TIMEOUT:
			*min = XE_HW_ENGINE_JOB_TIMEOUT_MIN;
			*max = XE_HW_ENGINE_JOB_TIMEOUT_MAX;
			break;
		case XE_EXEC_QUEUE_TIMESLICE:
			*min = XE_HW_ENGINE_TIMESLICE_MIN;
			*max = XE_HW_ENGINE_TIMESLICE_MAX;
			break;
		case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
			*min = XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN;
			*max = XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX;
			break;
		default:
			break;
		}
	}
#endif
}

static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q,
				    u64 value)
{
	u32 min = 0, max = 0;

	xe_exec_queue_get_prop_minmax(q->hwe->eclass,
				      XE_EXEC_QUEUE_TIMESLICE, &min, &max);

	if (xe_exec_queue_enforce_schedule_limit() &&
	    !xe_hw_engine_timeout_in_range(value, min, max))
		return -EINVAL;

	q->sched_props.timeslice_us = value;
	return 0;
}

static int
exec_queue_set_pxp_type(struct xe_device *xe, struct xe_exec_queue *q, u64 value)
{
	if (value == DRM_XE_PXP_TYPE_NONE)
		return 0;

	/* we only support HWDRM sessions right now */
	if (XE_IOCTL_DBG(xe, value != DRM_XE_PXP_TYPE_HWDRM))
		return -EINVAL;

	if (!xe_pxp_is_enabled(xe->pxp))
		return -ENODEV;

	return xe_pxp_exec_queue_set_type(xe->pxp, q, DRM_XE_PXP_TYPE_HWDRM);
}

typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
					     struct xe_exec_queue *q,
					     u64 value);

static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE] = exec_queue_set_pxp_type,
};

static int exec_queue_user_ext_set_property(struct xe_device *xe,
					    struct xe_exec_queue *q,
					    u64 extension)
{
	u64 __user *address = u64_to_user_ptr(extension);
	struct drm_xe_ext_set_property ext;
	int err;
	u32 idx;

	err = copy_from_user(&ext, address, sizeof(ext));
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, ext.property >=
			 ARRAY_SIZE(exec_queue_set_property_funcs)) ||
	    XE_IOCTL_DBG(xe, ext.pad) ||
	    XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY &&
			 ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE &&
			 ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE))
		return -EINVAL;

	idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
	if (!exec_queue_set_property_funcs[idx])
		return -EINVAL;

	return exec_queue_set_property_funcs[idx](xe, q, ext.value);
}
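
/*
 * Example (illustrative, userspace side): a single set-property extension
 * raising the queue priority, passed via the create ioctl's extensions
 * pointer. The value 2 assumes the low/normal/high priority levels map to
 * 0/1/2, matching the check against XE_EXEC_QUEUE_PRIORITY_HIGH above.
 *
 *	struct drm_xe_ext_set_property ext = {
 *		.base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
 *		.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY,
 *		.value = 2,
 *	};
 *
 *	create.extensions = (uintptr_t)&ext;
 */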

typedef int (*xe_exec_queue_user_extension_fn)(struct xe_device *xe,
					       struct xe_exec_queue *q,
					       u64 extension);

static const xe_exec_queue_user_extension_fn exec_queue_user_extension_funcs[] = {
	[DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY] = exec_queue_user_ext_set_property,
};

#define MAX_USER_EXTENSIONS	16
static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 extensions, int ext_number)
{
	u64 __user *address = u64_to_user_ptr(extensions);
	struct drm_xe_user_extension ext;
	int err;
	u32 idx;

	if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
		return -E2BIG;

	err = copy_from_user(&ext, address, sizeof(ext));
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, ext.pad) ||
	    XE_IOCTL_DBG(xe, ext.name >=
			 ARRAY_SIZE(exec_queue_user_extension_funcs)))
		return -EINVAL;

	idx = array_index_nospec(ext.name,
				 ARRAY_SIZE(exec_queue_user_extension_funcs));
	err = exec_queue_user_extension_funcs[idx](xe, q, extensions);
	if (XE_IOCTL_DBG(xe, err))
		return err;

	if (ext.next_extension)
		return exec_queue_user_extensions(xe, q, ext.next_extension,
						  ++ext_number);

	return 0;
}
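
/*
 * Example (illustrative, userspace side): chaining two set-property
 * extensions through base.next_extension; the list is walked recursively
 * above, up to MAX_USER_EXTENSIONS entries. The timeslice value is in
 * microseconds (it lands in q->sched_props.timeslice_us).
 *
 *	struct drm_xe_ext_set_property timeslice = {
 *		.base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
 *		.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE,
 *		.value = 1000,
 *	};
 *	struct drm_xe_ext_set_property prio = {
 *		.base.next_extension = (uintptr_t)&timeslice,
 *		.base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
 *		.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY,
 *		.value = 0,
 *	};
 *
 *	create.extensions = (uintptr_t)&prio;
 */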

static u32 calc_validate_logical_mask(struct xe_device *xe,
				      struct drm_xe_engine_class_instance *eci,
				      u16 width, u16 num_placements)
{
	int len = width * num_placements;
	int i, j, n;
	u16 class;
	u16 gt_id;
	u32 return_mask = 0, prev_mask;

	if (XE_IOCTL_DBG(xe, !xe_device_uc_enabled(xe) &&
			 len > 1))
		return 0;

	for (i = 0; i < width; ++i) {
		u32 current_mask = 0;

		for (j = 0; j < num_placements; ++j) {
			struct xe_hw_engine *hwe;

			n = j * width + i;

			hwe = xe_hw_engine_lookup(xe, eci[n]);
			if (XE_IOCTL_DBG(xe, !hwe))
				return 0;

			if (XE_IOCTL_DBG(xe, xe_hw_engine_is_reserved(hwe)))
				return 0;

			if (XE_IOCTL_DBG(xe, n && eci[n].gt_id != gt_id) ||
			    XE_IOCTL_DBG(xe, n && eci[n].engine_class != class))
				return 0;

			class = eci[n].engine_class;
			gt_id = eci[n].gt_id;

			if (width == 1 || !i)
				return_mask |= BIT(eci[n].engine_instance);
			current_mask |= BIT(eci[n].engine_instance);
		}

		/* Parallel submissions must be logically contiguous */
		if (i && XE_IOCTL_DBG(xe, current_mask != prev_mask << 1))
			return 0;

		prev_mask = current_mask;
	}

	return return_mask;
}
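
/*
 * Example: for width == 2 and num_placements == 2, userspace passes a
 * flattened 2-D array and the n = j * width + i indexing above reads it
 * placement-major:
 *
 *	eci[0] = BB0, placement 0	eci[1] = BB1, placement 0
 *	eci[2] = BB0, placement 1	eci[3] = BB1, placement 1
 *
 * Placements {bcs0, bcs1} and {bcs1, bcs2} pass the contiguity check,
 * since BB1's mask 0b110 equals BB0's mask 0b011 shifted left by one;
 * {bcs0, bcs2} would be rejected.
 */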

int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_create *args = data;
	struct drm_xe_engine_class_instance eci[XE_HW_ENGINE_MAX_INSTANCE];
	struct drm_xe_engine_class_instance __user *user_eci =
		u64_to_user_ptr(args->instances);
	struct xe_hw_engine *hwe;
	struct xe_vm *vm;
	struct xe_tile *tile;
	struct xe_exec_queue *q = NULL;
	u32 logical_mask;
	u32 flags = 0;
	u32 id;
	u32 len;
	int err;

	if (XE_IOCTL_DBG(xe, args->flags & ~DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	len = args->width * args->num_placements;
	if (XE_IOCTL_DBG(xe, !len || len > XE_HW_ENGINE_MAX_INSTANCE))
		return -EINVAL;

	err = copy_from_user(eci, user_eci,
			     sizeof(struct drm_xe_engine_class_instance) * len);
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, eci[0].gt_id >= xe->info.gt_count))
		return -EINVAL;

	if (args->flags & DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT)
		flags |= EXEC_QUEUE_FLAG_LOW_LATENCY;

	if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) {
		if (XE_IOCTL_DBG(xe, args->width != 1) ||
		    XE_IOCTL_DBG(xe, args->num_placements != 1) ||
		    XE_IOCTL_DBG(xe, eci[0].engine_instance != 0))
			return -EINVAL;

		for_each_tile(tile, xe, id) {
			struct xe_exec_queue *new;

			flags |= EXEC_QUEUE_FLAG_VM;
			if (id)
				flags |= EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD;

			new = xe_exec_queue_create_bind(xe, tile, flags,
							args->extensions);
			if (IS_ERR(new)) {
				err = PTR_ERR(new);
				if (q)
					goto put_exec_queue;
				return err;
			}
			if (id == 0)
				q = new;
			else
				list_add_tail(&new->multi_gt_list,
					      &q->multi_gt_link);
		}
	} else {
		logical_mask = calc_validate_logical_mask(xe, eci,
							  args->width,
							  args->num_placements);
		if (XE_IOCTL_DBG(xe, !logical_mask))
			return -EINVAL;

		hwe = xe_hw_engine_lookup(xe, eci[0]);
		if (XE_IOCTL_DBG(xe, !hwe))
			return -EINVAL;

		vm = xe_vm_lookup(xef, args->vm_id);
		if (XE_IOCTL_DBG(xe, !vm))
			return -ENOENT;

		err = down_read_interruptible(&vm->lock);
		if (err) {
			xe_vm_put(vm);
			return err;
		}

		if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
			up_read(&vm->lock);
			xe_vm_put(vm);
			return -ENOENT;
		}

		q = xe_exec_queue_create(xe, vm, logical_mask,
					 args->width, hwe, flags,
					 args->extensions);
		up_read(&vm->lock);
		xe_vm_put(vm);
		if (IS_ERR(q))
			return PTR_ERR(q);

		if (xe_vm_in_preempt_fence_mode(vm)) {
			q->lr.context = dma_fence_context_alloc(1);

			err = xe_vm_add_compute_exec_queue(vm, q);
			if (XE_IOCTL_DBG(xe, err))
				goto put_exec_queue;
		}

		if (q->vm && q->hwe->hw_engine_group) {
			err = xe_hw_engine_group_add_exec_queue(q->hwe->hw_engine_group, q);
			if (err)
				goto put_exec_queue;
		}
	}

	q->xef = xe_file_get(xef);

	/* user id alloc must always be last in ioctl to prevent UAF */
	err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
	if (err)
		goto kill_exec_queue;

	args->exec_queue_id = id;

	return 0;

kill_exec_queue:
	xe_exec_queue_kill(q);
put_exec_queue:
	xe_exec_queue_put(q);
	return err;
}
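
/*
 * Example (illustrative, userspace side): creating a plain single-engine
 * queue on rcs0 of gt0. fd is assumed to be an open xe render node and
 * vm_id a previously created VM; error handling is omitted.
 *
 *	struct drm_xe_engine_class_instance eci = {
 *		.engine_class = DRM_XE_ENGINE_CLASS_RENDER,
 *		.engine_instance = 0,
 *		.gt_id = 0,
 *	};
 *	struct drm_xe_exec_queue_create create = {
 *		.width = 1,
 *		.num_placements = 1,
 *		.vm_id = vm_id,
 *		.instances = (uintptr_t)&eci,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create);
 *
 * On success, create.exec_queue_id holds the new queue's id.
 */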

int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_get_property *args = data;
	struct xe_exec_queue *q;
	int ret;

	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	q = xe_exec_queue_lookup(xef, args->exec_queue_id);
	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	switch (args->property) {
	case DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN:
		args->value = q->ops->reset_status(q);
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	xe_exec_queue_put(q);

	return ret;
}

/**
 * xe_exec_queue_is_lr() - Whether an exec_queue is long-running
 * @q: The exec_queue
 *
 * Return: True if the exec_queue is long-running, false otherwise.
 */
bool xe_exec_queue_is_lr(struct xe_exec_queue *q)
{
	return q->vm && xe_vm_in_lr_mode(q->vm) &&
		!(q->flags & EXEC_QUEUE_FLAG_VM);
}

static s32 xe_exec_queue_num_job_inflight(struct xe_exec_queue *q)
{
	return q->lrc[0]->fence_ctx.next_seqno - xe_lrc_seqno(q->lrc[0]) - 1;
}

/**
 * xe_exec_queue_ring_full() - Whether an exec_queue's ring is full
 * @q: The exec_queue
 *
 * Return: True if the exec_queue's ring is full, false otherwise.
 */
bool xe_exec_queue_ring_full(struct xe_exec_queue *q)
{
	struct xe_lrc *lrc = q->lrc[0];
	s32 max_job = lrc->ring.size / MAX_JOB_SIZE_BYTES;

	return xe_exec_queue_num_job_inflight(q) >= max_job;
}
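
/*
 * Worked example: with the 16K ring allocated in __xe_exec_queue_init()
 * and a hypothetical MAX_JOB_SIZE_BYTES of 256, max_job would be 64; the
 * ring reports full once the count of emitted-but-unsignaled jobs,
 * next_seqno - seqno - 1, reaches that bound. The actual per-job size is
 * defined in xe_ring_ops_types.h.
 */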

/**
 * xe_exec_queue_is_idle() - Whether an exec_queue is idle.
 * @q: The exec_queue
 *
 * FIXME: Need to determine what to use as the short-lived
 * timeline lock for the exec_queues, so that the return value
 * of this function becomes more than just an advisory
 * snapshot in time. The timeline lock must protect the
 * seqno from racing submissions on the same exec_queue.
 * Typically vm->resv, but user-created timeline locks use the migrate vm
 * and never grab the migrate vm->resv, so we have a race there.
 *
 * Return: True if the exec_queue is idle, false otherwise.
 */
bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
{
	if (xe_exec_queue_is_parallel(q)) {
		int i;

		for (i = 0; i < q->width; ++i) {
			if (xe_lrc_seqno(q->lrc[i]) !=
			    q->lrc[i]->fence_ctx.next_seqno - 1)
				return false;
		}

		return true;
	}

	return xe_lrc_seqno(q->lrc[0]) ==
		q->lrc[0]->fence_ctx.next_seqno - 1;
}

/**
 * xe_exec_queue_update_run_ticks() - Update run time in ticks for this exec
 * queue from hw
 * @q: The exec queue
 *
 * Update the timestamp saved by HW for this exec queue and save the run ticks
 * calculated from the delta since the last update.
 */
void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
{
	struct xe_device *xe = gt_to_xe(q->gt);
	struct xe_lrc *lrc;
	u64 old_ts, new_ts;
	int idx;

	/*
	 * Jobs that are executed by the kernel don't have a corresponding
	 * xe_file and thus are not accounted.
	 */
	if (!q->xef)
		return;

	/* Synchronize with unbind while holding the xe file open */
	if (!drm_dev_enter(&xe->drm, &idx))
		return;
	/*
	 * Only sample the first LRC. For parallel submission, all of them are
	 * scheduled together and we compensate for that below by multiplying
	 * by width - this may introduce errors if that premise is not true
	 * and they don't exit 100% aligned. On the other hand, looping through
	 * the LRCs and reading them at different times could also introduce
	 * errors.
	 */
	lrc = q->lrc[0];
	new_ts = xe_lrc_update_timestamp(lrc, &old_ts);
	q->xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;

	drm_dev_exit(idx);
}
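
/*
 * Worked example: if the sampled LRC timestamp moved from old_ts = 1000
 * to new_ts = 1600 on a queue of width 2, run_ticks for this class grows
 * by (1600 - 1000) * 2 = 1200 ticks, under the premise noted above that
 * all LRCs of a parallel queue run aligned.
 */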

/**
 * xe_exec_queue_kill - permanently stop all execution from an exec queue
 * @q: The exec queue
 *
 * This function permanently stops all activity on an exec queue. If the queue
 * is actively executing on the HW, it will be kicked off the engine; any
 * pending jobs are discarded and all future submissions are rejected.
 * This function is safe to call multiple times.
 */
void xe_exec_queue_kill(struct xe_exec_queue *q)
{
	struct xe_exec_queue *eq = q, *next;

	list_for_each_entry_safe(eq, next, &eq->multi_gt_list,
				 multi_gt_link) {
		q->ops->kill(eq);
		xe_vm_remove_compute_exec_queue(q->vm, eq);
	}

	q->ops->kill(q);
	xe_vm_remove_compute_exec_queue(q->vm, q);
}

int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_destroy *args = data;
	struct xe_exec_queue *q;

	if (XE_IOCTL_DBG(xe, args->pad) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	mutex_lock(&xef->exec_queue.lock);
	q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id);
	if (q)
		atomic_inc(&xef->exec_queue.pending_removal);
	mutex_unlock(&xef->exec_queue.lock);

	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	if (q->vm && q->hwe->hw_engine_group)
		xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q);

	xe_exec_queue_kill(q);

	trace_xe_exec_queue_close(q);
	xe_exec_queue_put(q);

	return 0;
}
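
/*
 * Example (illustrative, userspace side): destroying a queue by id,
 * mirroring the creation example further up. fd and create are assumed
 * from that example.
 *
 *	struct drm_xe_exec_queue_destroy destroy = {
 *		.exec_queue_id = create.exec_queue_id,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_DESTROY, &destroy);
 */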

static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q,
						    struct xe_vm *vm)
{
	if (q->flags & EXEC_QUEUE_FLAG_VM) {
		lockdep_assert_held(&vm->lock);
	} else {
		xe_vm_assert_held(vm);
		lockdep_assert_held(&q->hwe->hw_engine_group->mode_sem);
	}
}

/**
 * xe_exec_queue_last_fence_put() - Drop ref to last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 */
void xe_exec_queue_last_fence_put(struct xe_exec_queue *q, struct xe_vm *vm)
{
	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	xe_exec_queue_last_fence_put_unlocked(q);
}

/**
 * xe_exec_queue_last_fence_put_unlocked() - Drop ref to last fence unlocked
 * @q: The exec queue
 *
 * Only safe to be called from xe_exec_queue_destroy().
 */
void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q)
{
	if (q->last_fence) {
		dma_fence_put(q->last_fence);
		q->last_fence = NULL;
	}
}

/**
 * xe_exec_queue_last_fence_get() - Get last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 *
 * Get last fence, takes a ref
 *
 * Returns: last fence if not signaled, dma fence stub if signaled
 */
struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
					       struct xe_vm *vm)
{
	struct dma_fence *fence;

	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	if (q->last_fence &&
	    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
		xe_exec_queue_last_fence_put(q, vm);

	fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
	dma_fence_get(fence);
	return fence;
}

/**
 * xe_exec_queue_last_fence_get_for_resume() - Get last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 *
 * Get last fence, takes a ref. Only safe to be called in the context of
 * resuming the hw engine group's long-running exec queue, when the group
 * semaphore is held.
 *
 * Returns: last fence if not signaled, dma fence stub if signaled
 */
struct dma_fence *xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue *q,
							  struct xe_vm *vm)
{
	struct dma_fence *fence;

	lockdep_assert_held_write(&q->hwe->hw_engine_group->mode_sem);

	if (q->last_fence &&
	    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
		xe_exec_queue_last_fence_put_unlocked(q);

	fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
	dma_fence_get(fence);
	return fence;
}

/**
 * xe_exec_queue_last_fence_set() - Set last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 * @fence: The fence
 *
 * Set the last fence for the engine. This increases the reference count of
 * @fence; xe_exec_queue_last_fence_put() should be called when closing the
 * engine.
 */
void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm,
				  struct dma_fence *fence)
{
	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	xe_exec_queue_last_fence_put(q, vm);
	q->last_fence = dma_fence_get(fence);
}
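
/*
 * Example (illustrative sketch): the typical pairing of the last-fence
 * helpers in a bind path, with the locks asserted above held. The fence
 * would normally come from job submission.
 *
 *	struct dma_fence *prev;
 *
 *	prev = xe_exec_queue_last_fence_get(q, vm);
 *	... add prev as a dependency of the new job ...
 *	dma_fence_put(prev);
 *
 *	xe_exec_queue_last_fence_set(q, vm, fence);
 */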

/**
 * xe_exec_queue_last_fence_test_dep - Test last fence dependency of queue
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 *
 * Returns:
 * -ETIME if there exists an unsignalled last fence dependency, zero otherwise.
 */
int xe_exec_queue_last_fence_test_dep(struct xe_exec_queue *q, struct xe_vm *vm)
{
	struct dma_fence *fence;
	int err = 0;

	fence = xe_exec_queue_last_fence_get(q, vm);
	if (fence) {
		err = test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) ?
			0 : -ETIME;
		dma_fence_put(fence);
	}

	return err;
}