xref: /linux/drivers/gpu/drm/i915/gem/i915_gem_context.c (revision 40ccd6aa3e2e05be93394e3cd560c718dedfcc77)
1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2011-2012 Intel Corporation
5  */
6 
7 /*
8  * This file implements HW context support. On gen5+ a HW context consists of an
9  * opaque GPU object which is referenced at times of context saves and restores.
10  * With RC6 enabled, the context is also referenced as the GPU enters and exits
11  * RC6 (the GPU has its own internal power context, except on gen5). Though
12  * something like a context does exist for the media ring, the code only
13  * supports contexts for the render ring.
14  *
15  * In software, there is a distinction between contexts created by the user,
16  * and the default HW context. The default HW context is used by GPU clients
17  * that do not request setup of their own hardware context. The default
18  * context's state is never restored to help prevent programming errors. This
19  * would happen if a client ran and piggy-backed off another client's GPU state.
20  * The default context only exists to give the GPU some offset to load as the
21  * current context, in order to invoke a save of the context we actually care
22  * about. In fact, the code could likely be constructed, albeit in a more
23  * complicated fashion, to never use the default context, though that limits
24  * the driver's ability to swap out and/or destroy other contexts.
25  *
26  * All other contexts are created as a request by the GPU client. These contexts
27  * store GPU state, and thus allow GPU clients to not re-emit state (and
28  * potentially query certain state) at any time. The kernel driver makes
29  * certain that the appropriate commands are inserted.
30  *
31  * The context life cycle is semi-complicated in that context BOs may live
32  * longer than the context itself because of the way the hardware and object
33  * tracking work. Below is a very crude representation of the state machine
34  * describing the context life.
35  *                                         refcount     pincount     active
36  * S0: initial state                          0            0           0
37  * S1: context created                        1            0           0
38  * S2: context is currently running           2            1           X
39  * S3: GPU referenced, but not current        2            0           1
40  * S4: context is current, but destroyed      1            1           0
41  * S5: like S3, but destroyed                 1            0           1
42  *
43  * The most common (but not all) transitions:
44  * S0->S1: client creates a context
45  * S1->S2: client submits execbuf with context
46  * S2->S3: another client submits an execbuf with a different context
47  * S3->S1: context object was retired
48  * S3->S2: client submits another execbuf
49  * S2->S4: context destroy called with current context
50  * S3->S5->S0: destroy path
51  * S4->S5->S0: destroy path on current context
52  *
53  * There are two confusing terms used above:
54  *  The "current context" means the context which is currently running on the
55  *  GPU. The GPU has loaded its state already and has stored away the gtt
56  *  offset of the BO. The GPU is not actively referencing the data at this
57  *  offset, but it will on the next context switch. The only way to avoid this
58  *  is to do a GPU reset.
59  *
60  *  An "active context" is one which was previously the "current context" and is
61  *  on the active list waiting for the next context switch to occur. Until this
62  *  happens, the object must remain at the same gtt offset. It is therefore
63  *  possible to destroy a context while it is still active.
64  *
65  */
66 
67 #include <linux/highmem.h>
68 #include <linux/log2.h>
69 #include <linux/nospec.h>
70 
71 #include <drm/drm_cache.h>
72 #include <drm/drm_syncobj.h>
73 
74 #include "gt/gen6_ppgtt.h"
75 #include "gt/intel_context.h"
76 #include "gt/intel_context_param.h"
77 #include "gt/intel_engine_heartbeat.h"
78 #include "gt/intel_engine_user.h"
79 #include "gt/intel_gpu_commands.h"
80 #include "gt/intel_ring.h"
81 
82 #include "pxp/intel_pxp.h"
83 
84 #include "i915_file_private.h"
85 #include "i915_gem_context.h"
86 #include "i915_trace.h"
87 #include "i915_user_extensions.h"
88 
89 #define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)
90 
91 static struct kmem_cache *slab_luts;
92 
93 struct i915_lut_handle *i915_lut_handle_alloc(void)
94 {
95 	return kmem_cache_alloc(slab_luts, GFP_KERNEL);
96 }
97 
98 void i915_lut_handle_free(struct i915_lut_handle *lut)
99 {
100 	return kmem_cache_free(slab_luts, lut);
101 }
102 
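/*
 * Sever the handle->vma lookups of a closing context: walk the context's
 * handles_vma radix tree, drop each object's LUT entry pointing back at
 * this context, close the VMA and release the object reference the LUT
 * held.
 */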
103 static void lut_close(struct i915_gem_context *ctx)
104 {
105 	struct radix_tree_iter iter;
106 	void __rcu **slot;
107 
108 	mutex_lock(&ctx->lut_mutex);
109 	rcu_read_lock();
110 	radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
111 		struct i915_vma *vma = rcu_dereference_raw(*slot);
112 		struct drm_i915_gem_object *obj = vma->obj;
113 		struct i915_lut_handle *lut;
114 
115 		if (!kref_get_unless_zero(&obj->base.refcount))
116 			continue;
117 
118 		spin_lock(&obj->lut_lock);
119 		list_for_each_entry(lut, &obj->lut_list, obj_link) {
120 			if (lut->ctx != ctx)
121 				continue;
122 
123 			if (lut->handle != iter.index)
124 				continue;
125 
126 			list_del(&lut->obj_link);
127 			break;
128 		}
129 		spin_unlock(&obj->lut_lock);
130 
131 		if (&lut->obj_link != &obj->lut_list) {
132 			i915_lut_handle_free(lut);
133 			radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
134 			i915_vma_close(vma);
135 			i915_gem_object_put(obj);
136 		}
137 
138 		i915_gem_object_put(obj);
139 	}
140 	rcu_read_unlock();
141 	mutex_unlock(&ctx->lut_mutex);
142 }
143 
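/*
 * Resolve a user engine descriptor to an intel_context. With
 * LOOKUP_USER_INDEX the caller passes an index into the context's
 * user-defined engine map (which must exist); otherwise the
 * class/instance pair is resolved through the legacy engine map.
 */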
144 static struct intel_context *
145 lookup_user_engine(struct i915_gem_context *ctx,
146 		   unsigned long flags,
147 		   const struct i915_engine_class_instance *ci)
148 #define LOOKUP_USER_INDEX BIT(0)
149 {
150 	int idx;
151 
152 	if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
153 		return ERR_PTR(-EINVAL);
154 
155 	if (!i915_gem_context_user_engines(ctx)) {
156 		struct intel_engine_cs *engine;
157 
158 		engine = intel_engine_lookup_user(ctx->i915,
159 						  ci->engine_class,
160 						  ci->engine_instance);
161 		if (!engine)
162 			return ERR_PTR(-EINVAL);
163 
164 		idx = engine->legacy_idx;
165 	} else {
166 		idx = ci->engine_instance;
167 	}
168 
169 	return i915_gem_context_get_engine(ctx, idx);
170 }
171 
172 static int validate_priority(struct drm_i915_private *i915,
173 			     const struct drm_i915_gem_context_param *args)
174 {
175 	s64 priority = args->value;
176 
177 	if (args->size)
178 		return -EINVAL;
179 
180 	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
181 		return -ENODEV;
182 
183 	if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
184 	    priority < I915_CONTEXT_MIN_USER_PRIORITY)
185 		return -EINVAL;
186 
187 	if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
188 	    !capable(CAP_SYS_NICE))
189 		return -EPERM;
190 
191 	return 0;
192 }
193 
194 static void proto_context_close(struct drm_i915_private *i915,
195 				struct i915_gem_proto_context *pc)
196 {
197 	int i;
198 
199 	if (pc->pxp_wakeref)
200 		intel_runtime_pm_put(&i915->runtime_pm, pc->pxp_wakeref);
201 	if (pc->vm)
202 		i915_vm_put(pc->vm);
203 	if (pc->user_engines) {
204 		for (i = 0; i < pc->num_user_engines; i++)
205 			kfree(pc->user_engines[i].siblings);
206 		kfree(pc->user_engines);
207 	}
208 	kfree(pc);
209 }
210 
211 static int proto_context_set_persistence(struct drm_i915_private *i915,
212 					 struct i915_gem_proto_context *pc,
213 					 bool persist)
214 {
215 	if (persist) {
216 		/*
217 		 * Only contexts that are short-lived [that will expire or be
218 		 * reset] are allowed to survive past termination. We require
219 		 * hangcheck to ensure that the persistent requests are healthy.
220 		 */
221 		if (!i915->params.enable_hangcheck)
222 			return -EINVAL;
223 
224 		pc->user_flags |= BIT(UCONTEXT_PERSISTENCE);
225 	} else {
226 		/* To cancel a context we use "preempt-to-idle" */
227 		if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
228 			return -ENODEV;
229 
230 		/*
231 		 * If the cancel fails, we then need to reset, cleanly!
232 		 *
233 		 * If the per-engine reset fails, all hope is lost! We resort
234 		 * to a full GPU reset in that unlikely case, but realistically
235 		 * if the engine could not reset, the full reset does not fare
236 		 * much better. The damage has been done.
237 		 *
238 		 * However, if we cannot reset an engine by itself, we cannot
239 		 * cleanup a hanging persistent context without causing
240 		 * collateral damage, and we should not pretend we can by
241 		 * exposing the interface.
242 		 */
243 		if (!intel_has_reset_engine(to_gt(i915)))
244 			return -ENODEV;
245 
246 		pc->user_flags &= ~BIT(UCONTEXT_PERSISTENCE);
247 	}
248 
249 	return 0;
250 }
251 
252 static int proto_context_set_protected(struct drm_i915_private *i915,
253 				       struct i915_gem_proto_context *pc,
254 				       bool protected)
255 {
256 	int ret = 0;
257 
258 	if (!protected) {
259 		pc->uses_protected_content = false;
260 	} else if (!intel_pxp_is_enabled(i915->pxp)) {
261 		ret = -ENODEV;
262 	} else if ((pc->user_flags & BIT(UCONTEXT_RECOVERABLE)) ||
263 		   !(pc->user_flags & BIT(UCONTEXT_BANNABLE))) {
264 		ret = -EPERM;
265 	} else {
266 		pc->uses_protected_content = true;
267 
268 		/*
269 		 * protected context usage requires the PXP session to be up,
270 		 * which in turn requires the device to be active.
271 		 */
272 		pc->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);
273 
274 		if (!intel_pxp_is_active(i915->pxp))
275 			ret = intel_pxp_start(i915->pxp);
276 	}
277 
278 	return ret;
279 }
280 
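/*
 * Allocate a proto-context with the default user flags (bannable,
 * recoverable, and persistent when hangcheck is enabled) and normal
 * scheduling priority. Requesting a single timeline is rejected on
 * platforms without execlists.
 */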
281 static struct i915_gem_proto_context *
282 proto_context_create(struct drm_i915_file_private *fpriv,
283 		     struct drm_i915_private *i915, unsigned int flags)
284 {
285 	struct i915_gem_proto_context *pc, *err;
286 
287 	pc = kzalloc(sizeof(*pc), GFP_KERNEL);
288 	if (!pc)
289 		return ERR_PTR(-ENOMEM);
290 
291 	pc->fpriv = fpriv;
292 	pc->num_user_engines = -1;
293 	pc->user_engines = NULL;
294 	pc->user_flags = BIT(UCONTEXT_BANNABLE) |
295 			 BIT(UCONTEXT_RECOVERABLE);
296 	if (i915->params.enable_hangcheck)
297 		pc->user_flags |= BIT(UCONTEXT_PERSISTENCE);
298 	pc->sched.priority = I915_PRIORITY_NORMAL;
299 
300 	if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
301 		if (!HAS_EXECLISTS(i915)) {
302 			err = ERR_PTR(-EINVAL);
303 			goto proto_close;
304 		}
305 		pc->single_timeline = true;
306 	}
307 
308 	return pc;
309 
310 proto_close:
311 	proto_context_close(i915, pc);
312 	return err;
313 }
314 
315 static int proto_context_register_locked(struct drm_i915_file_private *fpriv,
316 					 struct i915_gem_proto_context *pc,
317 					 u32 *id)
318 {
319 	int ret;
320 	void *old;
321 
322 	lockdep_assert_held(&fpriv->proto_context_lock);
323 
324 	ret = xa_alloc(&fpriv->context_xa, id, NULL, xa_limit_32b, GFP_KERNEL);
325 	if (ret)
326 		return ret;
327 
328 	old = xa_store(&fpriv->proto_context_xa, *id, pc, GFP_KERNEL);
329 	if (xa_is_err(old)) {
330 		xa_erase(&fpriv->context_xa, *id);
331 		return xa_err(old);
332 	}
333 	WARN_ON(old);
334 
335 	return 0;
336 }
337 
338 static int proto_context_register(struct drm_i915_file_private *fpriv,
339 				  struct i915_gem_proto_context *pc,
340 				  u32 *id)
341 {
342 	int ret;
343 
344 	mutex_lock(&fpriv->proto_context_lock);
345 	ret = proto_context_register_locked(fpriv, pc, id);
346 	mutex_unlock(&fpriv->proto_context_lock);
347 
348 	return ret;
349 }
350 
351 static struct i915_address_space *
352 i915_gem_vm_lookup(struct drm_i915_file_private *file_priv, u32 id)
353 {
354 	struct i915_address_space *vm;
355 
356 	xa_lock(&file_priv->vm_xa);
357 	vm = xa_load(&file_priv->vm_xa, id);
358 	if (vm)
359 		kref_get(&vm->ref);
360 	xa_unlock(&file_priv->vm_xa);
361 
362 	return vm;
363 }
364 
365 static int set_proto_ctx_vm(struct drm_i915_file_private *fpriv,
366 			    struct i915_gem_proto_context *pc,
367 			    const struct drm_i915_gem_context_param *args)
368 {
369 	struct drm_i915_private *i915 = fpriv->i915;
370 	struct i915_address_space *vm;
371 
372 	if (args->size)
373 		return -EINVAL;
374 
375 	if (!HAS_FULL_PPGTT(i915))
376 		return -ENODEV;
377 
378 	if (upper_32_bits(args->value))
379 		return -ENOENT;
380 
381 	vm = i915_gem_vm_lookup(fpriv, args->value);
382 	if (!vm)
383 		return -ENOENT;
384 
385 	if (pc->vm)
386 		i915_vm_put(pc->vm);
387 	pc->vm = vm;
388 
389 	return 0;
390 }
391 
392 struct set_proto_ctx_engines {
393 	struct drm_i915_private *i915;
394 	unsigned num_engines;
395 	struct i915_gem_proto_engine *engines;
396 };
397 
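/*
 * Handle the LOAD_BALANCE extension: fill an unused slot of the engine
 * map with either a single physical engine (one sibling) or a balanced
 * virtual engine spanning all of the listed siblings.
 */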
398 static int
399 set_proto_ctx_engines_balance(struct i915_user_extension __user *base,
400 			      void *data)
401 {
402 	struct i915_context_engines_load_balance __user *ext =
403 		container_of_user(base, typeof(*ext), base);
404 	const struct set_proto_ctx_engines *set = data;
405 	struct drm_i915_private *i915 = set->i915;
406 	struct intel_engine_cs **siblings;
407 	u16 num_siblings, idx;
408 	unsigned int n;
409 	int err;
410 
411 	if (!HAS_EXECLISTS(i915))
412 		return -ENODEV;
413 
414 	if (get_user(idx, &ext->engine_index))
415 		return -EFAULT;
416 
417 	if (idx >= set->num_engines) {
418 		drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
419 			idx, set->num_engines);
420 		return -EINVAL;
421 	}
422 
423 	idx = array_index_nospec(idx, set->num_engines);
424 	if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_INVALID) {
425 		drm_dbg(&i915->drm,
426 			"Invalid placement[%d], already occupied\n", idx);
427 		return -EEXIST;
428 	}
429 
430 	if (get_user(num_siblings, &ext->num_siblings))
431 		return -EFAULT;
432 
433 	err = check_user_mbz(&ext->flags);
434 	if (err)
435 		return err;
436 
437 	err = check_user_mbz(&ext->mbz64);
438 	if (err)
439 		return err;
440 
441 	if (num_siblings == 0)
442 		return 0;
443 
444 	siblings = kmalloc_array(num_siblings, sizeof(*siblings), GFP_KERNEL);
445 	if (!siblings)
446 		return -ENOMEM;
447 
448 	for (n = 0; n < num_siblings; n++) {
449 		struct i915_engine_class_instance ci;
450 
451 		if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
452 			err = -EFAULT;
453 			goto err_siblings;
454 		}
455 
456 		siblings[n] = intel_engine_lookup_user(i915,
457 						       ci.engine_class,
458 						       ci.engine_instance);
459 		if (!siblings[n]) {
460 			drm_dbg(&i915->drm,
461 				"Invalid sibling[%d]: { class:%d, inst:%d }\n",
462 				n, ci.engine_class, ci.engine_instance);
463 			err = -EINVAL;
464 			goto err_siblings;
465 		}
466 	}
467 
468 	if (num_siblings == 1) {
469 		set->engines[idx].type = I915_GEM_ENGINE_TYPE_PHYSICAL;
470 		set->engines[idx].engine = siblings[0];
471 		kfree(siblings);
472 	} else {
473 		set->engines[idx].type = I915_GEM_ENGINE_TYPE_BALANCED;
474 		set->engines[idx].num_siblings = num_siblings;
475 		set->engines[idx].siblings = siblings;
476 	}
477 
478 	return 0;
479 
480 err_siblings:
481 	kfree(siblings);
482 
483 	return err;
484 }
485 
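/*
 * Handle the BOND extension. The master/bond pairs are only validated
 * for uAPI compatibility; nothing is recorded in the proto-context, and
 * the extension is rejected on platforms without bonding support or
 * when the master engine uses GuC submission.
 */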
486 static int
487 set_proto_ctx_engines_bond(struct i915_user_extension __user *base, void *data)
488 {
489 	struct i915_context_engines_bond __user *ext =
490 		container_of_user(base, typeof(*ext), base);
491 	const struct set_proto_ctx_engines *set = data;
492 	struct drm_i915_private *i915 = set->i915;
493 	struct i915_engine_class_instance ci;
494 	struct intel_engine_cs *master;
495 	u16 idx, num_bonds;
496 	int err, n;
497 
498 	if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915) &&
499 	    !IS_ROCKETLAKE(i915) && !IS_ALDERLAKE_S(i915)) {
500 		drm_dbg(&i915->drm,
501 			"Bonding not supported on this platform\n");
502 		return -ENODEV;
503 	}
504 
505 	if (get_user(idx, &ext->virtual_index))
506 		return -EFAULT;
507 
508 	if (idx >= set->num_engines) {
509 		drm_dbg(&i915->drm,
510 			"Invalid index for virtual engine: %d >= %d\n",
511 			idx, set->num_engines);
512 		return -EINVAL;
513 	}
514 
515 	idx = array_index_nospec(idx, set->num_engines);
516 	if (set->engines[idx].type == I915_GEM_ENGINE_TYPE_INVALID) {
517 		drm_dbg(&i915->drm, "Invalid engine at %d\n", idx);
518 		return -EINVAL;
519 	}
520 
521 	if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_PHYSICAL) {
522 		drm_dbg(&i915->drm,
523 			"Bonding with virtual engines not allowed\n");
524 		return -EINVAL;
525 	}
526 
527 	err = check_user_mbz(&ext->flags);
528 	if (err)
529 		return err;
530 
531 	for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
532 		err = check_user_mbz(&ext->mbz64[n]);
533 		if (err)
534 			return err;
535 	}
536 
537 	if (copy_from_user(&ci, &ext->master, sizeof(ci)))
538 		return -EFAULT;
539 
540 	master = intel_engine_lookup_user(i915,
541 					  ci.engine_class,
542 					  ci.engine_instance);
543 	if (!master) {
544 		drm_dbg(&i915->drm,
545 			"Unrecognised master engine: { class:%u, instance:%u }\n",
546 			ci.engine_class, ci.engine_instance);
547 		return -EINVAL;
548 	}
549 
550 	if (intel_engine_uses_guc(master)) {
551 		drm_dbg(&i915->drm, "bonding extension not supported with GuC submission");
552 		return -ENODEV;
553 	}
554 
555 	if (get_user(num_bonds, &ext->num_bonds))
556 		return -EFAULT;
557 
558 	for (n = 0; n < num_bonds; n++) {
559 		struct intel_engine_cs *bond;
560 
561 		if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
562 			return -EFAULT;
563 
564 		bond = intel_engine_lookup_user(i915,
565 						ci.engine_class,
566 						ci.engine_instance);
567 		if (!bond) {
568 			drm_dbg(&i915->drm,
569 				"Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
570 				n, ci.engine_class, ci.engine_instance);
571 			return -EINVAL;
572 		}
573 	}
574 
575 	return 0;
576 }
577 
578 static int
579 set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base,
580 				      void *data)
581 {
582 	struct i915_context_engines_parallel_submit __user *ext =
583 		container_of_user(base, typeof(*ext), base);
584 	const struct set_proto_ctx_engines *set = data;
585 	struct drm_i915_private *i915 = set->i915;
586 	struct i915_engine_class_instance prev_engine;
587 	u64 flags;
588 	int err = 0, n, i, j;
589 	u16 slot, width, num_siblings;
590 	struct intel_engine_cs **siblings = NULL;
591 	intel_engine_mask_t prev_mask;
592 
593 	if (get_user(slot, &ext->engine_index))
594 		return -EFAULT;
595 
596 	if (get_user(width, &ext->width))
597 		return -EFAULT;
598 
599 	if (get_user(num_siblings, &ext->num_siblings))
600 		return -EFAULT;
601 
602 	if (!intel_uc_uses_guc_submission(&to_gt(i915)->uc) &&
603 	    num_siblings != 1) {
604 		drm_dbg(&i915->drm, "Only 1 sibling (%d) supported in non-GuC mode\n",
605 			num_siblings);
606 		return -EINVAL;
607 	}
608 
609 	if (slot >= set->num_engines) {
610 		drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
611 			slot, set->num_engines);
612 		return -EINVAL;
613 	}
614 
615 	if (set->engines[slot].type != I915_GEM_ENGINE_TYPE_INVALID) {
616 		drm_dbg(&i915->drm,
617 			"Invalid placement[%d], already occupied\n", slot);
618 		return -EINVAL;
619 	}
620 
621 	if (get_user(flags, &ext->flags))
622 		return -EFAULT;
623 
624 	if (flags) {
625 		drm_dbg(&i915->drm, "Unknown flags 0x%02llx", flags);
626 		return -EINVAL;
627 	}
628 
629 	for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
630 		err = check_user_mbz(&ext->mbz64[n]);
631 		if (err)
632 			return err;
633 	}
634 
635 	if (width < 2) {
636 		drm_dbg(&i915->drm, "Width (%d) < 2\n", width);
637 		return -EINVAL;
638 	}
639 
640 	if (num_siblings < 1) {
641 		drm_dbg(&i915->drm, "Number siblings (%d) < 1\n",
642 			num_siblings);
643 		return -EINVAL;
644 	}
645 
646 	siblings = kmalloc_array(num_siblings * width,
647 				 sizeof(*siblings),
648 				 GFP_KERNEL);
649 	if (!siblings)
650 		return -ENOMEM;
651 
652 	/* Create contexts / engines */
653 	for (i = 0; i < width; ++i) {
654 		intel_engine_mask_t current_mask = 0;
655 
656 		for (j = 0; j < num_siblings; ++j) {
657 			struct i915_engine_class_instance ci;
658 
659 			n = i * num_siblings + j;
660 			if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
661 				err = -EFAULT;
662 				goto out_err;
663 			}
664 
665 			siblings[n] =
666 				intel_engine_lookup_user(i915, ci.engine_class,
667 							 ci.engine_instance);
668 			if (!siblings[n]) {
669 				drm_dbg(&i915->drm,
670 					"Invalid sibling[%d]: { class:%d, inst:%d }\n",
671 					n, ci.engine_class, ci.engine_instance);
672 				err = -EINVAL;
673 				goto out_err;
674 			}
675 
676 			/*
677 			 * We don't support breadcrumb handshake on these
678 			 * classes
679 			 */
680 			if (siblings[n]->class == RENDER_CLASS ||
681 			    siblings[n]->class == COMPUTE_CLASS) {
682 				err = -EINVAL;
683 				goto out_err;
684 			}
685 
686 			if (n) {
687 				if (prev_engine.engine_class !=
688 				    ci.engine_class) {
689 					drm_dbg(&i915->drm,
690 						"Mismatched class %d, %d\n",
691 						prev_engine.engine_class,
692 						ci.engine_class);
693 					err = -EINVAL;
694 					goto out_err;
695 				}
696 			}
697 
698 			prev_engine = ci;
699 			current_mask |= siblings[n]->logical_mask;
700 		}
701 
702 		if (i > 0) {
703 			if (current_mask != prev_mask << 1) {
704 				drm_dbg(&i915->drm,
705 					"Non contiguous logical mask 0x%x, 0x%x\n",
706 					prev_mask, current_mask);
707 				err = -EINVAL;
708 				goto out_err;
709 			}
710 		}
711 		prev_mask = current_mask;
712 	}
713 
714 	set->engines[slot].type = I915_GEM_ENGINE_TYPE_PARALLEL;
715 	set->engines[slot].num_siblings = num_siblings;
716 	set->engines[slot].width = width;
717 	set->engines[slot].siblings = siblings;
718 
719 	return 0;
720 
721 out_err:
722 	kfree(siblings);
723 
724 	return err;
725 }
726 
727 static const i915_user_extension_fn set_proto_ctx_engines_extensions[] = {
728 	[I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_proto_ctx_engines_balance,
729 	[I915_CONTEXT_ENGINES_EXT_BOND] = set_proto_ctx_engines_bond,
730 	[I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT] =
731 		set_proto_ctx_engines_parallel_submit,
732 };
733 
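/*
 * Install the user-defined engine map on a proto-context. The payload
 * is a struct i915_context_param_engines header followed by an array of
 * class/instance pairs; INVALID/NONE entries leave holes in the map,
 * and the chained extensions may turn slots into balanced or parallel
 * virtual engines.
 */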
734 static int set_proto_ctx_engines(struct drm_i915_file_private *fpriv,
735 			         struct i915_gem_proto_context *pc,
736 			         const struct drm_i915_gem_context_param *args)
737 {
738 	struct drm_i915_private *i915 = fpriv->i915;
739 	struct set_proto_ctx_engines set = { .i915 = i915 };
740 	struct i915_context_param_engines __user *user =
741 		u64_to_user_ptr(args->value);
742 	unsigned int n;
743 	u64 extensions;
744 	int err;
745 
746 	if (pc->num_user_engines >= 0) {
747 		drm_dbg(&i915->drm, "Cannot set engines twice");
748 		return -EINVAL;
749 	}
750 
751 	if (args->size < sizeof(*user) ||
752 	    !IS_ALIGNED(args->size - sizeof(*user), sizeof(*user->engines))) {
753 		drm_dbg(&i915->drm, "Invalid size for engine array: %d\n",
754 			args->size);
755 		return -EINVAL;
756 	}
757 
758 	set.num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
759 	/* RING_MASK has no shift so we can use it directly here */
760 	if (set.num_engines > I915_EXEC_RING_MASK + 1)
761 		return -EINVAL;
762 
763 	set.engines = kmalloc_array(set.num_engines, sizeof(*set.engines), GFP_KERNEL);
764 	if (!set.engines)
765 		return -ENOMEM;
766 
767 	for (n = 0; n < set.num_engines; n++) {
768 		struct i915_engine_class_instance ci;
769 		struct intel_engine_cs *engine;
770 
771 		if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
772 			kfree(set.engines);
773 			return -EFAULT;
774 		}
775 
776 		memset(&set.engines[n], 0, sizeof(set.engines[n]));
777 
778 		if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
779 		    ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE)
780 			continue;
781 
782 		engine = intel_engine_lookup_user(i915,
783 						  ci.engine_class,
784 						  ci.engine_instance);
785 		if (!engine) {
786 			drm_dbg(&i915->drm,
787 				"Invalid engine[%d]: { class:%d, instance:%d }\n",
788 				n, ci.engine_class, ci.engine_instance);
789 			kfree(set.engines);
790 			return -ENOENT;
791 		}
792 
793 		set.engines[n].type = I915_GEM_ENGINE_TYPE_PHYSICAL;
794 		set.engines[n].engine = engine;
795 	}
796 
797 	err = -EFAULT;
798 	if (!get_user(extensions, &user->extensions))
799 		err = i915_user_extensions(u64_to_user_ptr(extensions),
800 					   set_proto_ctx_engines_extensions,
801 					   ARRAY_SIZE(set_proto_ctx_engines_extensions),
802 					   &set);
803 	if (err) {
804 		kfree(set.engines);
805 		return err;
806 	}
807 
808 	pc->num_user_engines = set.num_engines;
809 	pc->user_engines = set.engines;
810 
811 	return 0;
812 }
813 
814 static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv,
815 			      struct i915_gem_proto_context *pc,
816 			      struct drm_i915_gem_context_param *args)
817 {
818 	struct drm_i915_private *i915 = fpriv->i915;
819 	struct drm_i915_gem_context_param_sseu user_sseu;
820 	struct intel_sseu *sseu;
821 	int ret;
822 
823 	if (args->size < sizeof(user_sseu))
824 		return -EINVAL;
825 
826 	if (GRAPHICS_VER(i915) != 11)
827 		return -ENODEV;
828 
829 	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
830 			   sizeof(user_sseu)))
831 		return -EFAULT;
832 
833 	if (user_sseu.rsvd)
834 		return -EINVAL;
835 
836 	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
837 		return -EINVAL;
838 
839 	if (!!(user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) != (pc->num_user_engines >= 0))
840 		return -EINVAL;
841 
842 	if (pc->num_user_engines >= 0) {
843 		int idx = user_sseu.engine.engine_instance;
844 		struct i915_gem_proto_engine *pe;
845 
846 		if (idx >= pc->num_user_engines)
847 			return -EINVAL;
848 
849 		idx = array_index_nospec(idx, pc->num_user_engines);
850 		pe = &pc->user_engines[idx];
851 
852 		/* Only render engine supports RPCS configuration. */
853 		if (pe->engine->class != RENDER_CLASS)
854 			return -EINVAL;
855 
856 		sseu = &pe->sseu;
857 	} else {
858 		/* Only render engine supports RPCS configuration. */
859 		if (user_sseu.engine.engine_class != I915_ENGINE_CLASS_RENDER)
860 			return -EINVAL;
861 
862 		/* There is only one render engine */
863 		if (user_sseu.engine.engine_instance != 0)
864 			return -EINVAL;
865 
866 		sseu = &pc->legacy_rcs_sseu;
867 	}
868 
869 	ret = i915_gem_user_to_context_sseu(to_gt(i915), &user_sseu, sseu);
870 	if (ret)
871 		return ret;
872 
873 	args->size = sizeof(user_sseu);
874 
875 	return 0;
876 }
877 
878 static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
879 			       struct i915_gem_proto_context *pc,
880 			       struct drm_i915_gem_context_param *args)
881 {
882 	struct drm_i915_private *i915 = fpriv->i915;
883 	int ret = 0;
884 
885 	switch (args->param) {
886 	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
887 		if (args->size)
888 			ret = -EINVAL;
889 		else if (args->value)
890 			pc->user_flags |= BIT(UCONTEXT_NO_ERROR_CAPTURE);
891 		else
892 			pc->user_flags &= ~BIT(UCONTEXT_NO_ERROR_CAPTURE);
893 		break;
894 
895 	case I915_CONTEXT_PARAM_BANNABLE:
896 		if (args->size)
897 			ret = -EINVAL;
898 		else if (!capable(CAP_SYS_ADMIN) && !args->value)
899 			ret = -EPERM;
900 		else if (args->value)
901 			pc->user_flags |= BIT(UCONTEXT_BANNABLE);
902 		else if (pc->uses_protected_content)
903 			ret = -EPERM;
904 		else
905 			pc->user_flags &= ~BIT(UCONTEXT_BANNABLE);
906 		break;
907 
908 	case I915_CONTEXT_PARAM_LOW_LATENCY:
909 		if (intel_uc_uses_guc_submission(&to_gt(i915)->uc))
910 			pc->user_flags |= BIT(UCONTEXT_LOW_LATENCY);
911 		else
912 			ret = -EINVAL;
913 		break;
914 
915 	case I915_CONTEXT_PARAM_RECOVERABLE:
916 		if (args->size)
917 			ret = -EINVAL;
918 		else if (!args->value)
919 			pc->user_flags &= ~BIT(UCONTEXT_RECOVERABLE);
920 		else if (pc->uses_protected_content)
921 			ret = -EPERM;
922 		else
923 			pc->user_flags |= BIT(UCONTEXT_RECOVERABLE);
924 		break;
925 
926 	case I915_CONTEXT_PARAM_PRIORITY:
927 		ret = validate_priority(fpriv->i915, args);
928 		if (!ret)
929 			pc->sched.priority = args->value;
930 		break;
931 
932 	case I915_CONTEXT_PARAM_SSEU:
933 		ret = set_proto_ctx_sseu(fpriv, pc, args);
934 		break;
935 
936 	case I915_CONTEXT_PARAM_VM:
937 		ret = set_proto_ctx_vm(fpriv, pc, args);
938 		break;
939 
940 	case I915_CONTEXT_PARAM_ENGINES:
941 		ret = set_proto_ctx_engines(fpriv, pc, args);
942 		break;
943 
944 	case I915_CONTEXT_PARAM_PERSISTENCE:
945 		if (args->size)
946 			ret = -EINVAL;
947 		else
948 			ret = proto_context_set_persistence(fpriv->i915, pc,
949 							    args->value);
950 		break;
951 
952 	case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
953 		ret = proto_context_set_protected(fpriv->i915, pc,
954 						  args->value);
955 		break;
956 
957 	case I915_CONTEXT_PARAM_NO_ZEROMAP:
958 	case I915_CONTEXT_PARAM_BAN_PERIOD:
959 	case I915_CONTEXT_PARAM_RINGSIZE:
960 	default:
961 		ret = -EINVAL;
962 		break;
963 	}
964 
965 	return ret;
966 }
967 
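/*
 * Bind a newly created intel_context to its GEM context: choose the
 * ring size, inherit the context's VM, enable semaphores for
 * timeslicing at normal-or-higher priority, arm the request watchdog
 * and apply any user SSEU configuration (render engines only).
 */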
968 static int intel_context_set_gem(struct intel_context *ce,
969 				 struct i915_gem_context *ctx,
970 				 struct intel_sseu sseu)
971 {
972 	int ret = 0;
973 
974 	GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
975 	RCU_INIT_POINTER(ce->gem_context, ctx);
976 
977 	GEM_BUG_ON(intel_context_is_pinned(ce));
978 
979 	if (ce->engine->class == COMPUTE_CLASS)
980 		ce->ring_size = SZ_512K;
981 	else
982 		ce->ring_size = SZ_16K;
983 
984 	i915_vm_put(ce->vm);
985 	ce->vm = i915_gem_context_get_eb_vm(ctx);
986 
987 	if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
988 	    intel_engine_has_timeslices(ce->engine) &&
989 	    intel_engine_has_semaphores(ce->engine))
990 		__set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
991 
992 	if (CONFIG_DRM_I915_REQUEST_TIMEOUT &&
993 	    ctx->i915->params.request_timeout_ms) {
994 		unsigned int timeout_ms = ctx->i915->params.request_timeout_ms;
995 
996 		intel_context_set_watchdog_us(ce, (u64)timeout_ms * 1000);
997 	}
998 
999 	/* A valid SSEU has no zero fields */
1000 	if (sseu.slice_mask && !WARN_ON(ce->engine->class != RENDER_CLASS))
1001 		ret = intel_context_reconfigure_sseu(ce, sseu);
1002 
1003 	if (test_bit(UCONTEXT_LOW_LATENCY, &ctx->user_flags))
1004 		__set_bit(CONTEXT_LOW_LATENCY, &ce->flags);
1005 
1006 	return ret;
1007 }
1008 
1009 static void __unpin_engines(struct i915_gem_engines *e, unsigned int count)
1010 {
1011 	while (count--) {
1012 		struct intel_context *ce = e->engines[count], *child;
1013 
1014 		if (!ce || !test_bit(CONTEXT_PERMA_PIN, &ce->flags))
1015 			continue;
1016 
1017 		for_each_child(ce, child)
1018 			intel_context_unpin(child);
1019 		intel_context_unpin(ce);
1020 	}
1021 }
1022 
1023 static void unpin_engines(struct i915_gem_engines *e)
1024 {
1025 	__unpin_engines(e, e->num_engines);
1026 }
1027 
1028 static void __free_engines(struct i915_gem_engines *e, unsigned int count)
1029 {
1030 	while (count--) {
1031 		if (!e->engines[count])
1032 			continue;
1033 
1034 		intel_context_put(e->engines[count]);
1035 	}
1036 	kfree(e);
1037 }
1038 
1039 static void free_engines(struct i915_gem_engines *e)
1040 {
1041 	__free_engines(e, e->num_engines);
1042 }
1043 
1044 static void free_engines_rcu(struct rcu_head *rcu)
1045 {
1046 	struct i915_gem_engines *engines =
1047 		container_of(rcu, struct i915_gem_engines, rcu);
1048 
1049 	i915_sw_fence_fini(&engines->fence);
1050 	free_engines(engines);
1051 }
1052 
1053 static void accumulate_runtime(struct i915_drm_client *client,
1054 			       struct i915_gem_engines *engines)
1055 {
1056 	struct i915_gem_engines_iter it;
1057 	struct intel_context *ce;
1058 
1059 	if (!client)
1060 		return;
1061 
1062 	/* Transfer accumulated runtime to the parent GEM context. */
1063 	for_each_gem_engine(ce, engines, it) {
1064 		unsigned int class = ce->engine->uabi_class;
1065 
1066 		GEM_BUG_ON(class >= ARRAY_SIZE(client->past_runtime));
1067 		atomic64_add(intel_context_get_total_runtime_ns(ce),
1068 			     &client->past_runtime[class]);
1069 	}
1070 }
1071 
1072 static int
1073 engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
1074 {
1075 	struct i915_gem_engines *engines =
1076 		container_of(fence, typeof(*engines), fence);
1077 	struct i915_gem_context *ctx = engines->ctx;
1078 
1079 	switch (state) {
1080 	case FENCE_COMPLETE:
1081 		if (!list_empty(&engines->link)) {
1082 			unsigned long flags;
1083 
1084 			spin_lock_irqsave(&ctx->stale.lock, flags);
1085 			list_del(&engines->link);
1086 			spin_unlock_irqrestore(&ctx->stale.lock, flags);
1087 		}
1088 		accumulate_runtime(ctx->client, engines);
1089 		i915_gem_context_put(ctx);
1090 
1091 		break;
1092 
1093 	case FENCE_FREE:
1094 		init_rcu_head(&engines->rcu);
1095 		call_rcu(&engines->rcu, free_engines_rcu);
1096 		break;
1097 	}
1098 
1099 	return NOTIFY_DONE;
1100 }
1101 
1102 static struct i915_gem_engines *alloc_engines(unsigned int count)
1103 {
1104 	struct i915_gem_engines *e;
1105 
1106 	e = kzalloc(struct_size(e, engines, count), GFP_KERNEL);
1107 	if (!e)
1108 		return NULL;
1109 
1110 	i915_sw_fence_init(&e->fence, engines_notify);
1111 	return e;
1112 }
1113 
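/*
 * Build the legacy engine map: one context per UABI engine, indexed by
 * the engine's legacy_idx, with the requested SSEU applied to the
 * render engine.
 */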
1114 static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx,
1115 						struct intel_sseu rcs_sseu)
1116 {
1117 	const unsigned int max = I915_NUM_ENGINES;
1118 	struct intel_engine_cs *engine;
1119 	struct i915_gem_engines *e, *err;
1120 
1121 	e = alloc_engines(max);
1122 	if (!e)
1123 		return ERR_PTR(-ENOMEM);
1124 
1125 	for_each_uabi_engine(engine, ctx->i915) {
1126 		struct intel_context *ce;
1127 		struct intel_sseu sseu = {};
1128 		int ret;
1129 
1130 		if (engine->legacy_idx == INVALID_ENGINE)
1131 			continue;
1132 
1133 		GEM_BUG_ON(engine->legacy_idx >= max);
1134 		GEM_BUG_ON(e->engines[engine->legacy_idx]);
1135 
1136 		ce = intel_context_create(engine);
1137 		if (IS_ERR(ce)) {
1138 			err = ERR_CAST(ce);
1139 			goto free_engines;
1140 		}
1141 
1142 		e->engines[engine->legacy_idx] = ce;
1143 		e->num_engines = max(e->num_engines, engine->legacy_idx + 1);
1144 
1145 		if (engine->class == RENDER_CLASS)
1146 			sseu = rcs_sseu;
1147 
1148 		ret = intel_context_set_gem(ce, ctx, sseu);
1149 		if (ret) {
1150 			err = ERR_PTR(ret);
1151 			goto free_engines;
1152 		}
1153 
1154 	}
1155 
1156 	return e;
1157 
1158 free_engines:
1159 	free_engines(e);
1160 	return err;
1161 }
1162 
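/*
 * Pin a parallel parent context and all of its children for the
 * lifetime of the GEM context, unwinding the pins already taken if any
 * child fails to pin.
 */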
1163 static int perma_pin_contexts(struct intel_context *ce)
1164 {
1165 	struct intel_context *child;
1166 	int i = 0, j = 0, ret;
1167 
1168 	GEM_BUG_ON(!intel_context_is_parent(ce));
1169 
1170 	ret = intel_context_pin(ce);
1171 	if (unlikely(ret))
1172 		return ret;
1173 
1174 	for_each_child(ce, child) {
1175 		ret = intel_context_pin(child);
1176 		if (unlikely(ret))
1177 			goto unwind;
1178 		++i;
1179 	}
1180 
1181 	set_bit(CONTEXT_PERMA_PIN, &ce->flags);
1182 
1183 	return 0;
1184 
1185 unwind:
1186 	intel_context_unpin(ce);
1187 	for_each_child(ce, child) {
1188 		if (j++ < i)
1189 			intel_context_unpin(child);
1190 		else
1191 			break;
1192 	}
1193 
1194 	return ret;
1195 }
1196 
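/*
 * Materialise the user's proto-engine array into real contexts:
 * physical, balanced virtual or parallel engines as requested; parallel
 * engines are additionally perma-pinned once configured.
 */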
1197 static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
1198 					     unsigned int num_engines,
1199 					     struct i915_gem_proto_engine *pe)
1200 {
1201 	struct i915_gem_engines *e, *err;
1202 	unsigned int n;
1203 
1204 	e = alloc_engines(num_engines);
1205 	if (!e)
1206 		return ERR_PTR(-ENOMEM);
1207 	e->num_engines = num_engines;
1208 
1209 	for (n = 0; n < num_engines; n++) {
1210 		struct intel_context *ce, *child;
1211 		int ret;
1212 
1213 		switch (pe[n].type) {
1214 		case I915_GEM_ENGINE_TYPE_PHYSICAL:
1215 			ce = intel_context_create(pe[n].engine);
1216 			break;
1217 
1218 		case I915_GEM_ENGINE_TYPE_BALANCED:
1219 			ce = intel_engine_create_virtual(pe[n].siblings,
1220 							 pe[n].num_siblings, 0);
1221 			break;
1222 
1223 		case I915_GEM_ENGINE_TYPE_PARALLEL:
1224 			ce = intel_engine_create_parallel(pe[n].siblings,
1225 							  pe[n].num_siblings,
1226 							  pe[n].width);
1227 			break;
1228 
1229 		case I915_GEM_ENGINE_TYPE_INVALID:
1230 		default:
1231 			GEM_WARN_ON(pe[n].type != I915_GEM_ENGINE_TYPE_INVALID);
1232 			continue;
1233 		}
1234 
1235 		if (IS_ERR(ce)) {
1236 			err = ERR_CAST(ce);
1237 			goto free_engines;
1238 		}
1239 
1240 		e->engines[n] = ce;
1241 
1242 		ret = intel_context_set_gem(ce, ctx, pe->sseu);
1243 		if (ret) {
1244 			err = ERR_PTR(ret);
1245 			goto free_engines;
1246 		}
1247 		for_each_child(ce, child) {
1248 			ret = intel_context_set_gem(child, ctx, pe->sseu);
1249 			if (ret) {
1250 				err = ERR_PTR(ret);
1251 				goto free_engines;
1252 			}
1253 		}
1254 
1255 		/*
1256 		 * XXX: Must be done after calling intel_context_set_gem as that
1257 		 * function changes the ring size. The ring is allocated when
1258 		 * the context is pinned. If the ring size is changed after
1259 		 * allocation we have a mismatch of the ring size and will cause
1260 		 * the context to hang. Presumably with a bit of reordering we
1261 		 * could move the perma-pin step to the backend function
1262 		 * intel_engine_create_parallel.
1263 		 */
1264 		if (pe[n].type == I915_GEM_ENGINE_TYPE_PARALLEL) {
1265 			ret = perma_pin_contexts(ce);
1266 			if (ret) {
1267 				err = ERR_PTR(ret);
1268 				goto free_engines;
1269 			}
1270 		}
1271 	}
1272 
1273 	return e;
1274 
1275 free_engines:
1276 	free_engines(e);
1277 	return err;
1278 }
1279 
1280 static void i915_gem_context_release_work(struct work_struct *work)
1281 {
1282 	struct i915_gem_context *ctx = container_of(work, typeof(*ctx),
1283 						    release_work);
1284 	struct i915_address_space *vm;
1285 
1286 	trace_i915_context_free(ctx);
1287 	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
1288 
1289 	spin_lock(&ctx->i915->gem.contexts.lock);
1290 	list_del(&ctx->link);
1291 	spin_unlock(&ctx->i915->gem.contexts.lock);
1292 
1293 	if (ctx->syncobj)
1294 		drm_syncobj_put(ctx->syncobj);
1295 
1296 	vm = ctx->vm;
1297 	if (vm)
1298 		i915_vm_put(vm);
1299 
1300 	if (ctx->pxp_wakeref)
1301 		intel_runtime_pm_put(&ctx->i915->runtime_pm, ctx->pxp_wakeref);
1302 
1303 	if (ctx->client)
1304 		i915_drm_client_put(ctx->client);
1305 
1306 	mutex_destroy(&ctx->engines_mutex);
1307 	mutex_destroy(&ctx->lut_mutex);
1308 
1309 	put_pid(ctx->pid);
1310 	mutex_destroy(&ctx->mutex);
1311 
1312 	kfree_rcu(ctx, rcu);
1313 }
1314 
1315 void i915_gem_context_release(struct kref *ref)
1316 {
1317 	struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
1318 
1319 	queue_work(ctx->i915->wq, &ctx->release_work);
1320 }
1321 
1322 static inline struct i915_gem_engines *
1323 __context_engines_static(const struct i915_gem_context *ctx)
1324 {
1325 	return rcu_dereference_protected(ctx->engines, true);
1326 }
1327 
1328 static void __reset_context(struct i915_gem_context *ctx,
1329 			    struct intel_engine_cs *engine)
1330 {
1331 	intel_gt_handle_error(engine->gt, engine->mask, 0,
1332 			      "context closure in %s", ctx->name);
1333 }
1334 
1335 static bool __cancel_engine(struct intel_engine_cs *engine)
1336 {
1337 	/*
1338 	 * Send a "high priority pulse" down the engine to cause the
1339 	 * current request to be momentarily preempted. (If it fails to
1340 	 * be preempted, it will be reset). As we have marked our context
1341 	 * as banned, any incomplete request, including any running, will
1342 	 * be skipped following the preemption.
1343 	 *
1344 	 * If there is no hangchecking (one of the reasons why we try to
1345 	 * cancel the context) and no forced preemption, there may be no
1346 	 * means by which we reset the GPU and evict the persistent hog.
1347 	 * Ergo if we are unable to inject a preemptive pulse that can
1348 	 * kill the banned context, we fallback to doing a local reset
1349 	 * instead.
1350 	 */
1351 	return intel_engine_pulse(engine) == 0;
1352 }
1353 
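/*
 * Find the physical engine, if any, on which this context is currently
 * executing: either directly from the backend's inflight tracking or by
 * scanning the timeline for a request still active on an engine.
 */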
1354 static struct intel_engine_cs *active_engine(struct intel_context *ce)
1355 {
1356 	struct intel_engine_cs *engine = NULL;
1357 	struct i915_request *rq;
1358 
1359 	if (intel_context_has_inflight(ce))
1360 		return intel_context_inflight(ce);
1361 
1362 	if (!ce->timeline)
1363 		return NULL;
1364 
1365 	/*
1366 	 * rq->link is only SLAB_TYPESAFE_BY_RCU, we need to hold a reference
1367 	 * to the request to prevent it being transferred to a new timeline
1368 	 * (and onto a new timeline->requests list).
1369 	 */
1370 	rcu_read_lock();
1371 	list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
1372 		bool found;
1373 
1374 		/* timeline is already completed up to this point? */
1375 		if (!i915_request_get_rcu(rq))
1376 			break;
1377 
1378 		/* Check with the backend if the request is inflight */
1379 		found = true;
1380 		if (likely(rcu_access_pointer(rq->timeline) == ce->timeline))
1381 			found = i915_request_active_engine(rq, &engine);
1382 
1383 		i915_request_put(rq);
1384 		if (found)
1385 			break;
1386 	}
1387 	rcu_read_unlock();
1388 
1389 	return engine;
1390 }
1391 
1392 static void
1393 kill_engines(struct i915_gem_engines *engines, bool exit, bool persistent)
1394 {
1395 	struct i915_gem_engines_iter it;
1396 	struct intel_context *ce;
1397 
1398 	/*
1399 	 * Map the user's engine back to the actual engines; one virtual
1400 	 * engine will be mapped to multiple engines, and using ctx->engine[]
1401 	 * the same engine may have multiple instances in the user's map.
1402 	 * However, we only care about pending requests, so only include
1403 	 * engines on which there are incomplete requests.
1404 	 */
1405 	for_each_gem_engine(ce, engines, it) {
1406 		struct intel_engine_cs *engine;
1407 
1408 		if ((exit || !persistent) && intel_context_revoke(ce))
1409 			continue; /* Already marked. */
1410 
1411 		/*
1412 		 * Check the current active state of this context; if we
1413 		 * are currently executing on the GPU we need to evict
1414 		 * ourselves. On the other hand, if we haven't yet been
1415 		 * submitted to the GPU or if everything is complete,
1416 		 * we have nothing to do.
1417 		 */
1418 		engine = active_engine(ce);
1419 
1420 		/* First attempt to gracefully cancel the context */
1421 		if (engine && !__cancel_engine(engine) && (exit || !persistent))
1422 			/*
1423 			 * If we are unable to send a preemptive pulse to bump
1424 			 * the context from the GPU, we have to resort to a full
1425 			 * reset. We hope the collateral damage is worth it.
1426 			 */
1427 			__reset_context(engines->ctx, engine);
1428 	}
1429 }
1430 
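/*
 * For a closed context, take each stale engine set still tracked under
 * ctx->stale and kill its engines, falling back to an engine reset when
 * a preemptive pulse fails and either hangcheck is disabled or the
 * context may not persist.
 */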
1431 static void kill_context(struct i915_gem_context *ctx)
1432 {
1433 	struct i915_gem_engines *pos, *next;
1434 
1435 	spin_lock_irq(&ctx->stale.lock);
1436 	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
1437 	list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) {
1438 		if (!i915_sw_fence_await(&pos->fence)) {
1439 			list_del_init(&pos->link);
1440 			continue;
1441 		}
1442 
1443 		spin_unlock_irq(&ctx->stale.lock);
1444 
1445 		kill_engines(pos, !ctx->i915->params.enable_hangcheck,
1446 			     i915_gem_context_is_persistent(ctx));
1447 
1448 		spin_lock_irq(&ctx->stale.lock);
1449 		GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence));
1450 		list_safe_reset_next(pos, next, link);
1451 		list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */
1452 
1453 		i915_sw_fence_complete(&pos->fence);
1454 	}
1455 	spin_unlock_irq(&ctx->stale.lock);
1456 }
1457 
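/*
 * Keep the old engine set alive until it has idled: close each context
 * and, for those still pinned and active, make the stale-engines fence
 * wait for them to be scheduled out and retired. If we raced with the
 * context already being closed, kill the engines immediately instead.
 */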
1458 static void engines_idle_release(struct i915_gem_context *ctx,
1459 				 struct i915_gem_engines *engines)
1460 {
1461 	struct i915_gem_engines_iter it;
1462 	struct intel_context *ce;
1463 
1464 	INIT_LIST_HEAD(&engines->link);
1465 
1466 	engines->ctx = i915_gem_context_get(ctx);
1467 
1468 	for_each_gem_engine(ce, engines, it) {
1469 		int err;
1470 
1471 		/* serialises with execbuf */
1472 		intel_context_close(ce);
1473 		if (!intel_context_pin_if_active(ce))
1474 			continue;
1475 
1476 		/* Wait until context is finally scheduled out and retired */
1477 		err = i915_sw_fence_await_active(&engines->fence,
1478 						 &ce->active,
1479 						 I915_ACTIVE_AWAIT_BARRIER);
1480 		intel_context_unpin(ce);
1481 		if (err)
1482 			goto kill;
1483 	}
1484 
1485 	spin_lock_irq(&ctx->stale.lock);
1486 	if (!i915_gem_context_is_closed(ctx))
1487 		list_add_tail(&engines->link, &ctx->stale.engines);
1488 	spin_unlock_irq(&ctx->stale.lock);
1489 
1490 kill:
1491 	if (list_empty(&engines->link)) /* raced, already closed */
1492 		kill_engines(engines, true,
1493 			     i915_gem_context_is_persistent(ctx));
1494 
1495 	i915_sw_fence_commit(&engines->fence);
1496 }
1497 
1498 static void set_closed_name(struct i915_gem_context *ctx)
1499 {
1500 	char *s;
1501 
1502 	/* Replace '[]' with '<>' to indicate closed in debug prints */
1503 
1504 	s = strrchr(ctx->name, '[');
1505 	if (!s)
1506 		return;
1507 
1508 	*s = '<';
1509 
1510 	s = strchr(s + 1, ']');
1511 	if (s)
1512 		*s = '>';
1513 }
1514 
1515 static void context_close(struct i915_gem_context *ctx)
1516 {
1517 	struct i915_drm_client *client;
1518 
1519 	/* Flush any concurrent set_engines() */
1520 	mutex_lock(&ctx->engines_mutex);
1521 	unpin_engines(__context_engines_static(ctx));
1522 	engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1));
1523 	i915_gem_context_set_closed(ctx);
1524 	mutex_unlock(&ctx->engines_mutex);
1525 
1526 	mutex_lock(&ctx->mutex);
1527 
1528 	set_closed_name(ctx);
1529 
1530 	/*
1531 	 * The LUT uses the VMA as a backpointer to unref the object,
1532 	 * so we need to clear the LUT before we close all the VMA (inside
1533 	 * the ppgtt).
1534 	 */
1535 	lut_close(ctx);
1536 
1537 	ctx->file_priv = ERR_PTR(-EBADF);
1538 
1539 	client = ctx->client;
1540 	if (client) {
1541 		spin_lock(&client->ctx_lock);
1542 		list_del_rcu(&ctx->client_link);
1543 		spin_unlock(&client->ctx_lock);
1544 	}
1545 
1546 	mutex_unlock(&ctx->mutex);
1547 
1548 	/*
1549 	 * If the user has disabled hangchecking, we cannot be sure that
1550 	 * the batches will ever complete after the context is closed,
1551 	 * keeping the context and all resources pinned forever. So in this
1552 	 * case we opt to forcibly kill off all remaining requests on
1553 	 * context close.
1554 	 */
1555 	kill_context(ctx);
1556 
1557 	i915_gem_context_put(ctx);
1558 }
1559 
1560 static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
1561 {
1562 	if (i915_gem_context_is_persistent(ctx) == state)
1563 		return 0;
1564 
1565 	if (state) {
1566 		/*
1567 		 * Only contexts that are short-lived [that will expire or be
1568 		 * reset] are allowed to survive past termination. We require
1569 		 * hangcheck to ensure that the persistent requests are healthy.
1570 		 */
1571 		if (!ctx->i915->params.enable_hangcheck)
1572 			return -EINVAL;
1573 
1574 		i915_gem_context_set_persistence(ctx);
1575 	} else {
1576 		/* To cancel a context we use "preempt-to-idle" */
1577 		if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
1578 			return -ENODEV;
1579 
1580 		/*
1581 		 * If the cancel fails, we then need to reset, cleanly!
1582 		 *
1583 		 * If the per-engine reset fails, all hope is lost! We resort
1584 		 * to a full GPU reset in that unlikely case, but realistically
1585 		 * if the engine could not reset, the full reset does not fare
1586 		 * much better. The damage has been done.
1587 		 *
1588 		 * However, if we cannot reset an engine by itself, we cannot
1589 		 * cleanup a hanging persistent context without causing
1590 		 * collateral damage, and we should not pretend we can by
1591 		 * exposing the interface.
1592 		 */
1593 		if (!intel_has_reset_engine(to_gt(ctx->i915)))
1594 			return -ENODEV;
1595 
1596 		i915_gem_context_clear_persistence(ctx);
1597 	}
1598 
1599 	return 0;
1600 }
1601 
1602 static struct i915_gem_context *
1603 i915_gem_create_context(struct drm_i915_private *i915,
1604 			const struct i915_gem_proto_context *pc)
1605 {
1606 	struct i915_gem_context *ctx;
1607 	struct i915_address_space *vm = NULL;
1608 	struct i915_gem_engines *e;
1609 	int err;
1610 	int i;
1611 
1612 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1613 	if (!ctx)
1614 		return ERR_PTR(-ENOMEM);
1615 
1616 	kref_init(&ctx->ref);
1617 	ctx->i915 = i915;
1618 	ctx->sched = pc->sched;
1619 	mutex_init(&ctx->mutex);
1620 	INIT_LIST_HEAD(&ctx->link);
1621 	INIT_WORK(&ctx->release_work, i915_gem_context_release_work);
1622 
1623 	spin_lock_init(&ctx->stale.lock);
1624 	INIT_LIST_HEAD(&ctx->stale.engines);
1625 
1626 	if (pc->vm) {
1627 		vm = i915_vm_get(pc->vm);
1628 	} else if (HAS_FULL_PPGTT(i915)) {
1629 		struct i915_ppgtt *ppgtt;
1630 
1631 		ppgtt = i915_ppgtt_create(to_gt(i915), 0);
1632 		if (IS_ERR(ppgtt)) {
1633 			drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
1634 				PTR_ERR(ppgtt));
1635 			err = PTR_ERR(ppgtt);
1636 			goto err_ctx;
1637 		}
1638 		ppgtt->vm.fpriv = pc->fpriv;
1639 		vm = &ppgtt->vm;
1640 	}
1641 	if (vm)
1642 		ctx->vm = vm;
1643 
1644 	/* Assign early so intel_context_set_gem can access these flags */
1645 	ctx->user_flags = pc->user_flags;
1646 
1647 	mutex_init(&ctx->engines_mutex);
1648 	if (pc->num_user_engines >= 0) {
1649 		i915_gem_context_set_user_engines(ctx);
1650 		e = user_engines(ctx, pc->num_user_engines, pc->user_engines);
1651 	} else {
1652 		i915_gem_context_clear_user_engines(ctx);
1653 		e = default_engines(ctx, pc->legacy_rcs_sseu);
1654 	}
1655 	if (IS_ERR(e)) {
1656 		err = PTR_ERR(e);
1657 		goto err_vm;
1658 	}
1659 	RCU_INIT_POINTER(ctx->engines, e);
1660 
1661 	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
1662 	mutex_init(&ctx->lut_mutex);
1663 
1664 	/* NB: Mark all slices as needing a remap so that when the context first
1665 	 * loads it will restore whatever remap state already exists. If there
1666 	 * is no remap info, it will be a NOP. */
1667 	ctx->remap_slice = ALL_L3_SLICES(i915);
1668 
1669 	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
1670 		ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
1671 
1672 	if (pc->single_timeline) {
1673 		err = drm_syncobj_create(&ctx->syncobj,
1674 					 DRM_SYNCOBJ_CREATE_SIGNALED,
1675 					 NULL);
1676 		if (err)
1677 			goto err_engines;
1678 	}
1679 
1680 	if (pc->uses_protected_content) {
1681 		ctx->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1682 		ctx->uses_protected_content = true;
1683 	}
1684 
1685 	trace_i915_context_create(ctx);
1686 
1687 	return ctx;
1688 
1689 err_engines:
1690 	free_engines(e);
1691 err_vm:
1692 	if (ctx->vm)
1693 		i915_vm_put(ctx->vm);
1694 err_ctx:
1695 	kfree(ctx);
1696 	return ERR_PTR(err);
1697 }
1698 
1699 static void init_contexts(struct i915_gem_contexts *gc)
1700 {
1701 	spin_lock_init(&gc->lock);
1702 	INIT_LIST_HEAD(&gc->list);
1703 }
1704 
1705 void i915_gem_init__contexts(struct drm_i915_private *i915)
1706 {
1707 	init_contexts(&i915->gem.contexts);
1708 }
1709 
1710 /*
1711  * Note that this implicitly consumes the ctx reference, by placing
1712  * the ctx in the context_xa.
1713  */
1714 static void gem_context_register(struct i915_gem_context *ctx,
1715 				 struct drm_i915_file_private *fpriv,
1716 				 u32 id)
1717 {
1718 	struct drm_i915_private *i915 = ctx->i915;
1719 	void *old;
1720 
1721 	ctx->file_priv = fpriv;
1722 
1723 	ctx->pid = get_task_pid(current, PIDTYPE_PID);
1724 	ctx->client = i915_drm_client_get(fpriv->client);
1725 
1726 	snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
1727 		 current->comm, pid_nr(ctx->pid));
1728 
1729 	spin_lock(&ctx->client->ctx_lock);
1730 	list_add_tail_rcu(&ctx->client_link, &ctx->client->ctx_list);
1731 	spin_unlock(&ctx->client->ctx_lock);
1732 
1733 	spin_lock(&i915->gem.contexts.lock);
1734 	list_add_tail(&ctx->link, &i915->gem.contexts.list);
1735 	spin_unlock(&i915->gem.contexts.lock);
1736 
1737 	/* And finally expose ourselves to userspace via the idr */
1738 	old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL);
1739 	WARN_ON(old);
1740 }
1741 
1742 int i915_gem_context_open(struct drm_i915_private *i915,
1743 			  struct drm_file *file)
1744 {
1745 	struct drm_i915_file_private *file_priv = file->driver_priv;
1746 	struct i915_gem_proto_context *pc;
1747 	struct i915_gem_context *ctx;
1748 	int err;
1749 
1750 	mutex_init(&file_priv->proto_context_lock);
1751 	xa_init_flags(&file_priv->proto_context_xa, XA_FLAGS_ALLOC);
1752 
1753 	/* 0 reserved for the default context */
1754 	xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC1);
1755 
1756 	/* 0 reserved for invalid/unassigned ppgtt */
1757 	xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1);
1758 
1759 	pc = proto_context_create(file_priv, i915, 0);
1760 	if (IS_ERR(pc)) {
1761 		err = PTR_ERR(pc);
1762 		goto err;
1763 	}
1764 
1765 	ctx = i915_gem_create_context(i915, pc);
1766 	proto_context_close(i915, pc);
1767 	if (IS_ERR(ctx)) {
1768 		err = PTR_ERR(ctx);
1769 		goto err;
1770 	}
1771 
1772 	gem_context_register(ctx, file_priv, 0);
1773 
1774 	return 0;
1775 
1776 err:
1777 	xa_destroy(&file_priv->vm_xa);
1778 	xa_destroy(&file_priv->context_xa);
1779 	xa_destroy(&file_priv->proto_context_xa);
1780 	mutex_destroy(&file_priv->proto_context_lock);
1781 	return err;
1782 }
1783 
1784 void i915_gem_context_close(struct drm_file *file)
1785 {
1786 	struct drm_i915_file_private *file_priv = file->driver_priv;
1787 	struct i915_gem_proto_context *pc;
1788 	struct i915_address_space *vm;
1789 	struct i915_gem_context *ctx;
1790 	unsigned long idx;
1791 
1792 	xa_for_each(&file_priv->proto_context_xa, idx, pc)
1793 		proto_context_close(file_priv->i915, pc);
1794 	xa_destroy(&file_priv->proto_context_xa);
1795 	mutex_destroy(&file_priv->proto_context_lock);
1796 
1797 	xa_for_each(&file_priv->context_xa, idx, ctx)
1798 		context_close(ctx);
1799 	xa_destroy(&file_priv->context_xa);
1800 
1801 	xa_for_each(&file_priv->vm_xa, idx, vm)
1802 		i915_vm_put(vm);
1803 	xa_destroy(&file_priv->vm_xa);
1804 }
1805 
1806 int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
1807 			     struct drm_file *file)
1808 {
1809 	struct drm_i915_private *i915 = to_i915(dev);
1810 	struct drm_i915_gem_vm_control *args = data;
1811 	struct drm_i915_file_private *file_priv = file->driver_priv;
1812 	struct i915_ppgtt *ppgtt;
1813 	u32 id;
1814 	int err;
1815 
1816 	if (!HAS_FULL_PPGTT(i915))
1817 		return -ENODEV;
1818 
1819 	if (args->flags)
1820 		return -EINVAL;
1821 
1822 	ppgtt = i915_ppgtt_create(to_gt(i915), 0);
1823 	if (IS_ERR(ppgtt))
1824 		return PTR_ERR(ppgtt);
1825 
1826 	if (args->extensions) {
1827 		err = i915_user_extensions(u64_to_user_ptr(args->extensions),
1828 					   NULL, 0,
1829 					   ppgtt);
1830 		if (err)
1831 			goto err_put;
1832 	}
1833 
1834 	err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm,
1835 		       xa_limit_32b, GFP_KERNEL);
1836 	if (err)
1837 		goto err_put;
1838 
1839 	GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
1840 	args->vm_id = id;
1841 	ppgtt->vm.fpriv = file_priv;
1842 	return 0;
1843 
1844 err_put:
1845 	i915_vm_put(&ppgtt->vm);
1846 	return err;
1847 }
1848 
1849 int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
1850 			      struct drm_file *file)
1851 {
1852 	struct drm_i915_file_private *file_priv = file->driver_priv;
1853 	struct drm_i915_gem_vm_control *args = data;
1854 	struct i915_address_space *vm;
1855 
1856 	if (args->flags)
1857 		return -EINVAL;
1858 
1859 	if (args->extensions)
1860 		return -EINVAL;
1861 
1862 	vm = xa_erase(&file_priv->vm_xa, args->vm_id);
1863 	if (!vm)
1864 		return -ENOENT;
1865 
1866 	i915_vm_put(vm);
1867 	return 0;
1868 }
1869 
1870 static int get_ppgtt(struct drm_i915_file_private *file_priv,
1871 		     struct i915_gem_context *ctx,
1872 		     struct drm_i915_gem_context_param *args)
1873 {
1874 	struct i915_address_space *vm;
1875 	int err;
1876 	u32 id;
1877 
1878 	if (!i915_gem_context_has_full_ppgtt(ctx))
1879 		return -ENODEV;
1880 
1881 	vm = ctx->vm;
1882 	GEM_BUG_ON(!vm);
1883 
1884 	/*
1885 	 * Get a reference for the allocated handle.  Once the handle is
1886 	 * visible in the vm_xa table, userspace could try to close it
1887 	 * from under our feet, so we need to hold the extra reference
1888 	 * first.
1889 	 */
1890 	i915_vm_get(vm);
1891 
1892 	err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
1893 	if (err) {
1894 		i915_vm_put(vm);
1895 		return err;
1896 	}
1897 
1898 	GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
1899 	args->value = id;
1900 	args->size = 0;
1901 
1902 	return err;
1903 }
1904 
1905 int
1906 i915_gem_user_to_context_sseu(struct intel_gt *gt,
1907 			      const struct drm_i915_gem_context_param_sseu *user,
1908 			      struct intel_sseu *context)
1909 {
1910 	const struct sseu_dev_info *device = &gt->info.sseu;
1911 	struct drm_i915_private *i915 = gt->i915;
1912 	unsigned int dev_subslice_mask = intel_sseu_get_hsw_subslices(device, 0);
1913 
1914 	/* No zeros in any field. */
1915 	if (!user->slice_mask || !user->subslice_mask ||
1916 	    !user->min_eus_per_subslice || !user->max_eus_per_subslice)
1917 		return -EINVAL;
1918 
1919 	/* Max >= min. */
1920 	if (user->max_eus_per_subslice < user->min_eus_per_subslice)
1921 		return -EINVAL;
1922 
1923 	/*
1924 	 * Some future-proofing on the types since the uAPI is wider than the
1925 	 * current internal implementation.
1926 	 */
1927 	if (overflows_type(user->slice_mask, context->slice_mask) ||
1928 	    overflows_type(user->subslice_mask, context->subslice_mask) ||
1929 	    overflows_type(user->min_eus_per_subslice,
1930 			   context->min_eus_per_subslice) ||
1931 	    overflows_type(user->max_eus_per_subslice,
1932 			   context->max_eus_per_subslice))
1933 		return -EINVAL;
1934 
1935 	/* Check validity against hardware. */
1936 	if (user->slice_mask & ~device->slice_mask)
1937 		return -EINVAL;
1938 
1939 	if (user->subslice_mask & ~dev_subslice_mask)
1940 		return -EINVAL;
1941 
1942 	if (user->max_eus_per_subslice > device->max_eus_per_subslice)
1943 		return -EINVAL;
1944 
1945 	context->slice_mask = user->slice_mask;
1946 	context->subslice_mask = user->subslice_mask;
1947 	context->min_eus_per_subslice = user->min_eus_per_subslice;
1948 	context->max_eus_per_subslice = user->max_eus_per_subslice;
1949 
1950 	/* Part-specific restrictions. */
1951 	if (GRAPHICS_VER(i915) == 11) {
1952 		unsigned int hw_s = hweight8(device->slice_mask);
1953 		unsigned int hw_ss_per_s = hweight8(dev_subslice_mask);
1954 		unsigned int req_s = hweight8(context->slice_mask);
1955 		unsigned int req_ss = hweight8(context->subslice_mask);
1956 
1957 		/*
1958 		 * Only full subslice enablement is possible if more than one
1959 		 * slice is turned on.
1960 		 */
1961 		if (req_s > 1 && req_ss != hw_ss_per_s)
1962 			return -EINVAL;
1963 
1964 		/*
1965 		 * If more than four (SScount bitfield limit) subslices are
1966 		 * requested then the number has to be even.
1967 		 */
1968 		if (req_ss > 4 && (req_ss & 1))
1969 			return -EINVAL;
1970 
1971 		/*
1972 		 * If only one slice is enabled and subslice count is below the
1973 		 * device's full enablement, it must be at most half of all the
1974 		 * available subslices.
1975 		 */
1976 		if (req_s == 1 && req_ss < hw_ss_per_s &&
1977 		    req_ss > (hw_ss_per_s / 2))
1978 			return -EINVAL;
1979 
1980 		/* ABI restriction - VME use case only. */
1981 
1982 		/* All slices or one slice only. */
1983 		if (req_s != 1 && req_s != hw_s)
1984 			return -EINVAL;
1985 
1986 		/*
1987 		 * Half subslices or full enablement only when one slice is
1988 		 * enabled.
1989 		 */
1990 		if (req_s == 1 &&
1991 		    (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
1992 			return -EINVAL;
1993 
1994 		/* No EU configuration changes. */
1995 		if ((user->min_eus_per_subslice !=
1996 		     device->max_eus_per_subslice) ||
1997 		    (user->max_eus_per_subslice !=
1998 		     device->max_eus_per_subslice))
1999 			return -EINVAL;
2000 	}
2001 
2002 	return 0;
2003 }
2004 
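/*
 * Hedged userspace sketch for the SSEU setparam handled by set_sseu()
 * below, assuming the uAPI structures and ioctl number are the usual ones
 * from include/uapi/drm/i915_drm.h; the mask/EU values are placeholders
 * only, since i915_gem_user_to_context_sseu() above decides what a given
 * part actually accepts:
 *
 *	struct drm_i915_gem_context_param_sseu sseu = {
 *		.engine = {
 *			.engine_class = I915_ENGINE_CLASS_RENDER,
 *			.engine_instance = 0,
 *		},
 *		.slice_mask = 0x1,
 *		.subslice_mask = 0xf,
 *		.min_eus_per_subslice = 8,
 *		.max_eus_per_subslice = 8,
 *	};
 *	struct drm_i915_gem_context_param arg = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_SSEU,
 *		.size = sizeof(sseu),
 *		.value = (uintptr_t)&sseu,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
 *
 * Note that set_sseu() restricts this to graphics version 11 and to the
 * render engine.
 */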
2005 static int set_sseu(struct i915_gem_context *ctx,
2006 		    struct drm_i915_gem_context_param *args)
2007 {
2008 	struct drm_i915_private *i915 = ctx->i915;
2009 	struct drm_i915_gem_context_param_sseu user_sseu;
2010 	struct intel_context *ce;
2011 	struct intel_sseu sseu;
2012 	unsigned long lookup;
2013 	int ret;
2014 
2015 	if (args->size < sizeof(user_sseu))
2016 		return -EINVAL;
2017 
2018 	if (GRAPHICS_VER(i915) != 11)
2019 		return -ENODEV;
2020 
2021 	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
2022 			   sizeof(user_sseu)))
2023 		return -EFAULT;
2024 
2025 	if (user_sseu.rsvd)
2026 		return -EINVAL;
2027 
2028 	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
2029 		return -EINVAL;
2030 
2031 	lookup = 0;
2032 	if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
2033 		lookup |= LOOKUP_USER_INDEX;
2034 
2035 	ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
2036 	if (IS_ERR(ce))
2037 		return PTR_ERR(ce);
2038 
2039 	/* Only the render engine supports RPCS configuration. */
2040 	if (ce->engine->class != RENDER_CLASS) {
2041 		ret = -ENODEV;
2042 		goto out_ce;
2043 	}
2044 
2045 	ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu);
2046 	if (ret)
2047 		goto out_ce;
2048 
2049 	ret = intel_context_reconfigure_sseu(ce, sseu);
2050 	if (ret)
2051 		goto out_ce;
2052 
2053 	args->size = sizeof(user_sseu);
2054 
2055 out_ce:
2056 	intel_context_put(ce);
2057 	return ret;
2058 }
2059 
2060 static int
2061 set_persistence(struct i915_gem_context *ctx,
2062 		const struct drm_i915_gem_context_param *args)
2063 {
2064 	if (args->size)
2065 		return -EINVAL;
2066 
2067 	return __context_set_persistence(ctx, args->value);
2068 }
2069 
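/*
 * set_priority() below takes the priority through args->value; the valid
 * range is bounded by validate_priority() earlier in this file (the uAPI
 * bounds are assumed to be the usual I915_CONTEXT_MIN_USER_PRIORITY /
 * I915_CONTEXT_MAX_USER_PRIORITY, with I915_CONTEXT_DEFAULT_PRIORITY == 0).
 * As a side effect, dropping a context below normal priority also disables
 * semaphore usage on any engine that supports timeslicing, as the loop
 * below shows.
 */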
2070 static int set_priority(struct i915_gem_context *ctx,
2071 			const struct drm_i915_gem_context_param *args)
2072 {
2073 	struct i915_gem_engines_iter it;
2074 	struct intel_context *ce;
2075 	int err;
2076 
2077 	err = validate_priority(ctx->i915, args);
2078 	if (err)
2079 		return err;
2080 
2081 	ctx->sched.priority = args->value;
2082 
2083 	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
2084 		if (!intel_engine_has_timeslices(ce->engine))
2085 			continue;
2086 
2087 		if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
2088 		    intel_engine_has_semaphores(ce->engine))
2089 			intel_context_set_use_semaphores(ce);
2090 		else
2091 			intel_context_clear_use_semaphores(ce);
2092 	}
2093 	i915_gem_context_unlock_engines(ctx);
2094 
2095 	return 0;
2096 }
2097 
2098 static int get_protected(struct i915_gem_context *ctx,
2099 			 struct drm_i915_gem_context_param *args)
2100 {
2101 	args->size = 0;
2102 	args->value = i915_gem_context_uses_protected_content(ctx);
2103 
2104 	return 0;
2105 }
2106 
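/*
 * ctx_setparam() handles parameters that may still be changed on a live,
 * finalized context. Parameters in the -EINVAL group at the bottom of the
 * switch are either settable only at creation time (through
 * set_proto_ctx_param() on a proto-context, e.g. VM or ENGINES) or no
 * longer supported at all.
 */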
2107 static int ctx_setparam(struct drm_i915_file_private *fpriv,
2108 			struct i915_gem_context *ctx,
2109 			struct drm_i915_gem_context_param *args)
2110 {
2111 	int ret = 0;
2112 
2113 	switch (args->param) {
2114 	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
2115 		if (args->size)
2116 			ret = -EINVAL;
2117 		else if (args->value)
2118 			i915_gem_context_set_no_error_capture(ctx);
2119 		else
2120 			i915_gem_context_clear_no_error_capture(ctx);
2121 		break;
2122 
2123 	case I915_CONTEXT_PARAM_BANNABLE:
2124 		if (args->size)
2125 			ret = -EINVAL;
2126 		else if (!capable(CAP_SYS_ADMIN) && !args->value)
2127 			ret = -EPERM;
2128 		else if (args->value)
2129 			i915_gem_context_set_bannable(ctx);
2130 		else if (i915_gem_context_uses_protected_content(ctx))
2131 			ret = -EPERM; /* can't clear this for protected contexts */
2132 		else
2133 			i915_gem_context_clear_bannable(ctx);
2134 		break;
2135 
2136 	case I915_CONTEXT_PARAM_RECOVERABLE:
2137 		if (args->size)
2138 			ret = -EINVAL;
2139 		else if (!args->value)
2140 			i915_gem_context_clear_recoverable(ctx);
2141 		else if (i915_gem_context_uses_protected_content(ctx))
2142 			ret = -EPERM; /* can't set this for protected contexts */
2143 		else
2144 			i915_gem_context_set_recoverable(ctx);
2145 		break;
2146 
2147 	case I915_CONTEXT_PARAM_PRIORITY:
2148 		ret = set_priority(ctx, args);
2149 		break;
2150 
2151 	case I915_CONTEXT_PARAM_SSEU:
2152 		ret = set_sseu(ctx, args);
2153 		break;
2154 
2155 	case I915_CONTEXT_PARAM_PERSISTENCE:
2156 		ret = set_persistence(ctx, args);
2157 		break;
2158 
2159 	case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
2160 	case I915_CONTEXT_PARAM_NO_ZEROMAP:
2161 	case I915_CONTEXT_PARAM_BAN_PERIOD:
2162 	case I915_CONTEXT_PARAM_RINGSIZE:
2163 	case I915_CONTEXT_PARAM_VM:
2164 	case I915_CONTEXT_PARAM_ENGINES:
2165 	default:
2166 		ret = -EINVAL;
2167 		break;
2168 	}
2169 
2170 	return ret;
2171 }
2172 
2173 struct create_ext {
2174 	struct i915_gem_proto_context *pc;
2175 	struct drm_i915_file_private *fpriv;
2176 };
2177 
2178 static int create_setparam(struct i915_user_extension __user *ext, void *data)
2179 {
2180 	struct drm_i915_gem_context_create_ext_setparam local;
2181 	const struct create_ext *arg = data;
2182 
2183 	if (copy_from_user(&local, ext, sizeof(local)))
2184 		return -EFAULT;
2185 
2186 	if (local.param.ctx_id)
2187 		return -EINVAL;
2188 
2189 	return set_proto_ctx_param(arg->fpriv, arg->pc, &local.param);
2190 }
2191 
2192 static int invalid_ext(struct i915_user_extension __user *ext, void *data)
2193 {
2194 	return -EINVAL;
2195 }
2196 
2197 static const i915_user_extension_fn create_extensions[] = {
2198 	[I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
2199 	[I915_CONTEXT_CREATE_EXT_CLONE] = invalid_ext,
2200 };
2201 
2202 static bool client_is_banned(struct drm_i915_file_private *file_priv)
2203 {
2204 	return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
2205 }
2206 
2207 static inline struct i915_gem_context *
2208 __context_lookup(struct drm_i915_file_private *file_priv, u32 id)
2209 {
2210 	struct i915_gem_context *ctx;
2211 
2212 	rcu_read_lock();
2213 	ctx = xa_load(&file_priv->context_xa, id);
2214 	if (ctx && !kref_get_unless_zero(&ctx->ref))
2215 		ctx = NULL;
2216 	rcu_read_unlock();
2217 
2218 	return ctx;
2219 }
2220 
2221 static struct i915_gem_context *
2222 finalize_create_context_locked(struct drm_i915_file_private *file_priv,
2223 			       struct i915_gem_proto_context *pc, u32 id)
2224 {
2225 	struct i915_gem_context *ctx;
2226 	void *old;
2227 
2228 	lockdep_assert_held(&file_priv->proto_context_lock);
2229 
2230 	ctx = i915_gem_create_context(file_priv->i915, pc);
2231 	if (IS_ERR(ctx))
2232 		return ctx;
2233 
2234 	/*
2235 	 * One for the xarray and one for the caller.  We need to grab
2236 	 * the reference *prior* to making the ctx visible to userspace
2237 	 * in gem_context_register(), as at any point after that
2238 	 * userspace can try to race us with another thread destroying
2239 	 * the context under our feet.
2240 	 */
2241 	i915_gem_context_get(ctx);
2242 
2243 	gem_context_register(ctx, file_priv, id);
2244 
2245 	old = xa_erase(&file_priv->proto_context_xa, id);
2246 	GEM_BUG_ON(old != pc);
2247 	proto_context_close(file_priv->i915, pc);
2248 
2249 	return ctx;
2250 }
2251 
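/*
 * i915_gem_context_lookup() is the id-to-context lookup used by the
 * getparam and reset-stats ioctls below (and by callers elsewhere in the
 * driver). The fast path is the lockless __context_lookup() above; if the
 * id only names a proto-context (possible on graphics version 12 and
 * earlier, where contexts are registered lazily), the context is finalized
 * here, under proto_context_lock, on first use.
 */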
2252 struct i915_gem_context *
2253 i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
2254 {
2255 	struct i915_gem_proto_context *pc;
2256 	struct i915_gem_context *ctx;
2257 
2258 	ctx = __context_lookup(file_priv, id);
2259 	if (ctx)
2260 		return ctx;
2261 
2262 	mutex_lock(&file_priv->proto_context_lock);
2263 	/* Try one more time under the lock */
2264 	ctx = __context_lookup(file_priv, id);
2265 	if (!ctx) {
2266 		pc = xa_load(&file_priv->proto_context_xa, id);
2267 		if (!pc)
2268 			ctx = ERR_PTR(-ENOENT);
2269 		else
2270 			ctx = finalize_create_context_locked(file_priv, pc, id);
2271 	}
2272 	mutex_unlock(&file_priv->proto_context_lock);
2273 
2274 	return ctx;
2275 }
2276 
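/*
 * Hedged userspace sketch for the create path below, assuming the usual
 * include/uapi/drm/i915_drm.h definitions. A SETPARAM extension lets
 * parameters be applied as part of context creation:
 *
 *	struct drm_i915_gem_context_create_ext_setparam p = {
 *		.base = { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
 *		.param = {
 *			.param = I915_CONTEXT_PARAM_PRIORITY,
 *			.value = 0,
 *		},
 *	};
 *	struct drm_i915_gem_context_create_ext create = {
 *		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
 *		.extensions = (uintptr_t)&p,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
 *	// on success, create.ctx_id holds the new context id
 *
 * create_setparam() above rejects a non-zero param.ctx_id inside the
 * extension, since the context being created does not have an id yet.
 */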
2277 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
2278 				  struct drm_file *file)
2279 {
2280 	struct drm_i915_private *i915 = to_i915(dev);
2281 	struct drm_i915_gem_context_create_ext *args = data;
2282 	struct create_ext ext_data;
2283 	int ret;
2284 	u32 id;
2285 
2286 	if (!DRIVER_CAPS(i915)->has_logical_contexts)
2287 		return -ENODEV;
2288 
2289 	if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
2290 		return -EINVAL;
2291 
2292 	ret = intel_gt_terminally_wedged(to_gt(i915));
2293 	if (ret)
2294 		return ret;
2295 
2296 	ext_data.fpriv = file->driver_priv;
2297 	if (client_is_banned(ext_data.fpriv)) {
2298 		drm_dbg(&i915->drm,
2299 			"client %s[%d] banned from creating ctx\n",
2300 			current->comm, task_pid_nr(current));
2301 		return -EIO;
2302 	}
2303 
2304 	ext_data.pc = proto_context_create(file->driver_priv, i915,
2305 					   args->flags);
2306 	if (IS_ERR(ext_data.pc))
2307 		return PTR_ERR(ext_data.pc);
2308 
2309 	if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
2310 		ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
2311 					   create_extensions,
2312 					   ARRAY_SIZE(create_extensions),
2313 					   &ext_data);
2314 		if (ret)
2315 			goto err_pc;
2316 	}
2317 
2318 	if (GRAPHICS_VER(i915) > 12) {
2319 		struct i915_gem_context *ctx;
2320 
2321 		/* Get ourselves a context ID */
2322 		ret = xa_alloc(&ext_data.fpriv->context_xa, &id, NULL,
2323 			       xa_limit_32b, GFP_KERNEL);
2324 		if (ret)
2325 			goto err_pc;
2326 
2327 		ctx = i915_gem_create_context(i915, ext_data.pc);
2328 		if (IS_ERR(ctx)) {
2329 			ret = PTR_ERR(ctx);
2330 			goto err_pc;
2331 		}
2332 
2333 		proto_context_close(i915, ext_data.pc);
2334 		gem_context_register(ctx, ext_data.fpriv, id);
2335 	} else {
2336 		ret = proto_context_register(ext_data.fpriv, ext_data.pc, &id);
2337 		if (ret < 0)
2338 			goto err_pc;
2339 	}
2340 
2341 	args->ctx_id = id;
2342 
2343 	return 0;
2344 
2345 err_pc:
2346 	proto_context_close(i915, ext_data.pc);
2347 	return ret;
2348 }
2349 
2350 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
2351 				   struct drm_file *file)
2352 {
2353 	struct drm_i915_gem_context_destroy *args = data;
2354 	struct drm_i915_file_private *file_priv = file->driver_priv;
2355 	struct i915_gem_proto_context *pc;
2356 	struct i915_gem_context *ctx;
2357 
2358 	if (args->pad != 0)
2359 		return -EINVAL;
2360 
2361 	if (!args->ctx_id)
2362 		return -ENOENT;
2363 
2364 	/* We need to hold the proto-context lock here to prevent races
2365 	 * with finalize_create_context_locked().
2366 	 */
2367 	mutex_lock(&file_priv->proto_context_lock);
2368 	ctx = xa_erase(&file_priv->context_xa, args->ctx_id);
2369 	pc = xa_erase(&file_priv->proto_context_xa, args->ctx_id);
2370 	mutex_unlock(&file_priv->proto_context_lock);
2371 
2372 	if (!ctx && !pc)
2373 		return -ENOENT;
2374 	GEM_WARN_ON(ctx && pc);
2375 
2376 	if (pc)
2377 		proto_context_close(file_priv->i915, pc);
2378 
2379 	if (ctx)
2380 		context_close(ctx);
2381 
2382 	return 0;
2383 }
2384 
2385 static int get_sseu(struct i915_gem_context *ctx,
2386 		    struct drm_i915_gem_context_param *args)
2387 {
2388 	struct drm_i915_gem_context_param_sseu user_sseu;
2389 	struct intel_context *ce;
2390 	unsigned long lookup;
2391 	int err;
2392 
2393 	if (args->size == 0)
2394 		goto out;
2395 	else if (args->size < sizeof(user_sseu))
2396 		return -EINVAL;
2397 
2398 	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
2399 			   sizeof(user_sseu)))
2400 		return -EFAULT;
2401 
2402 	if (user_sseu.rsvd)
2403 		return -EINVAL;
2404 
2405 	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
2406 		return -EINVAL;
2407 
2408 	lookup = 0;
2409 	if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
2410 		lookup |= LOOKUP_USER_INDEX;
2411 
2412 	ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
2413 	if (IS_ERR(ce))
2414 		return PTR_ERR(ce);
2415 
2416 	err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
2417 	if (err) {
2418 		intel_context_put(ce);
2419 		return err;
2420 	}
2421 
2422 	user_sseu.slice_mask = ce->sseu.slice_mask;
2423 	user_sseu.subslice_mask = ce->sseu.subslice_mask;
2424 	user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
2425 	user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
2426 
2427 	intel_context_unlock_pinned(ce);
2428 	intel_context_put(ce);
2429 
2430 	if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
2431 			 sizeof(user_sseu)))
2432 		return -EFAULT;
2433 
2434 out:
2435 	args->size = sizeof(user_sseu);
2436 
2437 	return 0;
2438 }
2439 
2440 int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
2441 				    struct drm_file *file)
2442 {
2443 	struct drm_i915_file_private *file_priv = file->driver_priv;
2444 	struct drm_i915_gem_context_param *args = data;
2445 	struct i915_gem_context *ctx;
2446 	struct i915_address_space *vm;
2447 	int ret = 0;
2448 
2449 	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2450 	if (IS_ERR(ctx))
2451 		return PTR_ERR(ctx);
2452 
2453 	switch (args->param) {
2454 	case I915_CONTEXT_PARAM_GTT_SIZE:
2455 		args->size = 0;
2456 		vm = i915_gem_context_get_eb_vm(ctx);
2457 		args->value = vm->total;
2458 		i915_vm_put(vm);
2459 
2460 		break;
2461 
2462 	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
2463 		args->size = 0;
2464 		args->value = i915_gem_context_no_error_capture(ctx);
2465 		break;
2466 
2467 	case I915_CONTEXT_PARAM_BANNABLE:
2468 		args->size = 0;
2469 		args->value = i915_gem_context_is_bannable(ctx);
2470 		break;
2471 
2472 	case I915_CONTEXT_PARAM_RECOVERABLE:
2473 		args->size = 0;
2474 		args->value = i915_gem_context_is_recoverable(ctx);
2475 		break;
2476 
2477 	case I915_CONTEXT_PARAM_PRIORITY:
2478 		args->size = 0;
2479 		args->value = ctx->sched.priority;
2480 		break;
2481 
2482 	case I915_CONTEXT_PARAM_SSEU:
2483 		ret = get_sseu(ctx, args);
2484 		break;
2485 
2486 	case I915_CONTEXT_PARAM_VM:
2487 		ret = get_ppgtt(file_priv, ctx, args);
2488 		break;
2489 
2490 	case I915_CONTEXT_PARAM_PERSISTENCE:
2491 		args->size = 0;
2492 		args->value = i915_gem_context_is_persistent(ctx);
2493 		break;
2494 
2495 	case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
2496 		ret = get_protected(ctx, args);
2497 		break;
2498 
2499 	case I915_CONTEXT_PARAM_NO_ZEROMAP:
2500 	case I915_CONTEXT_PARAM_BAN_PERIOD:
2501 	case I915_CONTEXT_PARAM_ENGINES:
2502 	case I915_CONTEXT_PARAM_RINGSIZE:
2503 	default:
2504 		ret = -EINVAL;
2505 		break;
2506 	}
2507 
2508 	i915_gem_context_put(ctx);
2509 	return ret;
2510 }
2511 
2512 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
2513 				    struct drm_file *file)
2514 {
2515 	struct drm_i915_file_private *file_priv = file->driver_priv;
2516 	struct drm_i915_gem_context_param *args = data;
2517 	struct i915_gem_proto_context *pc;
2518 	struct i915_gem_context *ctx;
2519 	int ret = 0;
2520 
2521 	mutex_lock(&file_priv->proto_context_lock);
2522 	ctx = __context_lookup(file_priv, args->ctx_id);
2523 	if (!ctx) {
2524 		pc = xa_load(&file_priv->proto_context_xa, args->ctx_id);
2525 		if (pc) {
2526 			/* Contexts should be finalized inside
2527 			 * GEM_CONTEXT_CREATE starting with graphics
2528 			 * version 13.
2529 			 */
2530 			WARN_ON(GRAPHICS_VER(file_priv->i915) > 12);
2531 			ret = set_proto_ctx_param(file_priv, pc, args);
2532 		} else {
2533 			ret = -ENOENT;
2534 		}
2535 	}
2536 	mutex_unlock(&file_priv->proto_context_lock);
2537 
2538 	if (ctx) {
2539 		ret = ctx_setparam(file_priv, ctx, args);
2540 		i915_gem_context_put(ctx);
2541 	}
2542 
2543 	return ret;
2544 }
2545 
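/*
 * Hedged userspace sketch for the reset-stats query handled below,
 * assuming the usual DRM_IOCTL_I915_GET_RESET_STATS definition from
 * include/uapi/drm/i915_drm.h:
 *
 *	struct drm_i915_reset_stats rs = { .ctx_id = ctx_id };
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &rs) == 0) {
 *		// rs.batch_active:  hangs blamed on this context (guilty_count)
 *		// rs.batch_pending: hangs it was merely active for (active_count)
 *		// rs.reset_count:   global reset count, CAP_SYS_ADMIN only
 *	}
 */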
2546 int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
2547 				       void *data, struct drm_file *file)
2548 {
2549 	struct drm_i915_private *i915 = to_i915(dev);
2550 	struct drm_i915_reset_stats *args = data;
2551 	struct i915_gem_context *ctx;
2552 
2553 	if (args->flags || args->pad)
2554 		return -EINVAL;
2555 
2556 	ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
2557 	if (IS_ERR(ctx))
2558 		return PTR_ERR(ctx);
2559 
2560 	/*
2561 	 * We opt for unserialised reads here. This may result in tearing
2562 	 * in the extremely unlikely event of a GPU hang on this context while
2563 	 * we are querying its stats. If we need that extra layer of protection,
2564 	 * we should wrap the hangstats with a seqlock.
2565 	 */
2566 
2567 	if (capable(CAP_SYS_ADMIN))
2568 		args->reset_count = i915_reset_count(&i915->gpu_error);
2569 	else
2570 		args->reset_count = 0;
2571 
2572 	args->batch_active = atomic_read(&ctx->guilty_count);
2573 	args->batch_pending = atomic_read(&ctx->active_count);
2574 
2575 	i915_gem_context_put(ctx);
2576 	return 0;
2577 }
2578 
2579 /* GEM context-engines iterator: for_each_gem_engine() */
2580 struct intel_context *
2581 i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
2582 {
2583 	const struct i915_gem_engines *e = it->engines;
2584 	struct intel_context *ctx;
2585 
2586 	if (unlikely(!e))
2587 		return NULL;
2588 
2589 	do {
2590 		if (it->idx >= e->num_engines)
2591 			return NULL;
2592 
2593 		ctx = e->engines[it->idx++];
2594 	} while (!ctx);
2595 
2596 	return ctx;
2597 }
2598 
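/*
 * Typical in-kernel use of the iterator above, mirroring set_priority()
 * earlier in this file (sketch only):
 *
 *	struct i915_gem_engines_iter it;
 *	struct intel_context *ce;
 *
 *	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
 *		...
 *	}
 *	i915_gem_context_unlock_engines(ctx);
 *
 * i915_gem_context_lock_engines() returns the context's engine array with
 * the appropriate lock held for the walk, and the iterator skips any NULL
 * slots (hence the do/while above).
 */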
2599 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2600 #include "selftests/mock_context.c"
2601 #include "selftests/i915_gem_context.c"
2602 #endif
2603 
2604 void i915_gem_context_module_exit(void)
2605 {
2606 	kmem_cache_destroy(slab_luts);
2607 }
2608 
2609 int __init i915_gem_context_module_init(void)
2610 {
2611 	slab_luts = KMEM_CACHE(i915_lut_handle, 0);
2612 	if (!slab_luts)
2613 		return -ENOMEM;
2614 
2615 	return 0;
2616 }
2617