/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_reset.h"
#include "i915_selftest.h"

#include "gem/selftests/igt_gem_utils.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_live_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/mock_drm.h"
#include "selftests/mock_gem_device.h"

#include "huge_gem_object.h"
#include "igt_gem_utils.h"

#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))

static inline struct i915_address_space *ctx_vm(struct i915_gem_context *ctx)
{
	/* single threaded, private ctx */
	return rcu_dereference_protected(ctx->vm, true);
}

static int live_nop_switch(void *arg)
{
	const unsigned int nctx = 1024;
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context **ctx;
	struct igt_live_test t;
	struct file *file;
	unsigned long n;
	int err = -ENODEV;
	/*
	 * Create as many contexts as we can feasibly get away with
	 * and check we can switch between them rapidly.
	 *
	 * Serves as a very simple stress test for submission and HW switching
	 * between contexts.
	 */

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto out_file;
	}

	for (n = 0; n < nctx; n++) {
		ctx[n] = live_context(i915, file);
		if (IS_ERR(ctx[n])) {
			err = PTR_ERR(ctx[n]);
			goto out_file;
		}
	}

	for_each_uabi_engine(engine, i915) {
		struct i915_request *rq = NULL;
		unsigned long end_time, prime;
		ktime_t times[2] = {};

		times[0] = ktime_get_raw();
		for (n = 0; n < nctx; n++) {
			struct i915_request *this;

			this = igt_request_alloc(ctx[n], engine);
			if (IS_ERR(this)) {
				err = PTR_ERR(this);
				goto out_file;
			}
			if (rq) {
				i915_request_await_dma_fence(this, &rq->fence);
				i915_request_put(rq);
			}
			rq = i915_request_get(this);
			i915_request_add(this);
		}
		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
			pr_err("Failed to populate %d contexts\n", nctx);
			intel_gt_set_wedged(&i915->gt);
			i915_request_put(rq);
			err = -EIO;
			goto out_file;
		}
		i915_request_put(rq);

		times[1] = ktime_get_raw();

		pr_info("Populated %d contexts on %s in %lluns\n",
			nctx, engine->name, ktime_to_ns(times[1] - times[0]));

		err = igt_live_test_begin(&t, i915, __func__, engine->name);
		if (err)
			goto out_file;

		end_time = jiffies + i915_selftest.timeout_jiffies;
		for_each_prime_number_from(prime, 2, 8192) {
			times[1] = ktime_get_raw();

			rq = NULL;
			for (n = 0; n < prime; n++) {
				struct i915_request *this;

				this = igt_request_alloc(ctx[n % nctx], engine);
				if (IS_ERR(this)) {
					err = PTR_ERR(this);
					goto out_file;
				}

				if (rq) { /* Force submission order */
					i915_request_await_dma_fence(this, &rq->fence);
					i915_request_put(rq);
				}

				/*
				 * This space is left intentionally blank.
				 *
				 * We do not actually want to perform any
				 * action with this request, we just want
				 * to measure the latency in allocation
				 * and submission of our breadcrumbs -
				 * ensuring that the bare request is sufficient
				 * for the system to work (i.e. proper HEAD
				 * tracking of the rings, interrupt handling,
				 * etc). It also gives us the lowest bounds
				 * for latency.
				 */

				rq = i915_request_get(this);
				i915_request_add(this);
			}
			GEM_BUG_ON(!rq);
			if (i915_request_wait(rq, 0, HZ / 5) < 0) {
				pr_err("Switching between %ld contexts timed out\n",
				       prime);
				intel_gt_set_wedged(&i915->gt);
				i915_request_put(rq);
				break;
			}
			i915_request_put(rq);

			times[1] = ktime_sub(ktime_get_raw(), times[1]);
			if (prime == 2)
				times[0] = times[1];

			if (__igt_timeout(end_time, NULL))
				break;
		}

		err = igt_live_test_end(&t);
		if (err)
			goto out_file;

		pr_info("Switch latencies on %s: 1 = %lluns, %lu = %lluns\n",
			engine->name,
			ktime_to_ns(times[0]),
			prime - 1, div64_u64(ktime_to_ns(times[1]), prime - 1));
	}

out_file:
	fput(file);
	return err;
}

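/*
 * Each parallel_switch worker owns one engine and ping-pongs between two
 * pinned contexts (ce[0] and ce[1]) from its own kthread, so that every
 * engine is switching contexts concurrently.
 */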
struct parallel_switch {
	struct task_struct *tsk;
	struct intel_context *ce[2];
};

static int __live_parallel_switch1(void *data)
{
	struct parallel_switch *arg = data;
	IGT_TIMEOUT(end_time);
	unsigned long count;

	count = 0;
	do {
		struct i915_request *rq = NULL;
		int err, n;

		err = 0;
		for (n = 0; !err && n < ARRAY_SIZE(arg->ce); n++) {
			struct i915_request *prev = rq;

			rq = i915_request_create(arg->ce[n]);
			if (IS_ERR(rq)) {
				i915_request_put(prev);
				return PTR_ERR(rq);
			}

			i915_request_get(rq);
			if (prev) {
				err = i915_request_await_dma_fence(rq, &prev->fence);
				i915_request_put(prev);
			}

			i915_request_add(rq);
		}
		if (i915_request_wait(rq, 0, HZ / 5) < 0)
			err = -ETIME;
		i915_request_put(rq);
		if (err)
			return err;

		count++;
	} while (!__igt_timeout(end_time, NULL));

	pr_info("%s: %lu switches (sync)\n", arg->ce[0]->engine->name, count);
	return 0;
}

static int __live_parallel_switchN(void *data)
{
	struct parallel_switch *arg = data;
	struct i915_request *rq = NULL;
	IGT_TIMEOUT(end_time);
	unsigned long count;
	int n;

	count = 0;
	do {
		for (n = 0; n < ARRAY_SIZE(arg->ce); n++) {
			struct i915_request *prev = rq;
			int err = 0;

			rq = i915_request_create(arg->ce[n]);
			if (IS_ERR(rq)) {
				i915_request_put(prev);
				return PTR_ERR(rq);
			}

			i915_request_get(rq);
			if (prev) {
				err = i915_request_await_dma_fence(rq, &prev->fence);
				i915_request_put(prev);
			}

			i915_request_add(rq);
			if (err) {
				i915_request_put(rq);
				return err;
			}
		}

		count++;
	} while (!__igt_timeout(end_time, NULL));
	i915_request_put(rq);

	pr_info("%s: %lu switches (many)\n", arg->ce[0]->engine->name, count);
	return 0;
}

static int live_parallel_switch(void *arg)
{
	struct drm_i915_private *i915 = arg;
	static int (* const func[])(void *arg) = {
		__live_parallel_switch1,
		__live_parallel_switchN,
		NULL,
	};
	struct parallel_switch *data = NULL;
	struct i915_gem_engines *engines;
	struct i915_gem_engines_iter it;
	int (* const *fn)(void *arg);
	struct i915_gem_context *ctx;
	struct intel_context *ce;
	struct file *file;
	int n, m, count;
	int err = 0;

	/*
	 * Check we can process switches on all engines simultaneously.
	 */

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_file;
	}

	engines = i915_gem_context_lock_engines(ctx);
	count = engines->num_engines;

	data = kcalloc(count, sizeof(*data), GFP_KERNEL);
	if (!data) {
		i915_gem_context_unlock_engines(ctx);
		err = -ENOMEM;
		goto out_file;
	}

	m = 0; /* Use the first context as our template for the engines */
	for_each_gem_engine(ce, engines, it) {
		err = intel_context_pin(ce);
		if (err) {
			i915_gem_context_unlock_engines(ctx);
			goto out;
		}
		data[m++].ce[0] = intel_context_get(ce);
	}
	i915_gem_context_unlock_engines(ctx);

	/* Clone the same set of engines into the other contexts */
	for (n = 1; n < ARRAY_SIZE(data->ce); n++) {
		ctx = live_context(i915, file);
		if (IS_ERR(ctx)) {
			err = PTR_ERR(ctx);
			goto out;
		}

		for (m = 0; m < count; m++) {
			if (!data[m].ce[0])
				continue;

			ce = intel_context_create(data[m].ce[0]->engine);
			if (IS_ERR(ce)) {
				err = PTR_ERR(ce);
				goto out;
			}

			err = intel_context_pin(ce);
			if (err) {
				intel_context_put(ce);
				goto out;
			}

			data[m].ce[n] = ce;
		}
	}

	for (fn = func; !err && *fn; fn++) {
		struct igt_live_test t;
		int n;

		err = igt_live_test_begin(&t, i915, __func__, "");
		if (err)
			break;

		for (n = 0; n < count; n++) {
			if (!data[n].ce[0])
				continue;

			data[n].tsk = kthread_run(*fn, &data[n],
						  "igt/parallel:%s",
						  data[n].ce[0]->engine->name);
			if (IS_ERR(data[n].tsk)) {
				err = PTR_ERR(data[n].tsk);
				break;
			}
			get_task_struct(data[n].tsk);
		}

		yield(); /* start all threads before we kthread_stop() */

		for (n = 0; n < count; n++) {
			int status;

			if (IS_ERR_OR_NULL(data[n].tsk))
				continue;

			status = kthread_stop(data[n].tsk);
			if (status && !err)
				err = status;

			put_task_struct(data[n].tsk);
			data[n].tsk = NULL;
		}

		if (igt_live_test_end(&t))
			err = -EIO;
	}

out:
	for (n = 0; n < count; n++) {
		for (m = 0; m < ARRAY_SIZE(data->ce); m++) {
			if (!data[n].ce[m])
				continue;

			intel_context_unpin(data[n].ce[m]);
			intel_context_put(data[n].ce[m]);
		}
	}
	kfree(data);
out_file:
	fput(file);
	return err;
}

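/*
 * The huge_gem_object is only sparsely backed: many GTT pages alias onto a
 * much smaller set of physical pages. real_page_count() is the size of the
 * physical backing store, fake_page_count() the size presented via the GTT.
 */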
static unsigned long real_page_count(struct drm_i915_gem_object *obj)
{
	return huge_gem_object_phys_size(obj) >> PAGE_SHIFT;
}

static unsigned long fake_page_count(struct drm_i915_gem_object *obj)
{
	return huge_gem_object_dma_size(obj) >> PAGE_SHIFT;
}

static int gpu_fill(struct intel_context *ce,
		    struct drm_i915_gem_object *obj,
		    unsigned int dw)
{
	struct i915_vma *vma;
	int err;

	GEM_BUG_ON(obj->base.size > ce->vm->total);
	GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));

	vma = i915_vma_instance(obj, ce->vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_vma_pin(vma, 0, 0, PIN_HIGH | PIN_USER);
	if (err)
		return err;

	/*
	 * Within the GTT the huge object maps every page onto
	 * its 1024 real pages (using phys_pfn = dma_pfn % 1024).
	 * We set the nth dword within the page using the nth
	 * mapping via the GTT - this should exercise the GTT mapping
	 * whilst checking that each context provides a unique view
	 * into the object.
	 */
	err = igt_gpu_fill_dw(ce, vma,
			      (dw * real_page_count(obj)) << PAGE_SHIFT |
			      (dw * sizeof(u32)),
			      real_page_count(obj),
			      dw);
	i915_vma_unpin(vma);

	return err;
}

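/* Fill every dword of the object's physical backing store with the same value. */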
static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
{
	const bool has_llc = HAS_LLC(to_i915(obj->base.dev));
	unsigned int n, m, need_flush;
	int err;

	err = i915_gem_object_prepare_write(obj, &need_flush);
	if (err)
		return err;

	for (n = 0; n < real_page_count(obj); n++) {
		u32 *map;

		map = kmap_atomic(i915_gem_object_get_page(obj, n));
		for (m = 0; m < DW_PER_PAGE; m++)
			map[m] = value;
		if (!has_llc)
			drm_clflush_virt_range(map, PAGE_SIZE);
		kunmap_atomic(map);
	}

	i915_gem_object_finish_access(obj);
	obj->read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
	obj->write_domain = 0;
	return 0;
}

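/*
 * Check that the first max dwords of each backing page were overwritten by
 * the GPU with their dword index, and that the rest of the page still holds
 * the STACK_MAGIC poison written by cpu_fill().
 */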
static noinline int cpu_check(struct drm_i915_gem_object *obj,
			      unsigned int idx, unsigned int max)
{
	unsigned int n, m, needs_flush;
	int err;

	err = i915_gem_object_prepare_read(obj, &needs_flush);
	if (err)
		return err;

	for (n = 0; n < real_page_count(obj); n++) {
		u32 *map;

		map = kmap_atomic(i915_gem_object_get_page(obj, n));
		if (needs_flush & CLFLUSH_BEFORE)
			drm_clflush_virt_range(map, PAGE_SIZE);

		for (m = 0; m < max; m++) {
			if (map[m] != m) {
				pr_err("%pS: Invalid value at object %d page %d/%ld, offset %d/%d: found %x expected %x\n",
				       __builtin_return_address(0), idx,
				       n, real_page_count(obj), m, max,
				       map[m], m);
				err = -EINVAL;
				goto out_unmap;
			}
		}

		for (; m < DW_PER_PAGE; m++) {
			if (map[m] != STACK_MAGIC) {
				pr_err("%pS: Invalid value at object %d page %d, offset %d: found %x expected %x (uninitialised)\n",
				       __builtin_return_address(0), idx, n, m,
				       map[m], STACK_MAGIC);
				err = -EINVAL;
				goto out_unmap;
			}
		}

out_unmap:
		kunmap_atomic(map);
		if (err)
			break;
	}

	i915_gem_object_finish_access(obj);
	return err;
}

static int file_add_object(struct file *file, struct drm_i915_gem_object *obj)
{
	int err;

	GEM_BUG_ON(obj->base.handle_count);

	/* tie the object to the drm_file for easy reaping */
	err = idr_alloc(&to_drm_file(file)->object_idr,
			&obj->base, 1, 0, GFP_KERNEL);
	if (err < 0)
		return err;

	i915_gem_object_get(obj);
	obj->base.handle_count++;
	return 0;
}

static struct drm_i915_gem_object *
create_test_object(struct i915_address_space *vm,
		   struct file *file,
		   struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	u64 size;
	int err;

	/* Keep in GEM's good graces */
	intel_gt_retire_requests(vm->gt);

	size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
	size = round_down(size, DW_PER_PAGE * PAGE_SIZE);

	obj = huge_gem_object(vm->i915, DW_PER_PAGE * PAGE_SIZE, size);
	if (IS_ERR(obj))
		return obj;

	err = file_add_object(file, obj);
	i915_gem_object_put(obj);
	if (err)
		return ERR_PTR(err);

	err = cpu_fill(obj, STACK_MAGIC);
	if (err) {
		pr_err("Failed to fill object with cpu, err=%d\n",
		       err);
		return ERR_PTR(err);
	}

	list_add_tail(&obj->st_link, objects);
	return obj;
}

static unsigned long max_dwords(struct drm_i915_gem_object *obj)
{
	unsigned long npages = fake_page_count(obj);

	GEM_BUG_ON(!IS_ALIGNED(npages, DW_PER_PAGE));
	return npages / DW_PER_PAGE;
}

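/*
 * throttle() keeps a small FIFO of in-flight requests: wait for the oldest
 * to complete before queuing a fresh request on ce, so a test never runs
 * more than count requests ahead of the GPU.
 */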
static void throttle_release(struct i915_request **q, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (IS_ERR_OR_NULL(q[i]))
			continue;

		i915_request_put(fetch_and_zero(&q[i]));
	}
}

static int throttle(struct intel_context *ce,
		    struct i915_request **q, int count)
{
	int i;

	if (!IS_ERR_OR_NULL(q[0])) {
		if (i915_request_wait(q[0],
				      I915_WAIT_INTERRUPTIBLE,
				      MAX_SCHEDULE_TIMEOUT) < 0)
			return -EINTR;

		i915_request_put(q[0]);
	}

	for (i = 0; i < count - 1; i++)
		q[i] = q[i + 1];

	q[i] = intel_context_create_request(ce);
	if (IS_ERR(q[i]))
		return PTR_ERR(q[i]);

	i915_request_get(q[i]);
	i915_request_add(q[i]);

	return 0;
}

static int igt_ctx_exec(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	int err = -ENODEV;

	/*
	 * Create a few different contexts (with different mm) and write
	 * through each ctx/mm using the GPU, making sure those writes end
	 * up in the expected pages of our obj.
	 */

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return 0;

	for_each_uabi_engine(engine, i915) {
		struct drm_i915_gem_object *obj = NULL;
		unsigned long ncontexts, ndwords, dw;
		struct i915_request *tq[5] = {};
		struct igt_live_test t;
		IGT_TIMEOUT(end_time);
		LIST_HEAD(objects);
		struct file *file;

		if (!intel_engine_can_store_dword(engine))
			continue;

		if (!engine->context_size)
			continue; /* No logical context support in HW */

		file = mock_file(i915);
		if (IS_ERR(file))
			return PTR_ERR(file);

		err = igt_live_test_begin(&t, i915, __func__, engine->name);
		if (err)
			goto out_file;

		ncontexts = 0;
		ndwords = 0;
		dw = 0;
		while (!time_after(jiffies, end_time)) {
			struct i915_gem_context *ctx;
			struct intel_context *ce;

			ctx = kernel_context(i915);
			if (IS_ERR(ctx)) {
				err = PTR_ERR(ctx);
				goto out_file;
			}

			ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
			GEM_BUG_ON(IS_ERR(ce));

			if (!obj) {
				obj = create_test_object(ce->vm, file, &objects);
				if (IS_ERR(obj)) {
					err = PTR_ERR(obj);
					intel_context_put(ce);
					kernel_context_close(ctx);
					goto out_file;
				}
			}

			err = gpu_fill(ce, obj, dw);
			if (err) {
				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
				       ndwords, dw, max_dwords(obj),
				       engine->name,
				       yesno(!!rcu_access_pointer(ctx->vm)),
				       err);
				intel_context_put(ce);
				kernel_context_close(ctx);
				goto out_file;
			}

			err = throttle(ce, tq, ARRAY_SIZE(tq));
			if (err) {
				intel_context_put(ce);
				kernel_context_close(ctx);
				goto out_file;
			}

			if (++dw == max_dwords(obj)) {
				obj = NULL;
				dw = 0;
			}

			ndwords++;
			ncontexts++;

			intel_context_put(ce);
			kernel_context_close(ctx);
		}

		pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
			ncontexts, engine->name, ndwords);

		ncontexts = dw = 0;
		list_for_each_entry(obj, &objects, st_link) {
			unsigned int rem =
				min_t(unsigned int, ndwords - dw, max_dwords(obj));

			err = cpu_check(obj, ncontexts++, rem);
			if (err)
				break;

			dw += rem;
		}

out_file:
		throttle_release(tq, ARRAY_SIZE(tq));
		if (igt_live_test_end(&t))
			err = -EIO;

		fput(file);
		if (err)
			return err;

		i915_gem_drain_freed_objects(i915);
	}

	return 0;
}

static int igt_shared_ctx_exec(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_request *tq[5] = {};
	struct i915_gem_context *parent;
	struct intel_engine_cs *engine;
	struct igt_live_test t;
	struct file *file;
	int err = 0;

	/*
	 * Create a few different contexts with the same mm and write
	 * through each ctx using the GPU, making sure those writes end
	 * up in the expected pages of our obj.
	 */
	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	parent = live_context(i915, file);
	if (IS_ERR(parent)) {
		err = PTR_ERR(parent);
		goto out_file;
	}

	if (!parent->vm) { /* not full-ppgtt; nothing to share */
		err = 0;
		goto out_file;
	}

	err = igt_live_test_begin(&t, i915, __func__, "");
	if (err)
		goto out_file;

	for_each_uabi_engine(engine, i915) {
		unsigned long ncontexts, ndwords, dw;
		struct drm_i915_gem_object *obj = NULL;
		IGT_TIMEOUT(end_time);
		LIST_HEAD(objects);

		if (!intel_engine_can_store_dword(engine))
			continue;

		dw = 0;
		ndwords = 0;
		ncontexts = 0;
		while (!time_after(jiffies, end_time)) {
			struct i915_gem_context *ctx;
			struct intel_context *ce;

			ctx = kernel_context(i915);
			if (IS_ERR(ctx)) {
				err = PTR_ERR(ctx);
				goto out_test;
			}

			mutex_lock(&ctx->mutex);
			__assign_ppgtt(ctx, ctx_vm(parent));
			mutex_unlock(&ctx->mutex);

			ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
			GEM_BUG_ON(IS_ERR(ce));

			if (!obj) {
				obj = create_test_object(ctx_vm(parent),
							 file, &objects);
				if (IS_ERR(obj)) {
					err = PTR_ERR(obj);
					intel_context_put(ce);
					kernel_context_close(ctx);
					goto out_test;
				}
			}

			err = gpu_fill(ce, obj, dw);
			if (err) {
				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
				       ndwords, dw, max_dwords(obj),
				       engine->name,
				       yesno(!!rcu_access_pointer(ctx->vm)),
				       err);
				intel_context_put(ce);
				kernel_context_close(ctx);
				goto out_test;
			}

			err = throttle(ce, tq, ARRAY_SIZE(tq));
			if (err) {
				intel_context_put(ce);
				kernel_context_close(ctx);
				goto out_test;
			}

			if (++dw == max_dwords(obj)) {
				obj = NULL;
				dw = 0;
			}

			ndwords++;
			ncontexts++;

			intel_context_put(ce);
			kernel_context_close(ctx);
		}
		pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
			ncontexts, engine->name, ndwords);

		ncontexts = dw = 0;
		list_for_each_entry(obj, &objects, st_link) {
			unsigned int rem =
				min_t(unsigned int, ndwords - dw, max_dwords(obj));

			err = cpu_check(obj, ncontexts++, rem);
			if (err)
				goto out_test;

			dw += rem;
		}

		i915_gem_drain_freed_objects(i915);
	}
out_test:
	throttle_release(tq, ARRAY_SIZE(tq));
	if (igt_live_test_end(&t))
		err = -EIO;
out_file:
	fput(file);
	return err;
}

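/*
 * Build a small batch that captures the current R_PWR_CLK_STATE (RPCS)
 * register into the target vma using MI_STORE_REGISTER_MEM (gen8+ only).
 */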
static struct i915_vma *rpcs_query_batch(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj;
	u32 *cmd;
	int err;

	if (INTEL_GEN(vma->vm->i915) < 8)
		return ERR_PTR(-EINVAL);

	obj = i915_gem_object_create_internal(vma->vm->i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	*cmd++ = MI_STORE_REGISTER_MEM_GEN8;
	*cmd++ = i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE);
	*cmd++ = lower_32_bits(vma->node.start);
	*cmd++ = upper_32_bits(vma->node.start);
	*cmd = MI_BATCH_BUFFER_END;

	__i915_gem_object_flush_map(obj, 0, 64);
	i915_gem_object_unpin_map(obj);

	intel_gt_chipset_flush(vma->vm->gt);

	vma = i915_vma_instance(obj, vma->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

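/*
 * Submit the RPCS query batch on ce, recording the register value into obj
 * so the CPU can inspect the slice/subslice configuration afterwards.
 */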
static int
emit_rpcs_query(struct drm_i915_gem_object *obj,
		struct intel_context *ce,
		struct i915_request **rq_out)
{
	struct i915_request *rq;
	struct i915_vma *batch;
	struct i915_vma *vma;
	int err;

	GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));

	vma = i915_vma_instance(obj, ce->vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_gtt_domain(obj, false);
	i915_gem_object_unlock(obj);
	if (err)
		return err;

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		return err;

	batch = rpcs_query_batch(vma);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto err_vma;
	}

	rq = i915_request_create(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_batch;
	}

	i915_vma_lock(batch);
	err = i915_request_await_object(rq, batch->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(batch, rq, 0);
	i915_vma_unlock(batch);
	if (err)
		goto skip_request;

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto skip_request;

	if (rq->engine->emit_init_breadcrumb) {
		err = rq->engine->emit_init_breadcrumb(rq);
		if (err)
			goto skip_request;
	}

	err = rq->engine->emit_bb_start(rq,
					batch->node.start, batch->node.size,
					0);
	if (err)
		goto skip_request;

	i915_vma_unpin_and_release(&batch, 0);
	i915_vma_unpin(vma);

	*rq_out = i915_request_get(rq);

	i915_request_add(rq);

	return 0;

skip_request:
	i915_request_set_error_once(rq, err);
	i915_request_add(rq);
err_batch:
	i915_vma_unpin_and_release(&batch, 0);
err_vma:
	i915_vma_unpin(vma);

	return err;
}

#define TEST_IDLE	BIT(0)
#define TEST_BUSY	BIT(1)
#define TEST_RESET	BIT(2)

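/*
 * For the busy and reset phases, start a spinner on the context first so
 * that the SSEU reconfiguration is applied while the context is active.
 */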
static int
__sseu_prepare(const char *name,
	       unsigned int flags,
	       struct intel_context *ce,
	       struct igt_spinner **spin)
{
	struct i915_request *rq;
	int ret;

	*spin = NULL;
	if (!(flags & (TEST_BUSY | TEST_RESET)))
		return 0;

	*spin = kzalloc(sizeof(**spin), GFP_KERNEL);
	if (!*spin)
		return -ENOMEM;

	ret = igt_spinner_init(*spin, ce->engine->gt);
	if (ret)
		goto err_free;

	rq = igt_spinner_create_request(*spin, ce, MI_NOOP);
	if (IS_ERR(rq)) {
		ret = PTR_ERR(rq);
		goto err_fini;
	}

	i915_request_add(rq);

	if (!igt_wait_for_spinner(*spin, rq)) {
		pr_err("%s: Spinner failed to start!\n", name);
		ret = -ETIMEDOUT;
		goto err_end;
	}

	return 0;

err_end:
	igt_spinner_end(*spin);
err_fini:
	igt_spinner_fini(*spin);
err_free:
	kfree(fetch_and_zero(spin));
	return ret;
}

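/*
 * Run the RPCS query and decode the slice-count field from the captured
 * register value; returns the number of enabled slices or a negative errno.
 */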
static int
__read_slice_count(struct intel_context *ce,
		   struct drm_i915_gem_object *obj,
		   struct igt_spinner *spin,
		   u32 *rpcs)
{
	struct i915_request *rq = NULL;
	u32 s_mask, s_shift;
	unsigned int cnt;
	u32 *buf, val;
	long ret;

	ret = emit_rpcs_query(obj, ce, &rq);
	if (ret)
		return ret;

	if (spin)
		igt_spinner_end(spin);

	ret = i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
	i915_request_put(rq);
	if (ret < 0)
		return ret;

	buf = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		return ret;
	}

	if (INTEL_GEN(ce->engine->i915) >= 11) {
		s_mask = GEN11_RPCS_S_CNT_MASK;
		s_shift = GEN11_RPCS_S_CNT_SHIFT;
	} else {
		s_mask = GEN8_RPCS_S_CNT_MASK;
		s_shift = GEN8_RPCS_S_CNT_SHIFT;
	}

	val = *buf;
	cnt = (val & s_mask) >> s_shift;
	*rpcs = val;

	i915_gem_object_unpin_map(obj);

	return cnt;
}

static int
__check_rpcs(const char *name, u32 rpcs, int slices, unsigned int expected,
	     const char *prefix, const char *suffix)
{
	if (slices == expected)
		return 0;

	if (slices < 0) {
		pr_err("%s: %s read slice count failed with %d%s\n",
		       name, prefix, slices, suffix);
		return slices;
	}

	pr_err("%s: %s slice count %d is not %u%s\n",
	       name, prefix, slices, expected, suffix);

	pr_info("RPCS=0x%x; %u%sx%u%s\n",
		rpcs, slices,
		(rpcs & GEN8_RPCS_S_CNT_ENABLE) ? "*" : "",
		(rpcs & GEN8_RPCS_SS_CNT_MASK) >> GEN8_RPCS_SS_CNT_SHIFT,
		(rpcs & GEN8_RPCS_SS_CNT_ENABLE) ? "*" : "");

	return -EINVAL;
}

static int
__sseu_finish(const char *name,
	      unsigned int flags,
	      struct intel_context *ce,
	      struct drm_i915_gem_object *obj,
	      unsigned int expected,
	      struct igt_spinner *spin)
{
	unsigned int slices = hweight32(ce->engine->sseu.slice_mask);
	u32 rpcs = 0;
	int ret = 0;

	if (flags & TEST_RESET) {
		ret = intel_engine_reset(ce->engine, "sseu");
		if (ret)
			goto out;
	}

	ret = __read_slice_count(ce, obj,
				 flags & TEST_RESET ? NULL : spin, &rpcs);
	ret = __check_rpcs(name, rpcs, ret, expected, "Context", "!");
	if (ret)
		goto out;

	ret = __read_slice_count(ce->engine->kernel_context, obj, NULL, &rpcs);
	ret = __check_rpcs(name, rpcs, ret, slices, "Kernel context", "!");

out:
	if (spin)
		igt_spinner_end(spin);

	if ((flags & TEST_IDLE) && ret == 0) {
		ret = igt_flush_test(ce->engine->i915);
		if (ret)
			return ret;

		ret = __read_slice_count(ce, obj, NULL, &rpcs);
		ret = __check_rpcs(name, rpcs, ret, expected,
				   "Context", " after idle!");
	}

	return ret;
}

static int
__sseu_test(const char *name,
	    unsigned int flags,
	    struct intel_context *ce,
	    struct drm_i915_gem_object *obj,
	    struct intel_sseu sseu)
{
	struct igt_spinner *spin = NULL;
	int ret;

	intel_engine_pm_get(ce->engine);

	ret = __sseu_prepare(name, flags, ce, &spin);
	if (ret)
		goto out_pm;

	ret = intel_context_reconfigure_sseu(ce, sseu);
	if (ret)
		goto out_spin;

	ret = __sseu_finish(name, flags, ce, obj,
			    hweight32(sseu.slice_mask), spin);

out_spin:
	if (spin) {
		igt_spinner_end(spin);
		igt_spinner_fini(spin);
		kfree(spin);
	}
out_pm:
	intel_engine_pm_put(ce->engine);
	return ret;
}

static int
__igt_ctx_sseu(struct drm_i915_private *i915,
	       const char *name,
	       unsigned int flags)
{
	struct drm_i915_gem_object *obj;
	int inst = 0;
	int ret = 0;

	if (INTEL_GEN(i915) < 9 || !RUNTIME_INFO(i915)->sseu.has_slice_pg)
		return 0;

	if (flags & TEST_RESET)
		igt_global_reset_lock(&i915->gt);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	do {
		struct intel_engine_cs *engine;
		struct intel_context *ce;
		struct intel_sseu pg_sseu;

		engine = intel_engine_lookup_user(i915,
						  I915_ENGINE_CLASS_RENDER,
						  inst++);
		if (!engine)
			break;

		if (hweight32(engine->sseu.slice_mask) < 2)
			continue;

		/*
		 * Gen11 VME friendly power-gated configuration with
		 * half enabled sub-slices.
		 */
		pg_sseu = engine->sseu;
		pg_sseu.slice_mask = 1;
		pg_sseu.subslice_mask =
			~(~0 << (hweight32(engine->sseu.subslice_mask) / 2));

		pr_info("%s: SSEU subtest '%s', flags=%x, def_slices=%u, pg_slices=%u\n",
			engine->name, name, flags,
			hweight32(engine->sseu.slice_mask),
			hweight32(pg_sseu.slice_mask));

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			ret = PTR_ERR(ce);
			goto out_put;
		}

		ret = intel_context_pin(ce);
		if (ret)
			goto out_ce;

		/* First set the default mask. */
		ret = __sseu_test(name, flags, ce, obj, engine->sseu);
		if (ret)
			goto out_unpin;

		/* Then set a power-gated configuration. */
		ret = __sseu_test(name, flags, ce, obj, pg_sseu);
		if (ret)
			goto out_unpin;

		/* Back to defaults. */
		ret = __sseu_test(name, flags, ce, obj, engine->sseu);
		if (ret)
			goto out_unpin;

		/* One last power-gated configuration for the road. */
		ret = __sseu_test(name, flags, ce, obj, pg_sseu);
		if (ret)
			goto out_unpin;

out_unpin:
		intel_context_unpin(ce);
out_ce:
		intel_context_put(ce);
	} while (!ret);

	if (igt_flush_test(i915))
		ret = -EIO;

out_put:
	i915_gem_object_put(obj);

out_unlock:
	if (flags & TEST_RESET)
		igt_global_reset_unlock(&i915->gt);

	if (ret)
		pr_err("%s: Failed with %d!\n", name, ret);

	return ret;
}

static int igt_ctx_sseu(void *arg)
{
	struct {
		const char *name;
		unsigned int flags;
	} *phase, phases[] = {
		{ .name = "basic", .flags = 0 },
		{ .name = "idle", .flags = TEST_IDLE },
		{ .name = "busy", .flags = TEST_BUSY },
		{ .name = "busy-reset", .flags = TEST_BUSY | TEST_RESET },
		{ .name = "busy-idle", .flags = TEST_BUSY | TEST_IDLE },
		{ .name = "reset-idle", .flags = TEST_RESET | TEST_IDLE },
	};
	unsigned int i;
	int ret = 0;

	for (i = 0, phase = phases; ret == 0 && i < ARRAY_SIZE(phases);
	     i++, phase++)
		ret = __igt_ctx_sseu(arg, phase->name, phase->flags);

	return ret;
}

static int igt_ctx_readonly(void *arg)
{
	struct drm_i915_private *i915 = arg;
	unsigned long idx, ndwords, dw, num_engines;
	struct drm_i915_gem_object *obj = NULL;
	struct i915_request *tq[5] = {};
	struct i915_gem_engines_iter it;
	struct i915_address_space *vm;
	struct i915_gem_context *ctx;
	struct intel_context *ce;
	struct igt_live_test t;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	LIST_HEAD(objects);
	struct file *file;
	int err = -ENODEV;

	/*
	 * Create a few read-only objects (with the occasional writable object)
	 * and try to write into these objects, checking that the GPU discards
	 * any write to a read-only object.
	 */

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	err = igt_live_test_begin(&t, i915, __func__, "");
	if (err)
		goto out_file;

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_file;
	}

	vm = ctx_vm(ctx) ?: &i915->ggtt.alias->vm;
	if (!vm || !vm->has_read_only) {
		err = 0;
		goto out_file;
	}

	num_engines = 0;
	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
		if (intel_engine_can_store_dword(ce->engine))
			num_engines++;
	i915_gem_context_unlock_engines(ctx);

	ndwords = 0;
	dw = 0;
	while (!time_after(jiffies, end_time)) {
		for_each_gem_engine(ce,
				    i915_gem_context_lock_engines(ctx), it) {
			if (!intel_engine_can_store_dword(ce->engine))
				continue;

			if (!obj) {
				obj = create_test_object(ce->vm, file, &objects);
				if (IS_ERR(obj)) {
					err = PTR_ERR(obj);
					i915_gem_context_unlock_engines(ctx);
					goto out_file;
				}

				if (prandom_u32_state(&prng) & 1)
					i915_gem_object_set_readonly(obj);
			}

			err = gpu_fill(ce, obj, dw);
			if (err) {
				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
				       ndwords, dw, max_dwords(obj),
				       ce->engine->name,
				       yesno(!!ctx_vm(ctx)),
				       err);
				i915_gem_context_unlock_engines(ctx);
				goto out_file;
			}

			err = throttle(ce, tq, ARRAY_SIZE(tq));
			if (err) {
				i915_gem_context_unlock_engines(ctx);
				goto out_file;
			}

			if (++dw == max_dwords(obj)) {
				obj = NULL;
				dw = 0;
			}
			ndwords++;
		}
		i915_gem_context_unlock_engines(ctx);
	}
	pr_info("Submitted %lu dwords (across %lu engines)\n",
		ndwords, num_engines);

	dw = 0;
	idx = 0;
	list_for_each_entry(obj, &objects, st_link) {
		unsigned int rem =
			min_t(unsigned int, ndwords - dw, max_dwords(obj));
		unsigned int num_writes;

		num_writes = rem;
		if (i915_gem_object_is_readonly(obj))
			num_writes = 0;

		err = cpu_check(obj, idx++, num_writes);
		if (err)
			break;

		dw += rem;
	}

out_file:
	throttle_release(tq, ARRAY_SIZE(tq));
	if (igt_live_test_end(&t))
		err = -EIO;

	fput(file);
	return err;
}

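/*
 * Confirm the chosen scratch offset is not covered by any node in the vm's
 * drm_mm, so that accesses to it land on the scratch page rather than on a
 * live object.
 */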
static int check_scratch(struct i915_address_space *vm, u64 offset)
{
	struct drm_mm_node *node;

	mutex_lock(&vm->mutex);
	node = __drm_mm_interval_first(&vm->mm,
				       offset, offset + sizeof(u32) - 1);
	mutex_unlock(&vm->mutex);
	if (!node || node->start > offset)
		return 0;

	GEM_BUG_ON(offset >= node->start + node->size);

	pr_err("Target offset 0x%08x_%08x overlaps with a node in the mm!\n",
	       upper_32_bits(offset), lower_32_bits(offset));
	return -EINVAL;
}

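/*
 * Emit a batch from ctx that stores a single dword of value at the given
 * offset within its vm, using MI_STORE_DWORD_IMM.
 */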
static int write_to_scratch(struct i915_gem_context *ctx,
			    struct intel_engine_cs *engine,
			    u64 offset, u32 value)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 *cmd;
	int err;

	GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);

	err = check_scratch(ctx_vm(ctx), offset);
	if (err)
		return err;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto out;
	}

	*cmd++ = MI_STORE_DWORD_IMM_GEN4;
	if (INTEL_GEN(i915) >= 8) {
		*cmd++ = lower_32_bits(offset);
		*cmd++ = upper_32_bits(offset);
	} else {
		*cmd++ = 0;
		*cmd++ = offset;
	}
	*cmd++ = value;
	*cmd = MI_BATCH_BUFFER_END;
	__i915_gem_object_flush_map(obj, 0, 64);
	i915_gem_object_unpin_map(obj);

	intel_gt_chipset_flush(engine->gt);

	vm = i915_gem_context_get_vm_rcu(ctx);
	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_vm;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
	if (err)
		goto out_vm;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, 0);
	i915_vma_unlock(vma);
	if (err)
		goto skip_request;

	if (rq->engine->emit_init_breadcrumb) {
		err = rq->engine->emit_init_breadcrumb(rq);
		if (err)
			goto skip_request;
	}

	err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
	if (err)
		goto skip_request;

	i915_vma_unpin(vma);

	i915_request_add(rq);

	goto out_vm;
skip_request:
	i915_request_set_error_once(rq, err);
	i915_request_add(rq);
err_unpin:
	i915_vma_unpin(vma);
out_vm:
	i915_vm_put(vm);
out:
	i915_gem_object_put(obj);
	return err;
}

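/*
 * Read a dword back from the given offset in ctx's vm: load it into a GPR
 * with MI_LOAD_REGISTER_MEM and then store the GPR into our batch object.
 * On older hardware (see the hsw comment below) register access from a
 * userspace batch is protected, so a privileged batch in the GGTT is used
 * instead.
 */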
static int read_from_scratch(struct i915_gem_context *ctx,
			     struct intel_engine_cs *engine,
			     u64 offset, u32 *value)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;
	const u32 result = 0x100;
	struct i915_request *rq;
	struct i915_vma *vma;
	unsigned int flags;
	u32 *cmd;
	int err;

	GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);

	err = check_scratch(ctx_vm(ctx), offset);
	if (err)
		return err;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (INTEL_GEN(i915) >= 8) {
		const u32 GPR0 = engine->mmio_base + 0x600;

		vm = i915_gem_context_get_vm_rcu(ctx);
		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_vm;
		}

		err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
		if (err)
			goto out_vm;

		cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
		if (IS_ERR(cmd)) {
			err = PTR_ERR(cmd);
			goto out;
		}

		memset(cmd, POISON_INUSE, PAGE_SIZE);
		*cmd++ = MI_LOAD_REGISTER_MEM_GEN8;
		*cmd++ = GPR0;
		*cmd++ = lower_32_bits(offset);
		*cmd++ = upper_32_bits(offset);
		*cmd++ = MI_STORE_REGISTER_MEM_GEN8;
		*cmd++ = GPR0;
		*cmd++ = result;
		*cmd++ = 0;
		*cmd = MI_BATCH_BUFFER_END;

		i915_gem_object_flush_map(obj);
		i915_gem_object_unpin_map(obj);

		flags = 0;
	} else {
		const u32 reg = engine->mmio_base + 0x420;

		/* hsw: register access even to 3DPRIM! is protected */
		vm = i915_vm_get(&engine->gt->ggtt->vm);
		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_vm;
		}

		err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
		if (err)
			goto out_vm;

		cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
		if (IS_ERR(cmd)) {
			err = PTR_ERR(cmd);
			goto out;
		}

		memset(cmd, POISON_INUSE, PAGE_SIZE);
		*cmd++ = MI_LOAD_REGISTER_MEM;
		*cmd++ = reg;
		*cmd++ = offset;
		*cmd++ = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
		*cmd++ = reg;
		*cmd++ = vma->node.start + result;
		*cmd = MI_BATCH_BUFFER_END;

		i915_gem_object_flush_map(obj);
		i915_gem_object_unpin_map(obj);

		flags = I915_DISPATCH_SECURE;
	}

	intel_gt_chipset_flush(engine->gt);

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto skip_request;

	if (rq->engine->emit_init_breadcrumb) {
		err = rq->engine->emit_init_breadcrumb(rq);
		if (err)
			goto skip_request;
	}

	err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, flags);
	if (err)
		goto skip_request;

	i915_vma_unpin(vma);

	i915_request_add(rq);

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_cpu_domain(obj, false);
	i915_gem_object_unlock(obj);
	if (err)
		goto out_vm;

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto out_vm;
	}

	*value = cmd[result / sizeof(*cmd)];
	i915_gem_object_unpin_map(obj);

	goto out_vm;
skip_request:
	i915_request_set_error_once(rq, err);
	i915_request_add(rq);
err_unpin:
	i915_vma_unpin(vma);
out_vm:
	i915_vm_put(vm);
out:
	i915_gem_object_put(obj);
	return err;
}

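/*
 * Sample the initial contents of the vm's scratch page and check that it is
 * uniform; this is the value a cross-context read is expected to observe.
 */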
static int check_scratch_page(struct i915_gem_context *ctx, u32 *out)
{
	struct i915_address_space *vm;
	struct page *page;
	u32 *vaddr;
	int err = 0;

	vm = ctx_vm(ctx);
	if (!vm)
		return -ENODEV;

	page = vm->scratch[0].base.page;
	if (!page) {
		pr_err("No scratch page!\n");
		return -EINVAL;
	}

	vaddr = kmap(page);
	if (!vaddr) {
		pr_err("No (mappable) scratch page!\n");
		return -EINVAL;
	}

	memcpy(out, vaddr, sizeof(*out));
	if (memchr_inv(vaddr, *out, PAGE_SIZE)) {
		pr_err("Inconsistent initial state of scratch page!\n");
		err = -EINVAL;
	}
	kunmap(page);

	return err;
}

static int igt_vm_isolation(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_a, *ctx_b;
	unsigned long num_engines, count;
	struct intel_engine_cs *engine;
	struct igt_live_test t;
	I915_RND_STATE(prng);
	struct file *file;
	u64 vm_total;
	u32 expected;
	int err;

	if (INTEL_GEN(i915) < 7)
		return 0;

	/*
	 * The simple goal here is that a write into one context is not
	 * observed in a second (separate page tables and scratch).
	 */

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	err = igt_live_test_begin(&t, i915, __func__, "");
	if (err)
		goto out_file;

	ctx_a = live_context(i915, file);
	if (IS_ERR(ctx_a)) {
		err = PTR_ERR(ctx_a);
		goto out_file;
	}

	ctx_b = live_context(i915, file);
	if (IS_ERR(ctx_b)) {
		err = PTR_ERR(ctx_b);
		goto out_file;
	}

	/* We can only test vm isolation if the vms are distinct */
	if (ctx_vm(ctx_a) == ctx_vm(ctx_b))
		goto out_file;

	/* Read the initial state of the scratch page */
	err = check_scratch_page(ctx_a, &expected);
	if (err)
		goto out_file;

	err = check_scratch_page(ctx_b, &expected);
	if (err)
		goto out_file;

	vm_total = ctx_vm(ctx_a)->total;
	GEM_BUG_ON(ctx_vm(ctx_b)->total != vm_total);

	count = 0;
	num_engines = 0;
	for_each_uabi_engine(engine, i915) {
		IGT_TIMEOUT(end_time);
		unsigned long this = 0;

		if (!intel_engine_can_store_dword(engine))
			continue;

		/* Not all engines have their own GPR! */
		if (INTEL_GEN(i915) < 8 && engine->class != RENDER_CLASS)
			continue;

		while (!__igt_timeout(end_time, NULL)) {
			u32 value = 0xc5c5c5c5;
			u64 offset;

			/* Leave enough space at offset 0 for the batch */
			offset = igt_random_offset(&prng,
						   I915_GTT_PAGE_SIZE, vm_total,
						   sizeof(u32), alignof_dword);

			err = write_to_scratch(ctx_a, engine,
					       offset, 0xdeadbeef);
			if (err == 0)
				err = read_from_scratch(ctx_b, engine,
							offset, &value);
			if (err)
				goto out_file;

			if (value != expected) {
				pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
				       engine->name, value,
				       upper_32_bits(offset),
				       lower_32_bits(offset),
				       this);
				err = -EINVAL;
				goto out_file;
			}

			this++;
		}
		count += this;
		num_engines++;
	}
	pr_info("Checked %lu scratch offsets across %lu engines\n",
		count, num_engines);

out_file:
	if (igt_live_test_end(&t))
		err = -EIO;
	fput(file);
	return err;
}

static bool skip_unused_engines(struct intel_context *ce, void *data)
{
	return !ce->state;
}

static void mock_barrier_task(void *data)
{
	unsigned int *counter = data;

	++*counter;
}

static int mock_context_barrier(void *arg)
{
#undef pr_fmt
#define pr_fmt(x) "context_barrier_task():" # x
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx;
	struct i915_request *rq;
	unsigned int counter;
	int err;

	/*
	 * The context barrier provides us with a callback after it emits
	 * a request; useful for retiring old state after loading new.
	 */

	ctx = mock_context(i915, "mock");
	if (!ctx)
		return -ENOMEM;

	counter = 0;
	err = context_barrier_task(ctx, 0,
				   NULL, NULL, mock_barrier_task, &counter);
	if (err) {
		pr_err("Failed at line %d, err=%d\n", __LINE__, err);
		goto out;
	}
	if (counter == 0) {
		pr_err("Did not retire immediately with 0 engines\n");
		err = -EINVAL;
		goto out;
	}

	counter = 0;
	err = context_barrier_task(ctx, ALL_ENGINES,
				   skip_unused_engines,
				   NULL,
				   mock_barrier_task,
				   &counter);
	if (err) {
		pr_err("Failed at line %d, err=%d\n", __LINE__, err);
		goto out;
	}
	if (counter == 0) {
		pr_err("Did not retire immediately for all unused engines\n");
		err = -EINVAL;
		goto out;
	}

	rq = igt_request_alloc(ctx, i915->gt.engine[RCS0]);
	if (IS_ERR(rq)) {
		pr_err("Request allocation failed!\n");
		err = PTR_ERR(rq);
		goto out;
	}
	i915_request_add(rq);

	counter = 0;
	context_barrier_inject_fault = BIT(RCS0);
	err = context_barrier_task(ctx, ALL_ENGINES,
				   NULL, NULL, mock_barrier_task, &counter);
	context_barrier_inject_fault = 0;
	if (err == -ENXIO)
		err = 0;
	else
		pr_err("Did not hit fault injection!\n");
	if (counter != 0) {
		pr_err("Invoked callback on error!\n");
		err = -EIO;
	}
	if (err)
		goto out;

	counter = 0;
	err = context_barrier_task(ctx, ALL_ENGINES,
				   skip_unused_engines,
				   NULL,
				   mock_barrier_task,
				   &counter);
	if (err) {
		pr_err("Failed at line %d, err=%d\n", __LINE__, err);
		goto out;
	}
	mock_device_flush(i915);
	if (counter == 0) {
		pr_err("Did not retire on each active engine\n");
		err = -EINVAL;
		goto out;
	}

out:
	mock_context_close(ctx);
	return err;
#undef pr_fmt
#define pr_fmt(x) x
}

int i915_gem_context_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(mock_context_barrier),
	};
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	err = i915_subtests(tests, i915);

	drm_dev_put(&i915->drm);
	return err;
}

int i915_gem_context_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_nop_switch),
		SUBTEST(live_parallel_switch),
		SUBTEST(igt_ctx_exec),
		SUBTEST(igt_ctx_readonly),
		SUBTEST(igt_ctx_sseu),
		SUBTEST(igt_shared_ctx_exec),
		SUBTEST(igt_vm_isolation),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_live_subtests(tests, i915);
}