/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "i915_selftest.h"
#include "intel_reset.h"

#include "selftests/igt_flush_test.h"
#include "selftests/igt_gem_utils.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/igt_wedge_me.h"
#include "selftests/mock_context.h"
#include "selftests/mock_drm.h"

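/*
 * Registers that are effectively write-only on a given platform: reads do
 * not return what was last written, so the dirty-whitelist test below has
 * to skip them (a readback cannot be verified).
 */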
static const struct wo_register {
	enum intel_platform platform;
	u32 reg;
} wo_registers[] = {
	{ INTEL_GEMINILAKE, 0x731c }
};

#define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 8)
struct wa_lists {
	struct i915_wa_list gt_wa_list;
	struct {
		char name[REF_NAME_MAX];
		struct i915_wa_list wa_list;
		struct i915_wa_list ctx_wa_list;
	} engine[I915_NUM_ENGINES];
};

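/*
 * Build reference copies of the GT, per-engine and per-context workaround
 * lists, exactly as the driver would construct them, so that the state
 * later found on the hardware can be compared against what we expect.
 */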
static void
reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	memset(lists, 0, sizeof(*lists));

	wa_init_start(&lists->gt_wa_list, "GT_REF");
	gt_init_workarounds(i915, &lists->gt_wa_list);
	wa_init_finish(&lists->gt_wa_list);

	for_each_engine(engine, i915, id) {
		struct i915_wa_list *wal = &lists->engine[id].wa_list;
		char *name = lists->engine[id].name;

		snprintf(name, REF_NAME_MAX, "%s_REF", engine->name);

		wa_init_start(wal, name);
		engine_init_workarounds(engine, wal);
		wa_init_finish(wal);

		snprintf(name, REF_NAME_MAX, "%s_CTX_REF", engine->name);

		__intel_engine_init_ctx_wa(engine,
					   &lists->engine[id].ctx_wa_list,
					   name);
	}
}

static void
reference_lists_fini(struct drm_i915_private *i915, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		intel_wa_list_free(&lists->engine[id].wa_list);
		intel_wa_list_free(&lists->engine[id].ctx_wa_list);
	}

	intel_wa_list_free(&lists->gt_wa_list);
}

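/*
 * Submit a request on @engine that copies the contents of all the
 * RING_FORCE_TO_NONPRIV slots into a freshly allocated buffer object
 * (via MI_STORE_REGISTER_MEM), returning that object for inspection.
 */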
static struct drm_i915_gem_object *
read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
	const u32 base = engine->mmio_base;
	struct drm_i915_gem_object *result;
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 srm, *cs;
	int err;
	int i;

	result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(result))
		return result;

	i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);

	cs = i915_gem_object_pin_map(result, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_obj;
	}
	memset(cs, 0xc5, PAGE_SIZE);
	i915_gem_object_flush_map(result);
	i915_gem_object_unpin_map(result);

	vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err)
		goto err_obj;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_pin;
	}

	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	if (err)
		goto err_req;

	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (INTEL_GEN(ctx->i915) >= 8)
		srm++; /* gen8+ SRM takes a 64b address: one extra dword */

	cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		*cs++ = srm;
		*cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
		*cs++ = 0;
	}
	intel_ring_advance(rq, cs);

	i915_gem_object_get(result);
	i915_gem_object_set_active_reference(result);

	i915_request_add(rq);
	i915_vma_unpin(vma);

	return result;

err_req:
	i915_request_add(rq);
err_pin:
	i915_vma_unpin(vma);
err_obj:
	i915_gem_object_put(result);
	return ERR_PTR(err);
}

static u32
get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
{
	i915_reg_t reg = i < engine->whitelist.count ?
			 engine->whitelist.list[i].reg :
			 RING_NOPID(engine->mmio_base);

	return i915_mmio_reg_offset(reg);
}

static void
print_results(const struct intel_engine_cs *engine, const u32 *results)
{
	unsigned int i;

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = results[i];

		pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
			i, expected, actual);
	}
}

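/*
 * Read back every RING_FORCE_TO_NONPRIV slot and verify that it still
 * contains the register the whitelist says it should (unused slots are
 * expected to hold RING_NOPID).
 */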
static int check_whitelist(struct i915_gem_context *ctx,
			   struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *results;
	struct igt_wedge_me wedge;
	u32 *vaddr;
	int err;
	int i;

	results = read_nonprivs(ctx, engine);
	if (IS_ERR(results))
		return PTR_ERR(results);

	err = 0;
	igt_wedge_on_timeout(&wedge, ctx->i915, HZ / 5) /* a safety net! */
		err = i915_gem_object_set_to_cpu_domain(results, false);
	if (i915_terminally_wedged(ctx->i915))
		err = -EIO;
	if (err)
		goto out_put;

	vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = vaddr[i];

		if (expected != actual) {
			print_results(engine, vaddr);
			pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
			       i, expected, actual);

			err = -EINVAL;
			break;
		}
	}

	i915_gem_object_unpin_map(results);
out_put:
	i915_gem_object_put(results);
	return err;
}

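/* The two reset flavours under test: full-device and per-engine. */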
static int do_device_reset(struct intel_engine_cs *engine)
{
	i915_reset(engine->i915, engine->mask, "live_workarounds");
	return 0;
}

static int do_engine_reset(struct intel_engine_cs *engine)
{
	return i915_reset_engine(engine, "live_workarounds");
}

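/*
 * Park the engine on a spinner in a throwaway context, so that the reset
 * happens while a context other than the one under test is active.
 */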
static int
switch_to_scratch_context(struct intel_engine_cs *engine,
			  struct igt_spinner *spin)
{
	struct i915_gem_context *ctx;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	int err = 0;

	ctx = kernel_context(engine->i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	GEM_BUG_ON(i915_gem_context_is_bannable(ctx));

	rq = ERR_PTR(-ENODEV);
	with_intel_runtime_pm(engine->i915, wakeref)
		rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);

	kernel_context_close(ctx);

	if (IS_ERR(rq)) {
		spin = NULL;
		err = PTR_ERR(rq);
		goto err;
	}

	i915_request_add(rq);

	if (spin && !igt_wait_for_spinner(spin, rq)) {
		pr_err("Spinner failed to start\n");
		err = -ETIMEDOUT;
	}

err:
	if (err && spin)
		igt_spinner_end(spin);

	return err;
}

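/*
 * Verify the whitelist before a reset, perform the reset while a scratch
 * context is busy, then verify the whitelist again in the same context
 * and once more in a freshly created context.
 */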
static int check_whitelist_across_reset(struct intel_engine_cs *engine,
					int (*reset)(struct intel_engine_cs *),
					const char *name)
{
	struct drm_i915_private *i915 = engine->i915;
	struct i915_gem_context *ctx, *tmp;
	struct igt_spinner spin;
	intel_wakeref_t wakeref;
	int err;

	pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n",
		engine->whitelist.count, name);

	err = igt_spinner_init(&spin, i915);
	if (err)
		return err;

	ctx = kernel_context(i915);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_spin;
	}

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Invalid whitelist *before* %s reset!\n", name);
		goto out_ctx;
	}

	err = switch_to_scratch_context(engine, &spin);
	if (err)
		goto out_ctx;

	with_intel_runtime_pm(i915, wakeref)
		err = reset(engine);

	igt_spinner_end(&spin);

	if (err) {
		pr_err("%s reset failed\n", name);
		goto out_ctx;
	}

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Whitelist not preserved in context across %s reset!\n",
		       name);
		goto out_ctx;
	}

	tmp = kernel_context(i915);
	if (IS_ERR(tmp)) {
		err = PTR_ERR(tmp);
		goto out_ctx;
	}
	kernel_context_close(ctx);
	ctx = tmp;

	err = check_whitelist(ctx, engine);
	if (err)
		pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
		       name);

out_ctx:
	kernel_context_close(ctx);
out_spin:
	igt_spinner_fini(&spin);
	return err;
}

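/*
 * Allocate a 16-page internal object in the context's ppGTT for use as a
 * batch buffer, pinned and moved to the WC domain ready for writing.
 */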
static struct i915_vma *create_batch(struct i915_gem_context *ctx)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_internal(ctx->i915, 16 * PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err_obj;

	err = i915_gem_object_set_to_wc_domain(obj, true);
	if (err)
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

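/*
 * Model what the hardware should do with a write: if the reserved-bit
 * probe read back as 0x0000ffff, treat the register as masked (the high
 * word selects which low-word bits the write may touch), otherwise rsvd
 * is a plain mask of the writable bits. E.g. for a masked register,
 * writing 0x00010001 sets bit 0 while writing 0x00010000 clears it.
 */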
static u32 reg_write(u32 old, u32 new, u32 rsvd)
{
	if (rsvd == 0x0000ffff) {
		old &= ~(new >> 16);
		old |= new & (new >> 16);
	} else {
		old &= ~rsvd;
		old |= new & rsvd;
	}

	return old;
}

static bool wo_register(struct intel_engine_cs *engine, u32 reg)
{
	enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
	int i;

	for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
		if (wo_registers[i].platform == platform &&
		    wo_registers[i].reg == reg)
			return true;
	}

	return false;
}

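/*
 * For each whitelisted register: save the original value with SRM, write
 * a series of garbage values (and their complements) with unprivileged
 * LRI, storing each readback, then restore the original with LRM. The
 * readbacks are compared against the reg_write() model, using the final
 * write of 0xffffffff to detect which bits are actually writable.
 */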
static int check_dirty_whitelist(struct i915_gem_context *ctx,
				 struct intel_engine_cs *engine)
{
	const u32 values[] = {
		0x00000000,
		0x01010101,
		0x10100101,
		0x03030303,
		0x30300303,
		0x05050505,
		0x50500505,
		0x0f0f0f0f,
		0xf00ff00f,
		0x10101010,
		0xf0f01010,
		0x30303030,
		0xa0a03030,
		0x50505050,
		0xc0c05050,
		0xf0f0f0f0,
		0x11111111,
		0x33333333,
		0x55555555,
		0x0000ffff,
		0x00ff00ff,
		0xff0000ff,
		0xffff00ff,
		0xffffffff,
	};
	struct i915_vma *scratch;
	struct i915_vma *batch;
	int err = 0, i, v;
	u32 *cs, *results;

	scratch = create_scratch(&ctx->ppgtt->vm, 2 * ARRAY_SIZE(values) + 1);
	if (IS_ERR(scratch))
		return PTR_ERR(scratch);

	batch = create_batch(ctx);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_scratch;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
		u64 addr = scratch->node.start;
		struct i915_request *rq;
		u32 srm, lrm, rsvd;
		u32 expect;
		int idx;

		if (wo_register(engine, reg))
			continue;

		srm = MI_STORE_REGISTER_MEM;
		lrm = MI_LOAD_REGISTER_MEM;
		if (INTEL_GEN(ctx->i915) >= 8)
			lrm++, srm++;

		pr_debug("%s: Writing garbage to %x\n",
			 engine->name, reg);

		cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
		if (IS_ERR(cs)) {
			err = PTR_ERR(cs);
			goto out_batch;
		}

		/* SRM original */
		*cs++ = srm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = ~values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		GEM_BUG_ON(idx * sizeof(u32) > scratch->size);

		/* LRM original -- don't leave garbage in the context! */
		*cs++ = lrm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		*cs++ = MI_BATCH_BUFFER_END;

		i915_gem_object_flush_map(batch->obj);
		i915_gem_object_unpin_map(batch->obj);
		i915_gem_chipset_flush(ctx->i915);

		rq = igt_request_alloc(ctx, engine);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_batch;
		}

		if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
			err = engine->emit_init_breadcrumb(rq);
			if (err)
				goto err_request;
		}

		err = engine->emit_bb_start(rq,
					    batch->node.start, PAGE_SIZE,
					    0);
		if (err)
			goto err_request;

err_request:
		i915_request_add(rq);
		if (err)
			goto out_batch;

		if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
			pr_err("%s: Futzing %x timed out; cancelling test\n",
			       engine->name, reg);
			i915_gem_set_wedged(ctx->i915);
			err = -EIO;
			goto out_batch;
		}

		results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
		if (IS_ERR(results)) {
			err = PTR_ERR(results);
			goto out_batch;
		}

		GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
		rsvd = results[ARRAY_SIZE(values)]; /* detect write masking */
		if (!rsvd) {
			pr_err("%s: Unable to write to whitelisted register %x\n",
			       engine->name, reg);
			err = -EINVAL;
			goto out_unpin;
		}

		expect = results[0];
		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			expect = reg_write(expect, values[v], rsvd);
			if (results[idx] != expect)
				err++;
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			expect = reg_write(expect, ~values[v], rsvd);
			if (results[idx] != expect)
				err++;
			idx++;
		}
		if (err) {
			pr_err("%s: %d mismatches between values written to whitelisted register [%x] and values read back!\n",
			       engine->name, err, reg);

			pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
				engine->name, reg, results[0], rsvd);

			expect = results[0];
			idx = 1;
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = values[v];

				expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = ~values[v];

				expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}

			err = -EINVAL;
		}
out_unpin:
		i915_gem_object_unpin_map(scratch->obj);
		if (err)
			break;
	}

	if (igt_flush_test(ctx->i915, I915_WAIT_LOCKED))
		err = -EIO;
out_batch:
	i915_vma_unpin_and_release(&batch, 0);
out_scratch:
	i915_vma_unpin_and_release(&scratch, 0);
	return err;
}

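/*
 * Exercise check_dirty_whitelist() on every engine that has whitelist
 * entries, using a live context created on a mock file. Note that
 * struct_mutex is dropped around mock_file() and mock_file_free().
 */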
static int live_dirty_whitelist(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	struct drm_file *file;
	int err = 0;

	/* Can the user write to the whitelisted registers? */

	if (INTEL_GEN(i915) < 7) /* minimum requirement for LRI, SRM, LRM */
		return 0;

	wakeref = intel_runtime_pm_get(i915);

	mutex_unlock(&i915->drm.struct_mutex);
	file = mock_file(i915);
	mutex_lock(&i915->drm.struct_mutex);
	if (IS_ERR(file)) {
		err = PTR_ERR(file);
		goto out_rpm;
	}

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_file;
	}

	for_each_engine(engine, i915, id) {
		if (engine->whitelist.count == 0)
			continue;

		err = check_dirty_whitelist(ctx, engine);
		if (err)
			goto out_file;
	}

out_file:
	mutex_unlock(&i915->drm.struct_mutex);
	mock_file_free(i915, file);
	mutex_lock(&i915->drm.struct_mutex);
out_rpm:
	intel_runtime_pm_put(i915, wakeref);
	return err;
}

static int live_reset_whitelist(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine = i915->engine[RCS0];
	int err = 0;

	/* If we reset the gpu, we should not lose the RING_NONPRIV */

	if (!engine || engine->whitelist.count == 0)
		return 0;

	igt_global_reset_lock(i915);

	if (intel_has_reset_engine(i915)) {
		err = check_whitelist_across_reset(engine,
						   do_engine_reset,
						   "engine");
		if (err)
			goto out;
	}

	if (intel_has_gpu_reset(i915)) {
		err = check_whitelist_across_reset(engine,
						   do_device_reset,
						   "device");
		if (err)
			goto out;
	}

out:
	igt_global_reset_unlock(i915);
	return err;
}

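/*
 * Emit one SRM per whitelist entry to copy the current value of every
 * whitelisted register into @results, then wait for the request.
 */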
static int read_whitelisted_registers(struct i915_gem_context *ctx,
				      struct intel_engine_cs *engine,
				      struct i915_vma *results)
{
	struct i915_request *rq;
	int i, err = 0;
	u32 srm, *cs;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	srm = MI_STORE_REGISTER_MEM;
	if (INTEL_GEN(ctx->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u64 offset = results->node.start + sizeof(u32) * i;

		*cs++ = srm;
		*cs++ = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
		*cs++ = lower_32_bits(offset);
		*cs++ = upper_32_bits(offset);
	}
	intel_ring_advance(rq, cs);

err_req:
	i915_request_add(rq);

	if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0)
		err = -EIO;

	return err;
}

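/*
 * From an unprivileged batch, write 0xffffffff to every whitelisted
 * register via LRI; the writes should land precisely because the
 * registers are on the whitelist.
 */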
static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
				       struct intel_engine_cs *engine)
{
	struct i915_request *rq;
	struct i915_vma *batch;
	int i, err = 0;
	u32 *cs;

	batch = create_batch(ctx);
	if (IS_ERR(batch))
		return PTR_ERR(batch);

	cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_batch;
	}

	*cs++ = MI_LOAD_REGISTER_IMM(engine->whitelist.count);
	for (i = 0; i < engine->whitelist.count; i++) {
		*cs++ = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
		*cs++ = 0xffffffff;
	}
	*cs++ = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(batch->obj);
	i915_gem_chipset_flush(ctx->i915);

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
		err = engine->emit_init_breadcrumb(rq);
		if (err)
			goto err_request;
	}

	/* Perform the writes from an unprivileged "user" batch */
	err = engine->emit_bb_start(rq, batch->node.start, 0, 0);

err_request:
	i915_request_add(rq);
	if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0)
		err = -EIO;

err_unpin:
	i915_gem_object_unpin_map(batch->obj);
err_batch:
	i915_vma_unpin_and_release(&batch, 0);
	return err;
}

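/*
 * Look up @reg in a { register, gen_mask } table, matching on both the
 * mmio offset and the running platform's generation.
 */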
struct regmask {
	i915_reg_t reg;
	unsigned long gen_mask;
};

static bool find_reg(struct drm_i915_private *i915,
		     i915_reg_t reg,
		     const struct regmask *tbl,
		     unsigned long count)
{
	u32 offset = i915_mmio_reg_offset(reg);

	while (count--) {
		if (INTEL_INFO(i915)->gen_mask & tbl->gen_mask &&
		    i915_mmio_reg_offset(tbl->reg) == offset)
			return true;
		tbl++;
	}

	return false;
}

static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Alas, we must pardon some whitelists. Mistakes already made */
	static const struct regmask pardon[] = {
		{ GEN9_CTX_PREEMPT_REG, INTEL_GEN_MASK(9, 9) },
		{ GEN8_L3SQCREG4, INTEL_GEN_MASK(9, 9) },
	};

	return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon));
}

static bool result_eq(struct intel_engine_cs *engine,
		      u32 a, u32 b, i915_reg_t reg)
{
	if (a != b && !pardon_reg(engine->i915, reg)) {
		pr_err("Whitelisted register 0x%04x not context saved: A=%08x, B=%08x\n",
		       i915_mmio_reg_offset(reg), a, b);
		return false;
	}

	return true;
}

static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Some registers do not seem to behave: our writes cannot be read back */
	static const struct regmask wo[] = {
		{ GEN9_SLICE_COMMON_ECO_CHICKEN1, INTEL_GEN_MASK(9, 9) },
	};

	return find_reg(i915, reg, wo, ARRAY_SIZE(wo));
}

static bool result_neq(struct intel_engine_cs *engine,
		       u32 a, u32 b, i915_reg_t reg)
{
	if (a == b && !writeonly_reg(engine->i915, reg)) {
		pr_err("Whitelist register 0x%04x:%08x was unwritable\n",
		       i915_mmio_reg_offset(reg), a);
		return false;
	}

	return true;
}

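/*
 * Compare two result buffers slot by slot, using @fn to decide whether
 * each pair of values is acceptable (equal for isolation, different for
 * a successful scrub).
 */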
static int
check_whitelisted_registers(struct intel_engine_cs *engine,
			    struct i915_vma *A,
			    struct i915_vma *B,
			    bool (*fn)(struct intel_engine_cs *engine,
				       u32 a, u32 b,
				       i915_reg_t reg))
{
	u32 *a, *b;
	int i, err;

	a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
	if (IS_ERR(a))
		return PTR_ERR(a);

	b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
	if (IS_ERR(b)) {
		err = PTR_ERR(b);
		goto err_a;
	}

	err = 0;
	for (i = 0; i < engine->whitelist.count; i++) {
		if (!fn(engine, a[i], b[i], engine->whitelist.list[i].reg))
			err = -EINVAL;
	}

	i915_gem_object_unpin_map(B->obj);
err_a:
	i915_gem_object_unpin_map(A->obj);
	return err;
}

static int live_isolated_whitelist(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct {
		struct i915_gem_context *ctx;
		struct i915_vma *scratch[2];
	} client[2] = {};
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i, err = 0;

	/*
	 * Check that a write into a whitelisted register works, but is
	 * invisible to a second context.
	 */

	if (!intel_engines_has_context_isolation(i915))
		return 0;

	if (!i915->kernel_context->ppgtt)
		return 0;

	for (i = 0; i < ARRAY_SIZE(client); i++) {
		struct i915_gem_context *c;

		c = kernel_context(i915);
		if (IS_ERR(c)) {
			err = PTR_ERR(c);
			goto err;
		}

		client[i].scratch[0] = create_scratch(&c->ppgtt->vm, 1024);
		if (IS_ERR(client[i].scratch[0])) {
			err = PTR_ERR(client[i].scratch[0]);
			kernel_context_close(c);
			goto err;
		}

		client[i].scratch[1] = create_scratch(&c->ppgtt->vm, 1024);
		if (IS_ERR(client[i].scratch[1])) {
			err = PTR_ERR(client[i].scratch[1]);
			i915_vma_unpin_and_release(&client[i].scratch[0], 0);
			kernel_context_close(c);
			goto err;
		}

		client[i].ctx = c;
	}

	for_each_engine(engine, i915, id) {
		if (!engine->whitelist.count)
			continue;

		/* Read default values */
		err = read_whitelisted_registers(client[0].ctx, engine,
						 client[0].scratch[0]);
		if (err)
			goto err;

		/* Try to overwrite registers (should only affect ctx0) */
		err = scrub_whitelisted_registers(client[0].ctx, engine);
		if (err)
			goto err;

		/* Read values from ctx1; we expect these to be defaults */
		err = read_whitelisted_registers(client[1].ctx, engine,
						 client[1].scratch[0]);
		if (err)
			goto err;

		/* Verify that both reads return the same default values */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[1].scratch[0],
						  result_eq);
		if (err)
			goto err;

		/* Read back the updated values in ctx0 */
		err = read_whitelisted_registers(client[0].ctx, engine,
						 client[0].scratch[1]);
		if (err)
			goto err;

		/* User should be granted privilege to overwrite regs */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[0].scratch[1],
						  result_neq);
		if (err)
			goto err;
	}

err:
	for (i = 0; i < ARRAY_SIZE(client); i++) {
		if (!client[i].ctx)
			break;

		i915_vma_unpin_and_release(&client[i].scratch[1], 0);
		i915_vma_unpin_and_release(&client[i].scratch[0], 0);
		kernel_context_close(client[i].ctx);
	}

	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;

	return err;
}

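/*
 * Check the GT, engine and context workaround lists against what is
 * currently programmed on the hardware.
 */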
static bool
verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
		const char *str)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	bool ok = true;

	ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		enum intel_engine_id id = ce->engine->id;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].wa_list,
					    str) == 0;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].ctx_wa_list,
					    str) == 0;
	}
	i915_gem_context_unlock_engines(ctx);

	return ok;
}

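/*
 * Verify that the workarounds are re-applied over a full GPU reset.
 */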
static int
live_gpu_reset_workarounds(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx;
	intel_wakeref_t wakeref;
	struct wa_lists lists;
	bool ok;

	if (!intel_has_gpu_reset(i915))
		return 0;

	ctx = kernel_context(i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	pr_info("Verifying after GPU reset...\n");

	igt_global_reset_lock(i915);
	wakeref = intel_runtime_pm_get(i915);

	reference_lists_init(i915, &lists);

	ok = verify_wa_lists(ctx, &lists, "before reset");
	if (!ok)
		goto out;

	i915_reset(i915, ALL_ENGINES, "live_workarounds");

	ok = verify_wa_lists(ctx, &lists, "after reset");

out:
	kernel_context_close(ctx);
	reference_lists_fini(i915, &lists);
	intel_runtime_pm_put(i915, wakeref);
	igt_global_reset_unlock(i915);

	return ok ? 0 : -ESRCH;
}

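/*
 * Verify that the workarounds survive a per-engine reset, both when the
 * engine is idle and when it is busy running a spinner.
 */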
static int
live_engine_reset_workarounds(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	struct igt_spinner spin;
	enum intel_engine_id id;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	struct wa_lists lists;
	int ret = 0;

	if (!intel_has_reset_engine(i915))
		return 0;

	ctx = kernel_context(i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	igt_global_reset_lock(i915);
	wakeref = intel_runtime_pm_get(i915);

	reference_lists_init(i915, &lists);

	for_each_engine(engine, i915, id) {
		bool ok;

		pr_info("Verifying after %s reset...\n", engine->name);

		ok = verify_wa_lists(ctx, &lists, "before reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}

		i915_reset_engine(engine, "live_workarounds");

		ok = verify_wa_lists(ctx, &lists, "after idle reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}

		ret = igt_spinner_init(&spin, i915);
		if (ret)
			goto err;

		rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			igt_spinner_fini(&spin);
			goto err;
		}

		i915_request_add(rq);

		if (!igt_wait_for_spinner(&spin, rq)) {
			pr_err("Spinner failed to start\n");
			igt_spinner_fini(&spin);
			ret = -ETIMEDOUT;
			goto err;
		}

		i915_reset_engine(engine, "live_workarounds");

		igt_spinner_end(&spin);
		igt_spinner_fini(&spin);

		ok = verify_wa_lists(ctx, &lists, "after busy reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}
	}

err:
	reference_lists_fini(i915, &lists);
	intel_runtime_pm_put(i915, wakeref);
	igt_global_reset_unlock(i915);
	kernel_context_close(ctx);

	igt_flush_test(i915, I915_WAIT_LOCKED);

	return ret;
}

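/*
 * Entry point: run all the live workaround selftests under struct_mutex,
 * unless the device is already terminally wedged.
 */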
int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_dirty_whitelist),
		SUBTEST(live_reset_whitelist),
		SUBTEST(live_isolated_whitelist),
		SUBTEST(live_gpu_reset_workarounds),
		SUBTEST(live_engine_reset_workarounds),
	};
	int err;

	if (i915_terminally_wedged(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	err = i915_subtests(tests, i915);
	mutex_unlock(&i915->drm.struct_mutex);

	return err;
}